[med-svn] [r-cran-luminescence] 08/10: New upstream version 0.6.4

Andreas Tille <tille@debian.org>
Tue Oct 10 16:56:22 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-luminescence.

commit 2820188e95f0dc314c4a5b46d06713c18d404054
Author: Andreas Tille <tille@debian.org>
Date:   Tue Oct 10 18:51:02 2017 +0200

    New upstream version 0.6.4
---
 DESCRIPTION                            |   90 +
 MD5                                    |  221 ++
 NAMESPACE                              |  244 +++
 NEWS                                   |   22 +
 R/Analyse_SAR.OSLdata.R                |  652 ++++++
 R/CW2pHMi.R                            |  320 +++
 R/CW2pLM.R                             |  135 ++
 R/CW2pLMi.R                            |  263 +++
 R/CW2pPMi.R                            |  253 +++
 R/Luminescence-package.R               |  607 ++++++
 R/RLum-class.R                         |   87 +
 R/RLum.Analysis-class.R                |  681 ++++++
 R/RLum.Data-class.R                    |   32 +
 R/RLum.Data.Curve-class.R              |  458 ++++
 R/RLum.Data.Image-class.R              |  345 +++
 R/RLum.Data.Spectrum-class.R           |  350 +++
 R/RLum.Results-class.R                 |  391 ++++
 R/RcppExports.R                        |   11 +
 R/Risoe.BINfileData2RLum.Analysis.R    |  315 +++
 R/Risoe.BINfileData2RLum.Data.Curve.R  |  141 ++
 R/RisoeBINfileData-class.R             |  276 +++
 R/Second2Gray.R                        |  218 ++
 R/analyse_IRSAR.RF.R                   | 1914 +++++++++++++++++
 R/analyse_SAR.CWOSL.R                  | 1693 +++++++++++++++
 R/analyse_SAR.TL.R                     |  581 +++++
 R/analyse_baSAR.R                      | 2463 +++++++++++++++++++++
 R/analyse_pIRIRSequence.R              |  838 ++++++++
 R/app_RLum.R                           |   24 +
 R/apply_CosmicRayRemoval.R             |  316 +++
 R/apply_EfficiencyCorrection.R         |  110 +
 R/bin_RLum.Data.R                      |   50 +
 R/calc_AliquotSize.R                   |  416 ++++
 R/calc_CentralDose.R                   |  259 +++
 R/calc_CommonDose.R                    |  178 ++
 R/calc_CosmicDoseRate.R                |  574 +++++
 R/calc_FadingCorr.R                    |  452 ++++
 R/calc_FastRatio.R                     |  363 ++++
 R/calc_FiniteMixture.R                 |  570 +++++
 R/calc_FuchsLang2001.R                 |  231 ++
 R/calc_HomogeneityTest.R               |  128 ++
 R/calc_IEU.R                           |  448 ++++
 R/calc_MaxDose.R                       |   99 +
 R/calc_MinDose.R                       |  852 ++++++++
 R/calc_OSLLxTxRatio.R                  |  491 +++++
 R/calc_SourceDoseRate.R                |  229 ++
 R/calc_Statistics.R                    |  256 +++
 R/calc_TLLxTxRatio.R                   |  247 +++
 R/calc_ThermalLifetime.R               |  400 ++++
 R/calc_gSGC.R                          |  448 ++++
 R/extract_IrradiationTimes.R           |  443 ++++
 R/fit_CWCurve.R                        |  816 +++++++
 R/fit_LMCurve.R                        | 1014 +++++++++
 R/get_Layout.R                         |  643 ++++++
 R/get_Quote.R                          |   94 +
 R/get_RLum.R                           |  117 +
 R/get_Risoe.BINfileData.R              |   29 +
 R/get_rightAnswer.R                    |   17 +
 R/internal_as.latex.table.R            |  219 ++
 R/internals_RLum.R                     |   45 +
 R/length_RLum.R                        |   32 +
 R/merge_RLum.Analysis.R                |  145 ++
 R/merge_RLum.Data.Curve.R              |  303 +++
 R/merge_RLum.R                         |  124 ++
 R/merge_RLum.Results.R                 |  132 ++
 R/merge_Risoe.BINfileData.R            |  257 +++
 R/methods_DRAC.R                       |  248 +++
 R/methods_RLum.R                       |  500 +++++
 R/model_LuminescenceSignals.R          |   41 +
 R/names_RLum.R                         |   28 +
 R/plot_AbanicoPlot.R                   | 3691 ++++++++++++++++++++++++++++++++
 R/plot_DRTResults.R                    |  826 +++++++
 R/plot_DetPlot.R                       |  347 +++
 R/plot_FilterCombinations.R            |  297 +++
 R/plot_GrowthCurve.R                   | 1686 +++++++++++++++
 R/plot_Histogram.R                     |  773 +++++++
 R/plot_KDE.R                           | 1213 +++++++++++
 R/plot_NRt.R                           |  235 ++
 R/plot_RLum.Analysis.R                 |  727 +++++++
 R/plot_RLum.Data.Curve.R               |  315 +++
 R/plot_RLum.Data.Image.R               |  211 ++
 R/plot_RLum.Data.Spectrum.R            |  902 ++++++++
 R/plot_RLum.R                          |  175 ++
 R/plot_RLum.Results.R                  | 1123 ++++++++++
 R/plot_RadialPlot.R                    | 1552 ++++++++++++++
 R/plot_Risoe.BINfileData.R             |  237 ++
 R/plot_ViolinPlot.R                    |  269 +++
 R/read_BIN2R.R                         | 1472 +++++++++++++
 R/read_Daybreak2R.R                    |  261 +++
 R/read_SPE2R.R                         |  437 ++++
 R/read_XSYG2R.R                        |  753 +++++++
 R/replicate_RLum.R                     |   26 +
 R/report_RLum.R                        |  742 +++++++
 R/set_RLum.R                           |   77 +
 R/set_Risoe.BINfileData.R              |   25 +
 R/structure_RLum.R                     |   43 +
 R/template_DRAC.R                      |  324 +++
 R/tune_Data.R                          |  106 +
 R/use_DRAC.R                           |  342 +++
 R/verify_SingleGrainData.R             |  462 ++++
 R/write_R2BIN.R                        | 1320 ++++++++++++
 R/zzz.R                                |  280 +++
 data/BaseDataSet.CosmicDoseRate.RData  |  Bin 0 -> 869 bytes
 data/ExampleData.BINfileData.RData     |  Bin 0 -> 344081 bytes
 data/ExampleData.CW_OSL_Curve.RData    |  Bin 0 -> 11430 bytes
 data/ExampleData.DeValues.RData        |  Bin 0 -> 3526 bytes
 data/ExampleData.FittingLM.RData       |  Bin 0 -> 21647 bytes
 data/ExampleData.LxTxData.RData        |  Bin 0 -> 299 bytes
 data/ExampleData.LxTxOSLData.RData     |  Bin 0 -> 827 bytes
 data/ExampleData.RLum.Analysis.RData   |  Bin 0 -> 5792 bytes
 data/ExampleData.RLum.Data.Image.RData |  Bin 0 -> 130823 bytes
 data/ExampleData.XSYG.RData            |  Bin 0 -> 75045 bytes
 data/datalist                          |   10 +
 debian/changelog                       |   21 -
 debian/compat                          |    1 -
 debian/control                         |   37 -
 debian/copyright                       |   28 -
 debian/rules                           |    4 -
 debian/source/format                   |    1 -
 debian/watch                           |    2 -
 inst/CITATION                          |   49 +
 inst/NEWS.Rd                           |   38 +
 inst/doc/S4classObjects.pdf            |  Bin 0 -> 150279 bytes
 inst/doc/index.html                    |   75 +
 man/Analyse_SAR.OSLdata.Rd             |  151 ++
 man/BaseDataSet.CosmicDoseRate.Rd      |  111 +
 man/CW2pHMi.Rd                         |  177 ++
 man/CW2pLM.Rd                          |   90 +
 man/CW2pLMi.Rd                         |  134 ++
 man/CW2pPMi.Rd                         |  141 ++
 man/ExampleData.BINfileData.Rd         |   62 +
 man/ExampleData.CW_OSL_Curve.Rd        |   44 +
 man/ExampleData.DeValues.Rd            |   57 +
 man/ExampleData.FittingLM.Rd           |   29 +
 man/ExampleData.LxTxData.Rd            |   26 +
 man/ExampleData.LxTxOSLData.Rd         |   26 +
 man/ExampleData.RLum.Analysis.Rd       |   48 +
 man/ExampleData.RLum.Data.Image.Rd     |   41 +
 man/ExampleData.XSYG.Rd                |   97 +
 man/Luminescence-package.Rd            |  102 +
 man/RLum-class.Rd                      |   62 +
 man/RLum.Analysis-class.Rd             |  184 ++
 man/RLum.Data-class.Rd                 |   34 +
 man/RLum.Data.Curve-class.Rd           |  147 ++
 man/RLum.Data.Image-class.Rd           |  120 ++
 man/RLum.Data.Spectrum-class.Rd        |  129 ++
 man/RLum.Results-class.Rd              |  153 ++
 man/Risoe.BINfileData-class.Rd         |  204 ++
 man/Risoe.BINfileData2RLum.Analysis.Rd |   87 +
 man/Second2Gray.Rd                     |  106 +
 man/analyse_IRSAR.RF.Rd                |  325 +++
 man/analyse_SAR.CWOSL.Rd               |  211 ++
 man/analyse_SAR.TL.Rd                  |  114 +
 man/analyse_baSAR.Rd                   |  386 ++++
 man/analyse_pIRIRSequence.Rd           |  173 ++
 man/app_RLum.Rd                        |   26 +
 man/apply_CosmicRayRemoval.Rd          |  107 +
 man/apply_EfficiencyCorrection.Rd      |   63 +
 man/as.Rd                              |   77 +
 man/bin_RLum.Data.Rd                   |   60 +
 man/calc_AliquotSize.Rd                |  132 ++
 man/calc_CentralDose.Rd                |  100 +
 man/calc_CommonDose.Rd                 |   93 +
 man/calc_CosmicDoseRate.Rd             |  233 ++
 man/calc_FadingCorr.Rd                 |  172 ++
 man/calc_FastRatio.Rd                  |  107 +
 man/calc_FiniteMixture.Rd              |  170 ++
 man/calc_FuchsLang2001.Rd              |   93 +
 man/calc_HomogeneityTest.Rd            |   59 +
 man/calc_IEU.Rd                        |   81 +
 man/calc_MaxDose.Rd                    |  112 +
 man/calc_MinDose.Rd                    |  274 +++
 man/calc_OSLLxTxRatio.Rd               |  162 ++
 man/calc_SourceDoseRate.Rd             |  143 ++
 man/calc_Statistics.Rd                 |   75 +
 man/calc_TLLxTxRatio.Rd                |   88 +
 man/calc_ThermalLifetime.Rd            |  139 ++
 man/calc_gSGC.Rd                       |   83 +
 man/extract_IrradiationTimes.Rd        |  135 ++
 man/fit_CWCurve.Rd                     |  176 ++
 man/fit_LMCurve.Rd                     |  218 ++
 man/get_Layout.Rd                      |   60 +
 man/get_Quote.Rd                       |   36 +
 man/get_RLum.Rd                        |   73 +
 man/get_Risoe.BINfileData.Rd           |   39 +
 man/get_rightAnswer.Rd                 |   30 +
 man/length_RLum.Rd                     |   40 +
 man/merge_RLum.Analysis.Rd             |   69 +
 man/merge_RLum.Data.Curve.Rd           |  130 ++
 man/merge_RLum.Rd                      |   74 +
 man/merge_RLum.Results.Rd              |   27 +
 man/merge_Risoe.BINfileData.Rd         |   94 +
 man/methods_RLum.Rd                    |  258 +++
 man/model_LuminescenceSignals.Rd       |   46 +
 man/names_RLum.Rd                      |   40 +
 man/plot_AbanicoPlot.Rd                |  425 ++++
 man/plot_DRTResults.Rd                 |  173 ++
 man/plot_DetPlot.Rd                    |  135 ++
 man/plot_FilterCombinations.Rd         |  127 ++
 man/plot_GrowthCurve.Rd                |  194 ++
 man/plot_Histogram.Rd                  |  123 ++
 man/plot_KDE.Rd                        |  169 ++
 man/plot_NRt.Rd                        |  135 ++
 man/plot_RLum.Analysis.Rd              |  110 +
 man/plot_RLum.Data.Curve.Rd            |   71 +
 man/plot_RLum.Data.Image.Rd            |  103 +
 man/plot_RLum.Data.Spectrum.Rd         |  213 ++
 man/plot_RLum.Rd                       |   80 +
 man/plot_RLum.Results.Rd               |   69 +
 man/plot_RadialPlot.Rd                 |  263 +++
 man/plot_Risoe.BINfileData.Rd          |  116 +
 man/plot_ViolinPlot.Rd                 |   90 +
 man/read_BIN2R.Rd                      |  118 +
 man/read_Daybreak2R.Rd                 |   48 +
 man/read_SPE2R.Rd                      |  116 +
 man/read_XSYG2R.Rd                     |  159 ++
 man/replicate_RLum.Rd                  |   32 +
 man/report_RLum.Rd                     |  188 ++
 man/sTeve.Rd                           |   47 +
 man/set_RLum.Rd                        |   75 +
 man/set_Risoe.BINfileData.Rd           |   39 +
 man/structure_RLum.Rd                  |   52 +
 man/template_DRAC.Rd                   |   79 +
 man/tune_Data.Rd                       |   61 +
 man/use_DRAC.Rd                        |  107 +
 man/verify_SingleGrainData.Rd          |  136 ++
 man/write_R2BIN.Rd                     |   86 +
 src/RcppExports.cpp                    |   31 +
 src/analyse_IRSARRF_SRS.cpp            |   66 +
 src/create_UID.cpp                     |   33 +
 229 files changed, 59444 insertions(+), 94 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..cc0f1b1
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,90 @@
+Package: Luminescence
+Type: Package
+Title: Comprehensive Luminescence Dating Data Analysis
+Version: 0.6.4
+Date: 2016-09-09
+Author: Sebastian Kreutzer [aut, trl, cre],
+    Michael Dietze [aut],
+    Christoph Burow [aut, trl, dtc],
+    Margret C. Fuchs [aut],
+    Christoph Schmidt [aut],
+    Manfred Fischer [aut, trl],
+    Johannes Friedrich [aut],
+    Norbert Mercier [aut],
+    Rachel K. Smedley [aut],
+    Julie Durcan [aut],
+    Georgina King [aut],
+    Markus Fuchs [ths]
+Authors@R: c(
+    person("Sebastian", "Kreutzer", role = c("aut", "trl", "cre"), email = "sebastian.kreutzer@u-bordeaux-montaigne.fr"),
+    person("Michael", "Dietze", role = c("aut")),
+    person("Christoph", "Burow", role = c("aut", "trl", "dtc")),
+    person("Margret C.", "Fuchs", role = c("aut")),
+    person("Christoph", "Schmidt", role = c("aut")),
+    person("Manfred", "Fischer", role = c("aut", "trl")),
+    person("Johannes", "Friedrich", role = c("aut")),
+    person("Norbert", "Mercier", role = c("aut")),
+    person("Rachel K.", "Smedley", role = c("aut")),
+    person("Julie", "Durcan", role = c("aut")),
+    person("Georgina", "King", role = c("aut")),
+    person("Markus", "Fuchs", role = c("ths")))
+Maintainer: Sebastian Kreutzer <sebastian.kreutzer@u-bordeaux-montaigne.fr>
+Description: A collection of various R functions for the purpose of Luminescence
+    dating data analysis. This includes, amongst others, data import, export,
+    application of age models, curve deconvolution, sequence analysis and
+    plotting of equivalent dose distributions.
+Contact: Package Developer Team <team@r-luminescence.de>
+License: GPL-3
+Depends: R (>= 3.3.0), utils
+LinkingTo: Rcpp (>= 0.12.5), RcppArmadillo (>= 0.6.700.6.0)
+Imports: bbmle (>= 1.0.18), data.table (>= 1.9.6), httr (>= 1.1.0),
+        matrixStats (>= 0.50.2), methods, Rcpp (>= 0.12.5), minpack.lm
+        (>= 1.2-0), raster (>= 2.5-2), readxl (>= 0.1.1), shape (>=
+        1.4.2), parallel, XML (>= 3.98-1.4), zoo (>= 1.7-13)
+Suggests: RLumShiny (>= 0.1.0), RLumModel (>= 0.1.1), plotly (>=
+        3.4.13), rmarkdown (>= 0.9.6), rjags (>= 4-6), coda (>=
+        0.18-1), pander (>= 0.6.0), rstudioapi (>= 0.5)
+URL: https://CRAN.R-project.org/package=Luminescence
+Collate: 'Analyse_SAR.OSLdata.R' 'CW2pHMi.R' 'CW2pLM.R' 'CW2pLMi.R'
+        'CW2pPMi.R' 'Luminescence-package.R' 'RcppExports.R'
+        'replicate_RLum.R' 'RLum-class.R' 'names_RLum.R'
+        'structure_RLum.R' 'length_RLum.R' 'set_RLum.R' 'get_RLum.R'
+        'RLum.Analysis-class.R' 'RLum.Data-class.R' 'bin_RLum.Data.R'
+        'RLum.Data.Curve-class.R' 'RLum.Data.Image-class.R'
+        'RLum.Data.Spectrum-class.R' 'RLum.Results-class.R'
+        'Risoe.BINfileData2RLum.Analysis.R'
+        'Risoe.BINfileData2RLum.Data.Curve.R' 'set_Risoe.BINfileData.R'
+        'get_Risoe.BINfileData.R' 'RisoeBINfileData-class.R'
+        'Second2Gray.R' 'analyse_IRSAR.RF.R' 'analyse_SAR.CWOSL.R'
+        'analyse_SAR.TL.R' 'analyse_baSAR.R' 'analyse_pIRIRSequence.R'
+        'app_RLum.R' 'apply_CosmicRayRemoval.R'
+        'apply_EfficiencyCorrection.R' 'calc_AliquotSize.R'
+        'calc_CentralDose.R' 'calc_CommonDose.R'
+        'calc_CosmicDoseRate.R' 'calc_FadingCorr.R' 'calc_FastRatio.R'
+        'calc_FiniteMixture.R' 'calc_FuchsLang2001.R'
+        'calc_HomogeneityTest.R' 'calc_IEU.R' 'calc_MaxDose.R'
+        'calc_MinDose.R' 'calc_OSLLxTxRatio.R' 'calc_SourceDoseRate.R'
+        'calc_Statistics.R' 'calc_TLLxTxRatio.R'
+        'calc_ThermalLifetime.R' 'calc_gSGC.R'
+        'extract_IrradiationTimes.R' 'fit_CWCurve.R' 'fit_LMCurve.R'
+        'get_Layout.R' 'get_Quote.R' 'get_rightAnswer.R'
+        'internal_as.latex.table.R' 'internals_RLum.R'
+        'merge_RLum.Analysis.R' 'merge_RLum.Data.Curve.R'
+        'merge_RLum.R' 'merge_RLum.Results.R'
+        'merge_Risoe.BINfileData.R' 'methods_DRAC.R' 'methods_RLum.R'
+        'model_LuminescenceSignals.R' 'plot_AbanicoPlot.R'
+        'plot_DRTResults.R' 'plot_DetPlot.R'
+        'plot_FilterCombinations.R' 'plot_GrowthCurve.R'
+        'plot_Histogram.R' 'plot_KDE.R' 'plot_NRt.R'
+        'plot_RLum.Analysis.R' 'plot_RLum.Data.Curve.R'
+        'plot_RLum.Data.Image.R' 'plot_RLum.Data.Spectrum.R'
+        'plot_RLum.R' 'plot_RLum.Results.R' 'plot_RadialPlot.R'
+        'plot_Risoe.BINfileData.R' 'plot_ViolinPlot.R' 'read_BIN2R.R'
+        'read_Daybreak2R.R' 'read_SPE2R.R' 'read_XSYG2R.R'
+        'report_RLum.R' 'template_DRAC.R' 'tune_Data.R' 'use_DRAC.R'
+        'verify_SingleGrainData.R' 'write_R2BIN.R' 'zzz.R'
+RoxygenNote: 5.0.1
+NeedsCompilation: yes
+Packaged: 2016-09-09 11:58:10 UTC; kreutzer
+Repository: CRAN
+Date/Publication: 2016-09-09 14:52:58
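
For reference, the DESCRIPTION above is what R evaluates at install time. A
minimal sketch of installing exactly this upstream release from the CRAN
source archive and confirming the version; the Archive URL layout is assumed
to follow the standard CRAN scheme:

    ## Sketch (assumes the standard CRAN Archive layout for old releases):
    install.packages(
      "https://cran.r-project.org/src/contrib/Archive/Luminescence/Luminescence_0.6.4.tar.gz",
      repos = NULL, type = "source"
    )
    library(Luminescence)
    packageVersion("Luminescence")  ## should report '0.6.4'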
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..812ccc1
--- /dev/null
+++ b/MD5
@@ -0,0 +1,221 @@
+ad769e06062b5a3edbed7b3b63058ad3 *DESCRIPTION
+29d4399a175e491061b95554c8ba4324 *NAMESPACE
+167712857b8f4d609f964747ece2621b *NEWS
+c1a575eb8fba9c7a3eb7b7b54dfd3581 *R/Analyse_SAR.OSLdata.R
+e3abc05b7f74052b2eafd3855cb7df28 *R/CW2pHMi.R
+980bc4ac52eef3b4f1e840f761d63e2d *R/CW2pLM.R
+36283b60db7cd988f02cfc4beccf9f56 *R/CW2pLMi.R
+aade4defaaafa0a62f0adb5f18022990 *R/CW2pPMi.R
+bb43916f4f08f0e82ddc313de0fd1a2f *R/Luminescence-package.R
+f96b164b48d5343ecaaf4f9b84c74354 *R/RLum-class.R
+54e44e008c02c50942b3afa406494d8f *R/RLum.Analysis-class.R
+738d70f4534e1b5823b0b453e4f2035b *R/RLum.Data-class.R
+14df8e5f6bec944d186b9d6114d366bf *R/RLum.Data.Curve-class.R
+4d18f10245b689a9bb4c88dbf243fe99 *R/RLum.Data.Image-class.R
+79955b868a4822f69b32eb2cb6c30189 *R/RLum.Data.Spectrum-class.R
+cbe9744df2ae9107ede141a02d84a6d8 *R/RLum.Results-class.R
+fe869556fc94aa48cd5a2bfab284dd20 *R/RcppExports.R
+91de57856b65a6649bdce203e7a5eb3e *R/Risoe.BINfileData2RLum.Analysis.R
+90207ff2e2fb8785f4ffc4ee6062600e *R/Risoe.BINfileData2RLum.Data.Curve.R
+c7a2b8661391bc793d466179a74e690a *R/RisoeBINfileData-class.R
+02853953b3a9798309fdd0a9901f096a *R/Second2Gray.R
+aee94ab4556298034b0d567a78adf26c *R/analyse_IRSAR.RF.R
+a50cc85e044b59abf8e082a939035f6c *R/analyse_SAR.CWOSL.R
+ac7c1b7fa5a5cc170f6f355159e910d0 *R/analyse_SAR.TL.R
+aa980706c80a418b5ebf190bbd5d926b *R/analyse_baSAR.R
+ef51962e710f824771edfb8c1c304761 *R/analyse_pIRIRSequence.R
+4d59db74f2b3fc7e552a2acb280bfa98 *R/app_RLum.R
+e8bf70cc879f026a61634bd74d0fc0c7 *R/apply_CosmicRayRemoval.R
+bf6ff653d8a02380264316f3d5758031 *R/apply_EfficiencyCorrection.R
+d27071ceeee72ccb2306634d52367f5f *R/bin_RLum.Data.R
+f9a945af29b554ddc3fa51bb651b47d6 *R/calc_AliquotSize.R
+bbf5f5197d32499f73adf831a3e178c5 *R/calc_CentralDose.R
+627bb5b760cec008b5d725edab15995d *R/calc_CommonDose.R
+9ea4691d8050dd190d1d86c2682fd993 *R/calc_CosmicDoseRate.R
+0aa6f92b7dc89a9dabf468159e0e404e *R/calc_FadingCorr.R
+9764f24c652abdbfa450a989306bedd2 *R/calc_FastRatio.R
+88aeee30c02f49b9b2d50a636bceff1d *R/calc_FiniteMixture.R
+46d7564604b36f3b4dacd7b6e041fe95 *R/calc_FuchsLang2001.R
+3a73c5b14ee3f97469707d72c43dbd05 *R/calc_HomogeneityTest.R
+e7887dbe151b36cbd3e11e436e1e3f60 *R/calc_IEU.R
+6694ae47a51fdc6ba2e07173f7cbb24c *R/calc_MaxDose.R
+fc9e69c883a32a1abadddb47833ca1c8 *R/calc_MinDose.R
+1c66a4302b86ee4dbeef4943c8eccad8 *R/calc_OSLLxTxRatio.R
+2b0adde7f32ed5bfc4c18b1a3f0a9a51 *R/calc_SourceDoseRate.R
+910f5308f61c4dd2d6cdae887510976d *R/calc_Statistics.R
+e72d868f0bb0c5300149e652a4255e29 *R/calc_TLLxTxRatio.R
+16add648f8e85c742ec636dd332898e9 *R/calc_ThermalLifetime.R
+2f2f72d9a72654d953b7789b437b4164 *R/calc_gSGC.R
+d8257bb5d050544905df5489d6fd1376 *R/extract_IrradiationTimes.R
+8b6cc8ec465126940f29fe1fbf353eec *R/fit_CWCurve.R
+9a0a68c1b241e7388bcd07ca958bd663 *R/fit_LMCurve.R
+d73bb484d61479632cd36fc76714e0df *R/get_Layout.R
+7c21ec1847ba7eefc70fa07022503a9b *R/get_Quote.R
+86c7bd983f6902f78e69624ce8ec0645 *R/get_RLum.R
+90259b563bb83ef834d7ffed77dbd37f *R/get_Risoe.BINfileData.R
+b45bae48aecccf0556288eff0f8b60ca *R/get_rightAnswer.R
+173e3a6060036c32353ad079bd8964ba *R/internal_as.latex.table.R
+60511c58e3602b048c6131b88824a8f0 *R/internals_RLum.R
+e2b2ea53ad479b52b03771df74c003c3 *R/length_RLum.R
+221e79181b7da8308c0f178c96b1e3d2 *R/merge_RLum.Analysis.R
+9320718a23373e2c8940c8ed33e8ff02 *R/merge_RLum.Data.Curve.R
+72b5981712a785746e35a60c6665ce8d *R/merge_RLum.R
+13175fd8d36cd775aab857c982a94998 *R/merge_RLum.Results.R
+a4c513bb9917ee68b16f5e463d1c6d6a *R/merge_Risoe.BINfileData.R
+290a2239f4642011957d6e7c80928a1d *R/methods_DRAC.R
+ea509faaf10077d2d5d6c8f64337b146 *R/methods_RLum.R
+d91a70974803ee1a11cf3ba5f637b926 *R/model_LuminescenceSignals.R
+8145486bbfaea0afacf5fa95350b21b4 *R/names_RLum.R
+7d2124d991664742501b08f2803024b7 *R/plot_AbanicoPlot.R
+4322ddddbc85722e7379ce1d92b6da52 *R/plot_DRTResults.R
+43b3c9d49c0a558ab3df5546152dd550 *R/plot_DetPlot.R
+36f6cad21154487d237fb16e5c48217e *R/plot_FilterCombinations.R
+0f1fe20ef527d63c4cf0042c12f8c61e *R/plot_GrowthCurve.R
+019d96fea55d9b18f27c99d92cea10ad *R/plot_Histogram.R
+04101363fc122ac90230252c5190d57e *R/plot_KDE.R
+b270a4b63b7da7a20afd13a8144e5d1f *R/plot_NRt.R
+2d90ce5919b2b3944fa99490f09e473e *R/plot_RLum.Analysis.R
+a9048e93d962b33e55ae9ecd1e905060 *R/plot_RLum.Data.Curve.R
+d6feaf083c303f395a3fd60a4551a6b9 *R/plot_RLum.Data.Image.R
+69d886955acd7a7db3ee3912a0b586e2 *R/plot_RLum.Data.Spectrum.R
+03cec951ade16cfb79a793266b6d5832 *R/plot_RLum.R
+e9f501c16d94132f6ea3fe1ad6cb9fa3 *R/plot_RLum.Results.R
+900e03933972b275fc312e0dbf22cdb6 *R/plot_RadialPlot.R
+203eff338826b122c5fb22f723351c66 *R/plot_Risoe.BINfileData.R
+f6770bdbcf0064b3cb1c40c36ea741a8 *R/plot_ViolinPlot.R
+a50296fa37cf7bee171a22b31f29e1e1 *R/read_BIN2R.R
+07c94e3465db89666f5b3a5dbdf8cd31 *R/read_Daybreak2R.R
+a4f7e396d8b3e12d7f3d52f40254956d *R/read_SPE2R.R
+1e055bd1fbc5e07d54c995902841bfd1 *R/read_XSYG2R.R
+123ed927a9bf2662bb6440022eab158c *R/replicate_RLum.R
+c1dba55660ac08dddc9c2bb26a7ff189 *R/report_RLum.R
+a34034da44c2e10436a9a033d1fc5e9a *R/set_RLum.R
+ad168661d4ea741ccf71544e0b5fc4d7 *R/set_Risoe.BINfileData.R
+a8b9523cf0d7070ef8131ff6128fc0f6 *R/structure_RLum.R
+1a69763394a8fe7ed145ac7a7a886194 *R/template_DRAC.R
+a530148476bff7efc7a117e4c5f02eb0 *R/tune_Data.R
+6e75ef3d269317c01e90f1b83b616ba5 *R/use_DRAC.R
+0faf38a0a9fe9591c6f2a66311efdd26 *R/verify_SingleGrainData.R
+00992ba149e893f7cc98f8291ddddd62 *R/write_R2BIN.R
+891f53b99754d66521c894303a0d5cfb *R/zzz.R
+8eb217fc4380f23781dac785d7690941 *data/BaseDataSet.CosmicDoseRate.RData
+aa6811a6273a8735ce38d2fa0356ef9e *data/ExampleData.BINfileData.RData
+3e72ccbe5fef2feee752206fc52bd358 *data/ExampleData.CW_OSL_Curve.RData
+d6477245d9abca8d86d0eb6d1d1f319b *data/ExampleData.DeValues.RData
+2688778759b5d9ddcd458c98421f5d36 *data/ExampleData.FittingLM.RData
+76abec3d75bbea44fac9f599c0de9f0f *data/ExampleData.LxTxData.RData
+efa094f829c940630aefef99d8eea775 *data/ExampleData.LxTxOSLData.RData
+dd79ddebf77e9d0f482470546512db58 *data/ExampleData.RLum.Analysis.RData
+ee4c8be21bfb1f15b4056edb4b160513 *data/ExampleData.RLum.Data.Image.RData
+c723aab7895f3f8394a10da6d0a6b16d *data/ExampleData.XSYG.RData
+7b4198deaeab8582031b5932341e1477 *data/datalist
+1dbbc084b671a0760f1fd0697d97c2b5 *inst/CITATION
+f09d77f958c2e55e605fe52d20bfc7ed *inst/NEWS.Rd
+12ab72be52e77951d8b5e1ee97f4702e *inst/doc/S4classObjects.pdf
+a7018449ba9936ff3384170611f7c8e4 *inst/doc/index.html
+f6f96ddcffb234907a5048b82d41bdcb *man/Analyse_SAR.OSLdata.Rd
+39d2ec86949546906eba52f6ede31d9d *man/BaseDataSet.CosmicDoseRate.Rd
+9c3b94da5f2d1017c301e99d6fd3d85d *man/CW2pHMi.Rd
+fb86f24fff9409e66cf2243fab61f6e0 *man/CW2pLM.Rd
+875c29b363e184fef1f3647a4197a331 *man/CW2pLMi.Rd
+9c0c5c12473a243c85ae9932056388e2 *man/CW2pPMi.Rd
+7da15a0b7b859211f23e7de4ed267297 *man/ExampleData.BINfileData.Rd
+af667acdedf76cd45c6e5097007c5f47 *man/ExampleData.CW_OSL_Curve.Rd
+934d5e6fa6cf6ef550a947046d49a50f *man/ExampleData.DeValues.Rd
+2702625dd7e4625d5013ed890834ebf3 *man/ExampleData.FittingLM.Rd
+f374ef65f2950cfd41336f4c940123de *man/ExampleData.LxTxData.Rd
+09ab0799e419cf82dd43cb17876372a9 *man/ExampleData.LxTxOSLData.Rd
+99c1a16b5168800a2803d126e659e65b *man/ExampleData.RLum.Analysis.Rd
+12325ed0411eefbaa8e7e8cd6f1f4569 *man/ExampleData.RLum.Data.Image.Rd
+341baf0d297a76f22d55c0b3e0546701 *man/ExampleData.XSYG.Rd
+8a80f89e24da9c05d239466f9153e368 *man/Luminescence-package.Rd
+6fd39a2be77918f7d647ea3017be25fc *man/RLum-class.Rd
+1509c02b0431e792f34c1e11e0bef19a *man/RLum.Analysis-class.Rd
+d2d88a12194c0e2640385b9bd6073604 *man/RLum.Data-class.Rd
+4ed4f390c5240a11adbf3960287c0199 *man/RLum.Data.Curve-class.Rd
+af950947c45be19bc66f42838ab35663 *man/RLum.Data.Image-class.Rd
+66c6d3ed693969ac4ff595d709320e89 *man/RLum.Data.Spectrum-class.Rd
+771fd62edf01a1a1c8d18c8f8a88959d *man/RLum.Results-class.Rd
+124822d87d8653811e913ad6a95d6d9a *man/Risoe.BINfileData-class.Rd
+3e54a534f02460c0de9f8cff2834c29e *man/Risoe.BINfileData2RLum.Analysis.Rd
+f8f4446787186c76492df6472ceab4e6 *man/Second2Gray.Rd
+e3f3abaa6da3f478161ec4e350dfd5d6 *man/analyse_IRSAR.RF.Rd
+de1999d5f6a68ca98011300f51a22794 *man/analyse_SAR.CWOSL.Rd
+658a9de4dd4fcb271dfa0f8d4b0d9464 *man/analyse_SAR.TL.Rd
+42ef16376b7e252fbd03c4b1e2ea002d *man/analyse_baSAR.Rd
+c19c0d6247e8cc383d8aeae6f0717e31 *man/analyse_pIRIRSequence.Rd
+4b5ea17fff3d7a703760271ea081bdda *man/app_RLum.Rd
+efd324636ab5b62d8a7c79717d70bd53 *man/apply_CosmicRayRemoval.Rd
+def80e2dcc3ef4a85a099f529ac07a60 *man/apply_EfficiencyCorrection.Rd
+e367a6f98fcf482b0c3f56ce835ce672 *man/as.Rd
+1c5968fb3f86cfa213e2dcb90e619831 *man/bin_RLum.Data.Rd
+ada7f222b491152172466be0ad52064f *man/calc_AliquotSize.Rd
+5a7feee11b8306cec091458020c098fb *man/calc_CentralDose.Rd
+bb6b17e344490a7199b27895bd6abf3a *man/calc_CommonDose.Rd
+4d38b8beac23beae22b43e5dd5ec326c *man/calc_CosmicDoseRate.Rd
+e8bfc08f7fef6d70e17c83844d26441c *man/calc_FadingCorr.Rd
+5d02b979050b2f3a1640e5fbf7fc1105 *man/calc_FastRatio.Rd
+e007846a7d7be9464f0970b58506a6f9 *man/calc_FiniteMixture.Rd
+f1a9ff7b70f56a4b5c50193ac9d85d83 *man/calc_FuchsLang2001.Rd
+c0b16d0e2d98b7a2aa977740b849951f *man/calc_HomogeneityTest.Rd
+de7283b67e9cb3281d17b9771cc01a1c *man/calc_IEU.Rd
+2898cbc0cbba5cb6099a15ec93f90a42 *man/calc_MaxDose.Rd
+5952fd1adfcb1cc0f080a8beb5e9bc52 *man/calc_MinDose.Rd
+ce5efa7cbffacfea46f58ed5b827d8de *man/calc_OSLLxTxRatio.Rd
+5e295e0195875fa39673281c9a1ca00e *man/calc_SourceDoseRate.Rd
+109c9705a679c1d8b69cd1c3575e1c0c *man/calc_Statistics.Rd
+e3a3471405e8f5e3a007682a2afa0046 *man/calc_TLLxTxRatio.Rd
+cf2735f1e82d92b1b9b2c24de42bc03b *man/calc_ThermalLifetime.Rd
+ac0c563d4a0ccae3072c557319c0348a *man/calc_gSGC.Rd
+504d1e8cbb1b0fa2c63b52a856932934 *man/extract_IrradiationTimes.Rd
+08557bda7bba49c70c214bfdc3ed461b *man/fit_CWCurve.Rd
+f03d933d09467bd3337784902c0cdb4a *man/fit_LMCurve.Rd
+89cc198c98577c495eff803df48719dc *man/get_Layout.Rd
+31cce908e034e48c7bf919b314c2a593 *man/get_Quote.Rd
+ada62940c1f10fe005b1d457f46f012c *man/get_RLum.Rd
+8871034e7484e61454360ac349d25926 *man/get_Risoe.BINfileData.Rd
+29fb6b3419cac59f816b2096fe4d61fe *man/get_rightAnswer.Rd
+8dafe354753f27d8a2ea3a76f043be84 *man/length_RLum.Rd
+6e8ef969c68afb79324b021f0a6c5d32 *man/merge_RLum.Analysis.Rd
+37c54307c88869500dfd789d8b4f2c76 *man/merge_RLum.Data.Curve.Rd
+59bbea14359f1998c93ea0b7740e26a6 *man/merge_RLum.Rd
+d49663484d0887b6bac5e63ec68ca76a *man/merge_RLum.Results.Rd
+b1ac5a5a8cf88e909150d103fb20e156 *man/merge_Risoe.BINfileData.Rd
+02620a556fe2d098c09cac9e6f37abc1 *man/methods_RLum.Rd
+91ece2023bd21e008e68b4c34e943421 *man/model_LuminescenceSignals.Rd
+63dc8aae12ccda26f04a510daf6a5d58 *man/names_RLum.Rd
+ab0911053b849d28df131bb93cfeeeec *man/plot_AbanicoPlot.Rd
+4d389771a38651bda6c3b21cb76e8c2e *man/plot_DRTResults.Rd
+b6df2c9c5cb551bb607c56067e070c01 *man/plot_DetPlot.Rd
+4b51c3f5beec6b3f53a89afda462af75 *man/plot_FilterCombinations.Rd
+a238a35140698db5d70dc0161015397c *man/plot_GrowthCurve.Rd
+449cbfcf19feae6ba9555629616b28a5 *man/plot_Histogram.Rd
+d9a50f2d5115340e610f365318edc85d *man/plot_KDE.Rd
+76483428bb5fb7044dbe4092f9390296 *man/plot_NRt.Rd
+ef6b9a8c1a8a571d5ba0978619ca38e3 *man/plot_RLum.Analysis.Rd
+f5d46b22e8954b6f4558ee3a4a82cdd2 *man/plot_RLum.Data.Curve.Rd
+4669985f2700cfaf4464caff7bff880b *man/plot_RLum.Data.Image.Rd
+098f12925f9f1d0b4b385b312765918c *man/plot_RLum.Data.Spectrum.Rd
+ed6a34b90da65ddcb34e30891c4397a9 *man/plot_RLum.Rd
+8e93b0f6cc2acdaa54f3de39ba9f9e8f *man/plot_RLum.Results.Rd
+4189844c293b23aaa1fe22cf597f46ba *man/plot_RadialPlot.Rd
+5dd93419c7cbaa0c83a2350792b74ded *man/plot_Risoe.BINfileData.Rd
+15878fc8c6efc4a5ffe1c086ddde687d *man/plot_ViolinPlot.Rd
+2db23c413280b1ceacb92786d223221e *man/read_BIN2R.Rd
+a5b18d92de11221f18230fae56edc2bb *man/read_Daybreak2R.Rd
+41bad2f3d619a56ab3b96eaf9fa7a7d2 *man/read_SPE2R.Rd
+64b4051fba2e7e72a223e1d8f71d25d8 *man/read_XSYG2R.Rd
+34f97f9bf23ed57cb0ce2e57f21ee1fd *man/replicate_RLum.Rd
+e88598b8db0e446b1f4b6ed4d4726b71 *man/report_RLum.Rd
+6d94dde798f52744ed7b2c6441cc08c6 *man/sTeve.Rd
+9780fe682dba53efe53fe28773ad32c4 *man/set_RLum.Rd
+b442db81eda6ccf8b860893bfeeaa737 *man/set_Risoe.BINfileData.Rd
+0db898f0db30c9dd8a5ee02e59510dac *man/structure_RLum.Rd
+ced903c1ce612d683004923c8cfd5b39 *man/template_DRAC.Rd
+0bc5b9d9078e954efdad5f1d183c6aff *man/tune_Data.Rd
+263622116992566da3f0b6cc788db56e *man/use_DRAC.Rd
+51dacf72117ea39dc45bb22330439d21 *man/verify_SingleGrainData.Rd
+e9a96cd7970d3c2f447d63dbcdb4d9ff *man/write_R2BIN.Rd
+0500e163992f25bdb1746049f772fe63 *src/RcppExports.cpp
+6bacfacce37d289d8ca7fa19384c2fad *src/analyse_IRSARRF_SRS.cpp
+30434cc523b9b2c9704d7331aefd8a5f *src/create_UID.cpp
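
The MD5 manifest above is what tools::checkMD5sums() uses to detect modified
sources. A hand-rolled sketch of the same check with base R, assuming the
unpacked package root as working directory and the usual two-column layout
(checksum, then filename prefixed with '*'):

    ## Sketch: verify all files against the MD5 manifest.
    manifest <- read.table("MD5", col.names = c("md5", "file"),
                           stringsAsFactors = FALSE)
    manifest$file <- sub("^\\*", "", manifest$file)  ## drop the '*' marker
    actual <- tools::md5sum(manifest$file)
    all(actual == manifest$md5)  ## TRUE if the sources are untouched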
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..0cf7df3
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,244 @@
+# Generated by roxygen2: do not edit by hand
+
+S3method("$",RLum.Analysis)
+S3method("$",RLum.Data.Curve)
+S3method("$",RLum.Results)
+S3method("$<-",DRAC.list)
+S3method("*",RLum.Data.Curve)
+S3method("+",RLum.Data.Curve)
+S3method("-",RLum.Data.Curve)
+S3method("/",RLum.Data.Curve)
+S3method("[",RLum.Analysis)
+S3method("[",RLum.Data.Curve)
+S3method("[",RLum.Data.Image)
+S3method("[",RLum.Data.Spectrum)
+S3method("[",RLum.Results)
+S3method("[<-",DRAC.list)
+S3method("[[",RLum.Analysis)
+S3method("[[",RLum.Results)
+S3method("[[<-",DRAC.list)
+S3method(as.data.frame,DRAC.list)
+S3method(as.data.frame,RLum.Data.Curve)
+S3method(as.data.frame,RLum.Data.Spectrum)
+S3method(as.list,RLum.Analysis)
+S3method(as.list,RLum.Data.Curve)
+S3method(as.list,RLum.Results)
+S3method(as.matrix,RLum.Data.Curve)
+S3method(as.matrix,RLum.Data.Spectrum)
+S3method(dim,RLum.Data.Curve)
+S3method(dim,RLum.Data.Spectrum)
+S3method(hist,RLum.Analysis)
+S3method(hist,RLum.Data.Curve)
+S3method(hist,RLum.Data.Image)
+S3method(hist,RLum.Results)
+S3method(length,RLum.Analysis)
+S3method(length,RLum.Data.Curve)
+S3method(length,RLum.Results)
+S3method(length,Risoe.BINfileData)
+S3method(merge,RLum)
+S3method(names,RLum.Analysis)
+S3method(names,RLum.Data.Curve)
+S3method(names,RLum.Data.Image)
+S3method(names,RLum.Data.Spectrum)
+S3method(names,RLum.Results)
+S3method(names,Risoe.BINfileData)
+S3method(plot,RLum.Analysis)
+S3method(plot,RLum.Data.Curve)
+S3method(plot,RLum.Data.Image)
+S3method(plot,RLum.Data.Spectrum)
+S3method(plot,RLum.Results)
+S3method(plot,Risoe.BINfileData)
+S3method(plot,list)
+S3method(print,DRAC.highlights)
+S3method(print,DRAC.list)
+S3method(rep,RLum)
+S3method(row.names,RLum.Data.Spectrum)
+S3method(subset,Risoe.BINfileData)
+S3method(summary,RLum.Analysis)
+S3method(summary,RLum.Data.Curve)
+S3method(summary,RLum.Data.Image)
+S3method(summary,RLum.Results)
+S3method(unlist,RLum.Analysis)
+export(Analyse_SAR.OSLdata)
+export(CW2pHMi)
+export(CW2pLM)
+export(CW2pLMi)
+export(CW2pPMi)
+export(Risoe.BINfileData2RLum.Analysis)
+export(Second2Gray)
+export(analyse_IRSAR.RF)
+export(analyse_SAR.CWOSL)
+export(analyse_SAR.TL)
+export(analyse_baSAR)
+export(analyse_pIRIRSequence)
+export(app_RLum)
+export(apply_CosmicRayRemoval)
+export(apply_EfficiencyCorrection)
+export(bin.RLum.Data.Curve)
+export(bin_RLum.Data)
+export(calc_AliquotSize)
+export(calc_CentralDose)
+export(calc_CommonDose)
+export(calc_CosmicDoseRate)
+export(calc_FadingCorr)
+export(calc_FastRatio)
+export(calc_FiniteMixture)
+export(calc_FuchsLang2001)
+export(calc_HomogeneityTest)
+export(calc_IEU)
+export(calc_MaxDose)
+export(calc_MinDose)
+export(calc_OSLLxTxRatio)
+export(calc_SourceDoseRate)
+export(calc_Statistics)
+export(calc_TLLxTxRatio)
+export(calc_ThermalLifetime)
+export(calc_gSGC)
+export(extract_IrradiationTimes)
+export(fit_CWCurve)
+export(fit_LMCurve)
+export(get_Layout)
+export(get_Quote)
+export(get_RLum)
+export(get_Risoe.BINfileData)
+export(get_rightAnswer)
+export(is.RLum)
+export(is.RLum.Analysis)
+export(is.RLum.Data)
+export(is.RLum.Data.Curve)
+export(is.RLum.Data.Image)
+export(is.RLum.Data.Spectrum)
+export(is.RLum.Results)
+export(length_RLum)
+export(merge_RLum)
+export(merge_RLum.Analysis)
+export(merge_RLum.Data.Curve)
+export(merge_RLum.Results)
+export(merge_Risoe.BINfileData)
+export(model_LuminescenceSignals)
+export(names_RLum)
+export(plot_AbanicoPlot)
+export(plot_DRTResults)
+export(plot_DetPlot)
+export(plot_FilterCombinations)
+export(plot_GrowthCurve)
+export(plot_Histogram)
+export(plot_KDE)
+export(plot_NRt)
+export(plot_RLum)
+export(plot_RLum.Analysis)
+export(plot_RLum.Data.Curve)
+export(plot_RLum.Data.Image)
+export(plot_RLum.Data.Spectrum)
+export(plot_RLum.Results)
+export(plot_RadialPlot)
+export(plot_Risoe.BINfileData)
+export(plot_ViolinPlot)
+export(read_BIN2R)
+export(read_Daybreak2R)
+export(read_SPE2R)
+export(read_XSYG2R)
+export(replicate_RLum)
+export(report_RLum)
+export(sTeve)
+export(set_RLum)
+export(set_Risoe.BINfileData)
+export(structure_RLum)
+export(template_DRAC)
+export(tune_Data)
+export(use_DRAC)
+export(verify_SingleGrainData)
+export(write_R2BIN)
+exportClasses(RLum)
+exportClasses(RLum.Analysis)
+exportClasses(RLum.Data)
+exportClasses(RLum.Data.Curve)
+exportClasses(RLum.Data.Image)
+exportClasses(RLum.Data.Spectrum)
+exportClasses(RLum.Results)
+exportClasses(Risoe.BINfileData)
+exportMethods(bin_RLum.Data)
+exportMethods(get_RLum)
+exportMethods(get_Risoe.BINfileData)
+exportMethods(length_RLum)
+exportMethods(names_RLum)
+exportMethods(replicate_RLum)
+exportMethods(set_RLum)
+exportMethods(set_Risoe.BINfileData)
+exportMethods(show)
+exportMethods(structure_RLum)
+import(bbmle)
+import(data.table)
+import(methods)
+import(utils)
+importClassesFrom(raster,RasterBrick)
+importFrom(Rcpp,evalCpp)
+importFrom(grDevices,adjustcolor)
+importFrom(grDevices,axisTicks)
+importFrom(grDevices,colorRampPalette)
+importFrom(grDevices,dev.off)
+importFrom(grDevices,gray.colors)
+importFrom(grDevices,rgb)
+importFrom(grDevices,topo.colors)
+importFrom(graphics,abline)
+importFrom(graphics,arrows)
+importFrom(graphics,axTicks)
+importFrom(graphics,axis)
+importFrom(graphics,barplot)
+importFrom(graphics,box)
+importFrom(graphics,boxplot)
+importFrom(graphics,contour)
+importFrom(graphics,curve)
+importFrom(graphics,frame)
+importFrom(graphics,grconvertX)
+importFrom(graphics,grconvertY)
+importFrom(graphics,grid)
+importFrom(graphics,hist)
+importFrom(graphics,layout)
+importFrom(graphics,legend)
+importFrom(graphics,lines)
+importFrom(graphics,mtext)
+importFrom(graphics,par)
+importFrom(graphics,persp)
+importFrom(graphics,plot.default)
+importFrom(graphics,points)
+importFrom(graphics,polygon)
+importFrom(graphics,rug)
+importFrom(graphics,segments)
+importFrom(graphics,text)
+importFrom(graphics,title)
+importFrom(parallel,makeCluster)
+importFrom(parallel,parLapply)
+importFrom(parallel,stopCluster)
+importFrom(raster,brick)
+importFrom(raster,contour)
+importFrom(raster,nlayers)
+importFrom(raster,plotRGB)
+importFrom(raster,raster)
+importFrom(stats,approx)
+importFrom(stats,as.formula)
+importFrom(stats,complete.cases)
+importFrom(stats,density)
+importFrom(stats,dnorm)
+importFrom(stats,glm)
+importFrom(stats,lm)
+importFrom(stats,median)
+importFrom(stats,na.exclude)
+importFrom(stats,na.omit)
+importFrom(stats,nls)
+importFrom(stats,nls.control)
+importFrom(stats,pchisq)
+importFrom(stats,pnorm)
+importFrom(stats,quantile)
+importFrom(stats,rnorm)
+importFrom(stats,runif)
+importFrom(stats,sd)
+importFrom(stats,setNames)
+importFrom(stats,smooth)
+importFrom(stats,smooth.spline)
+importFrom(stats,spline)
+importFrom(stats,t.test)
+importFrom(stats,uniroot)
+importFrom(stats,var)
+importFrom(stats,weighted.mean)
+useDynLib(Luminescence)
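
The S3method() entries above register methods with R's dispatch table without
exporting them by name, so base generics work directly on the package's
classes. A short sketch using the CWOSL.SAR.Data object provided by the
example data set that appears further down in this diff:

    ## Sketch: base generics dispatch to the registered S3 methods.
    library(Luminescence)
    data(ExampleData.BINfileData, envir = environment())
    length(CWOSL.SAR.Data)  ## dispatches to length.Risoe.BINfileData
    plot(CWOSL.SAR.Data)    ## dispatches to plot.Risoe.BINfileData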
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..5ac1963
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,22 @@
+NEWS for the R Package Luminescence
+
+Changes in version 0.6.4 (9th September 2016):
+
+  Bugfixes and changes:
+
+         • ‘analyse_baSAR()’
+
+             • Fix a problem that caused a function crash when an
+               XLS file was provided as input for the grain selection.
+
+         • ‘analyse_pIRIRSequence()’
+
+             • Account for a minor layout problem while plotting the
+               combined growth curve (y-axis scaling was not
+               sufficient)
+
+         • ‘plot_AbanicoPlot()’
+
+             • The relative and absolute standard deviation were mixed
+               up in the summary; fixed.
+
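This plain-text NEWS and the inst/NEWS.Rd listed in the diffstat both ship
with the package, so after installation the changelog can be queried from an
R session:

    ## Sketch: read the installed package's changelog.
    news(package = "Luminescence")
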
diff --git a/R/Analyse_SAR.OSLdata.R b/R/Analyse_SAR.OSLdata.R
new file mode 100644
index 0000000..6b61463
--- /dev/null
+++ b/R/Analyse_SAR.OSLdata.R
@@ -0,0 +1,652 @@
+#' Analyse SAR CW-OSL measurements.
+#'
+#' The function analyses SAR CW-OSL curve data and provides a summary of the
+#' measured data for every position. The output of the function is optimised
+#' for SAR OSL measurements on quartz.
+#'
+#' The function works only for standard SAR protocol measurements introduced by
+#' Murray and Wintle (2000) with CW-OSL curves. For the calculation of the
+#' Lx/Tx value the function \link{calc_OSLLxTxRatio} is used. \cr\cr
+#'
+#' \bold{Provided rejection criteria}\cr\cr \sQuote{recycling ratio}: calculated
+#' for every repeated regeneration dose point.\cr \sQuote{recuperation}:
+#' recuperation rate calculated by comparing the Lx/Tx values of the zero
+#' regeneration point with the Ln/Tn value (the Lx/Tx ratio of the natural
+#' signal). For methodological background see Aitken and Smith (1988).\cr
+#'
+#' \sQuote{IRSL/BOSL}: the integrated counts (\code{signal.integral}) of an
+#' IRSL curve are compared to the integrated counts of the first regenerated
+#' dose point. It is assumed that the IRSL curves received the same dose as the
+#' first regenerated dose point. \strong{Note:} This is not the IR depletion
+#' ratio described by Duller (2003).
+#'
+#' @param input.data \link{Risoe.BINfileData-class} (\bold{required}): input
+#' data from a Risoe BIN file, produced by the function \link{read_BIN2R}.
+#'
+#' @param signal.integral \link{vector} (\bold{required}): channels used for
+#' the signal integral, e.g. \code{signal.integral=c(1:2)}
+#'
+#' @param background.integral \link{vector} (\bold{required}): channels used
+#' for the background integral, e.g. \code{background.integral=c(85:100)}
+#'
+#' @param position \link{vector} (optional): reader positions to be
+#' analysed (e.g. \code{position=c(1:48)}). Empty positions are automatically
+#' omitted. If no value is given, all positions are analysed by default.
+#'
+#' @param run \link{vector} (optional): range of runs used for the analysis. If
+#' no value is given, the range of the runs in the sequence is deduced from the
+#' \code{Risoe.BINfileData} object.
+#'
+#' @param set \link{vector} (optional): range of sets used for the analysis. If
+#' no value is given, the range of the sets in the sequence is deduced from the
+#' \code{Risoe.BINfileData} object.
+#'
+#' @param dtype \code{\link{character}} (optional): allows one to further limit
+#' the curves by their data type (\code{DTYPE}), e.g., \code{dtype = c("Natural",
+#' "Dose")} limits the curves to these two data types. By default all values are
+#' allowed. See \link{Risoe.BINfileData-class} for allowed data types.
+#'
+#' @param keep.SEL \code{\link{logical}} (with default): option to use the
+#' \code{SEL} element of the \link{Risoe.BINfileData-class} manually. NOTE: In
+#' this case any limitations provided by \code{run}, \code{set} and \code{dtype}
+#' are ignored!
+#'
+#' @param info.measurement \link{character} (with default): option to provide
+#' information about the measurement on the plot output (e.g. name of the BIN
+#' or BINX file).
+#'
+#' @param output.plot \link{logical} (with default): plot output
+#' (\code{TRUE/FALSE})
+#'
+#' @param output.plot.single \link{logical} (with default): single plot output
+#' (\code{TRUE/FALSE}) to allow for plotting the results in single plot
+#' windows. Requires \code{output.plot = TRUE}.
+#'
+#' @param cex.global \link{numeric} (with default): global scaling factor.
+#'
+#' @param \dots further arguments that will be passed to the function
+#' \code{\link{calc_OSLLxTxRatio}} (supported: \code{background.count.distribution}, \code{sigmab},
+#' \code{sig0}; e.g., for instrumental error)
+#' and can be used to adjust the plot. Supported: \code{mtext}, \code{log}
+#'
+#' @return A plot (optional) and a \link{list} are returned containing the
+#' following elements: \item{LnLxTnTx}{\link{data.frame} of all calculated
+#' Lx/Tx values including signal, background counts and the dose points.}
+#' \item{RejectionCriteria}{\link{data.frame} with values that might be used as
+#' rejection criteria. NA is produced if no R0 dose point exists.}
+#' \item{SARParameters}{\link{data.frame} of additional measurement parameters
+#' obtained from the BIN file, e.g. preheat or read temperature (not valid for
+#' all types of measurements).}
+#'
+#'
+#' @note Rejection criteria are calculated but are not applied, i.e. no values
+#' are discarded during the analysis.\cr\cr
+#'
+#' \bold{The analysis of IRSL data is not directly supported}. You may want to
+#' consider using the functions \code{\link{analyse_SAR.CWOSL}} or
+#' \code{\link{analyse_pIRIRSequence}} instead.\cr
+#'
+#' \bold{The development of this function will not be continued. We recommend
+#' using the function \link{analyse_SAR.CWOSL} instead.}
+#'
+#'
+#' @section Function version: 0.2.17
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France), Margret C. Fuchs, HZDR, Freiberg (Germany)
+#' @seealso \link{calc_OSLLxTxRatio}, \link{Risoe.BINfileData-class},
+#' \link{read_BIN2R}
+#'
+#' and for further analysis \link{plot_GrowthCurve}
+#'
+#' @references Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+#' after bleaching. Quaternary Science Reviews 7, 387-393.
+#'
+#' Duller, G., 2003. Distinguishing quartz and feldspar in single grain
+#' luminescence measurements. Radiation Measurements, 37 (2), 161-165.
+#'
+#' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+#' improved single-aliquot regenerative-dose protocol. Radiation Measurements
+#' 32, 57-73.
+#' @keywords datagen dplot
+#'
+#' @examples
+#'
+#'
+#' ##load data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##analyse data
+#' output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data,
+#'                               signal.integral = c(1:5),
+#'                               background.integral = c(900:1000),
+#'                               position = c(1:1),
+#'                               output.plot = TRUE)
+#'
+#' ##combine results relevant for further analysis
+#' output.SAR <- data.frame(Dose = output$LnLxTnTx[[1]]$Dose,
+#'                          LxTx = output$LnLxTnTx[[1]]$LxTx,
+#'                          LxTx.Error = output$LnLxTnTx[[1]]$LxTx.Error)
+#' output.SAR
+#'
+#' @export
+Analyse_SAR.OSLdata <- function(
+  input.data,
+  signal.integral,
+  background.integral,
+  position,
+  run,
+  set,
+  dtype,
+  keep.SEL = FALSE,
+  info.measurement = "unkown measurement",
+  output.plot = FALSE,
+  output.plot.single = FALSE,
+  cex.global = 1,
+  ...
+){
+
+  ##============================================================================##
+  ##CONFIG
+  ##============================================================================##
+
+  ##set colors gallery to provide more colors
+
+  col <- get("col", pos = .LuminescenceEnv)
+
+  ##============================================================================##
+  ##ERROR HANDLING
+  ##============================================================================##
+
+  if(missing(input.data)==TRUE){stop("[Analyse_SAR.OSLdata] No input data given!")
+  }else{sample.data<-input.data}
+
+  if(missing(signal.integral)==TRUE){stop("[Analyse_SAR.OSLdata] No signal integral is given!")}
+  if(missing(background.integral)==TRUE){stop("[Analyse_SAR.OSLdata] No background integral is given!")}
+
+  ##set values for position, run and set if they are not defined by the user
+  if(missing(position)==TRUE){position<-min(sample.data@METADATA[,"POSITION"]):max(sample.data@METADATA[,"POSITION"])}
+
+  if(missing(run)==TRUE){run<-min(sample.data@METADATA[,"RUN"]):max(sample.data@METADATA[,"RUN"])}
+
+  if(missing(set)==TRUE){set<-min(sample.data@METADATA[,"SET"]):max(sample.data@METADATA[,"SET"])}
+
+  if(missing(dtype)){dtype <- c("Natural",
+                                "N+dose",
+                                "Bleach",
+                                "Bleach+dose",
+                                "Natural (Bleach)",
+                                "N+dose (Bleach)",
+                                "Dose",
+                                "Background")}
+
+
+  # Deal with extra arguments ----------------------------------------------------
+
+  ##deal with addition arguments
+  extraArgs <- list(...)
+
+  background.count.distribution <-
+    if ("background.count.distribution" %in% names(extraArgs)) {
+      extraArgs$background.count.distribution
+    } else
+    {
+      "non-poisson"
+    }
+
+  sigmab <- if("sigmab" %in% names(extraArgs)) {extraArgs$sigmab} else
+  {NULL}
+
+  ##============================================================================##
+  ##CALCULATIONS
+  ##============================================================================##
+
+
+  ##loop over all positions
+  for (i in position){
+
+    ##checking if position is valid
+    if(length(which(sample.data@METADATA["POSITION"]==i))>0){
+
+      ##check if OSL curves are part of the data set
+      if(nrow(sample.data@METADATA[sample.data@METADATA[,"LTYPE"]=="OSL",]) == 0){
+
+        stop("[Analyse_SAR.OSLdata()] No 'OSL' curves found!")
+
+      }
+
+      if(!keep.SEL){
+        ##select all OSL data depending on the run and set
+        sample.data@METADATA[,"SEL"]<-FALSE
+        sample.data@METADATA[sample.data@METADATA[,"LTYPE"]=="OSL" &
+                               sample.data@METADATA[,"RUN"]%in%run==TRUE &
+                               sample.data@METADATA[,"SET"]%in%set==TRUE &
+                               sample.data@METADATA[,"DTYPE"]%in%dtype==TRUE, "SEL"] <- TRUE
+      }
+
+      ##grep all OSL curve IDs
+      OSL.curveID<-sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE &
+                                          sample.data@METADATA["POSITION"]==i,"ID"]
+
+      ##estimate LnLx.curveID and TnTx.curveID from records
+      LnLx.curveID<-OSL.curveID[seq(1,length(OSL.curveID),by=2)]
+      TnTx.curveID<-OSL.curveID[seq(2,length(OSL.curveID),by=2)]
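+      ##(note: this pairing assumes the selected OSL curves strictly alternate Lx, Tx, Lx, Tx, ...)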
+
+
+      ##Provide Values For Growth Curve Fitting
+
+      ##(1) get dose information
+      Dose<-sapply(1:length(LnLx.curveID),function(x){
+        Dose<-sample.data@METADATA[sample.data@METADATA["ID"]==LnLx.curveID[x],"IRR_TIME"]
+      })
+
+      ##(2) set LxTx curves
+      LnLxTnTx.curves<-(sapply(1:length(LnLx.curveID),function(x){
+
+        ##produce data.frames for Lx/Tx calculations
+        Lx.HIGH<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[x],"HIGH"]
+        Lx.NPOINTS<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[x],"NPOINTS"]
+        Tx.HIGH<-sample.data@METADATA[sample.data@METADATA[,"ID"]==TnTx.curveID[x],"HIGH"]
+        Tx.NPOINTS<-sample.data@METADATA[sample.data@METADATA[,"ID"]==TnTx.curveID[x],"NPOINTS"]
+
+        Lx.curve<-data.frame(x=seq(Lx.HIGH/Lx.NPOINTS,Lx.HIGH,by=Lx.HIGH/Lx.NPOINTS),
+                             y=unlist(sample.data@DATA[LnLx.curveID[x]]))
+        Tx.curve<-data.frame(x=seq(Tx.HIGH/Tx.NPOINTS,Tx.HIGH,by=Tx.HIGH/Tx.NPOINTS),
+                             y=unlist(sample.data@DATA[TnTx.curveID[x]]))
+
+        return(list(Lx.curve,Tx.curve))
+      }))
+
+      ##(3) calculate Lx/Tx ratio
+      LnLxTnTx <- get_RLum(
+        merge_RLum(lapply(1:length(LnLxTnTx.curves[1, ]), function(k) {
+          calc_OSLLxTxRatio(
+            Lx.data = as.data.frame(LnLxTnTx.curves[1, k]),
+            Tx.data = as.data.frame(LnLxTnTx.curves[2, k]),
+            signal.integral = signal.integral,
+            background.integral = background.integral,
+            background.count.distribution = background.count.distribution,
+            sigmab = sigmab
+          )
+        })))
+
+
+      ##finally combine to data.frame including the record ID for further analysis
+      LnLxTnTx <- cbind(LnLxTnTx,LnLx.curveID,TnTx.curveID)
+
+      ##(4.1) set info concerning the kind of regeneration points
+
+      ##generate unique dose IDs - these are also the numbers of the regeneration points
+      temp.DoseID<-c(0:(length(Dose)-1))
+      temp.DoseName<-paste("R",temp.DoseID,sep="")
+      temp.DoseName<-cbind(Name=temp.DoseName,Dose)
+
+      ##set natural
+      temp.DoseName[temp.DoseName[,"Name"]=="R0","Name"]<-"Natural"
+
+      ##set R0
+      temp.DoseName[temp.DoseName[,"Name"]!="Natural" & temp.DoseName[,"Dose"]==0,"Name"]<-"R0"
+
+      ##find duplicated doses (including 0 dose - which means the Natural)
+      temp.DoseDuplicated<-duplicated(temp.DoseName[,"Dose"])
+
+      ##combine temp.DoseName
+      temp.DoseName<-cbind(temp.DoseName,Repeated=temp.DoseDuplicated)
+
+      ##correct value for R0 (it is not really repeated)
+      temp.DoseName[temp.DoseName[,"Dose"]==0,"Repeated"]<-FALSE
+
+
+      ##(5) Combine all values in a data.frame
+      temp.LnLxTnTx<-data.frame(Name=temp.DoseName[,"Name"],
+                                Dose=Dose,
+                                Repeated=as.logical(temp.DoseName[,"Repeated"]))
+      LnLxTnTx<-cbind(temp.LnLxTnTx,LnLxTnTx)
+      LnLxTnTx[,"Name"]<-as.character(LnLxTnTx[,"Name"])
+
+      ##(6) Calculate Recycling Ratio and Recuperation Rate
+
+      ##(6.1)
+      ##Calculate Recycling Ratio
+
+      if(length(LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,"Repeated"])>0){
+
+        ##identify repeated doses
+        temp.Repeated<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,c("Name","Dose","LxTx")]
+
+        ##find corresponding previous dose for the repeated dose
+        temp.Previous<-t(sapply(1:length(temp.Repeated[,1]),function(x){
+          LnLxTnTx[LnLxTnTx[,"Dose"]==temp.Repeated[x,"Dose"] &
+                     LnLxTnTx[,"Repeated"]==FALSE,c("Name","Dose","LxTx")]
+        }))
+
+        ##convert to data.frame
+        temp.Previous<-as.data.frame(temp.Previous)
+
+        ##set column names
+        temp.ColNames<-sapply(1:length(temp.Repeated[,1]),function(x){
+          paste(temp.Repeated[x,"Name"],"/",
+                temp.Previous[temp.Previous[,"Dose"]==temp.Repeated[x,"Dose"],"Name"]
+                ,sep="")
+        })
+
+        ##Calculate Recycling Ratio
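+        ##(i.e. Lx/Tx of the repeated dose point divided by Lx/Tx of the first point with the same dose)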
+        RecyclingRatio<-as.numeric(temp.Repeated[,"LxTx"])/as.numeric(temp.Previous[,"LxTx"])
+
+        ##Just transform the matrix and add column names
+        RecyclingRatio<-t(RecyclingRatio)
+        colnames(RecyclingRatio) <- unique(temp.ColNames)
+
+      }else{RecyclingRatio<-NA}
+
+      ##(6.2)
+      ##Recuperation Rate
+
+      if("R0" %in% LnLxTnTx[,"Name"]==TRUE){
+        Recuperation<-round(LnLxTnTx[LnLxTnTx[,"Name"]=="R0","LxTx"]/LnLxTnTx[LnLxTnTx[,"Name"]=="Natural","LxTx"],digits=4)
+      }else{Recuperation<-NA}
+
+
+      ##(6.3) IRSL
+      ##Print IRSL Curves if IRSL curve is set
+      sample.data@METADATA[,"SEL"]<-FALSE
+      sample.data@METADATA[sample.data@METADATA["LTYPE"]=="IRSL" &
+                             sample.data@METADATA[,"RUN"]%in%run==TRUE &
+                             sample.data@METADATA[,"SET"]%in%set==TRUE,"SEL"]<-TRUE
+
+
+      ##get IRSL curve ID & ID for Reg1 again
+      IRSL.curveID<-sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE & sample.data@METADATA["POSITION"]==i,"ID"]
+
+      ##if no IRSL curve the length of the object is 0
+      if(length(IRSL.curveID)>0){
+
+        ##choose an IRSL curve with a dose of the first regeneration point
+        Reg1again.curveID<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE & LnLxTnTx[,"Dose"]==LnLxTnTx[2,"Dose"],"LnLx.curveID"]
+
+        if(length(Reg1again.curveID)>0){
+
+          ##BOSL/IRSL
+          IRSL_BOSL<-round(sum(unlist(sample.data@DATA[IRSL.curveID])[signal.integral])
+                           /sum(unlist(sample.data@DATA[Reg1again.curveID])[signal.integral]),digits=4)
+        }else{IRSL_BOSL<-NA}
+      }else{IRSL_BOSL<-NA}
+
+      ##Combine the two values
+      if(exists("RejectionCriteria")==FALSE){
+        RejectionCriteria<-cbind(RecyclingRatio,Recuperation,IRSL_BOSL)
+      }else{
+        RejectionCriteria.temp<-cbind(RecyclingRatio,Recuperation,IRSL_BOSL)
+        RejectionCriteria<-rbind(RejectionCriteria,RejectionCriteria.temp)
+      }
+
+      ##============================================================================##
+      ##PLOTTING
+      ##============================================================================##
+
+      if(output.plot){
+
+        ##set plot settings
+        plot.settings <- list(
+          mtext = sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[1],"SAMPLE"],
+          log = ""
+
+        )
+
+        ##modify arguments
+        plot.settings <- modifyList(plot.settings, list(...))
+
+
+
+        if(output.plot.single==FALSE){
+          layout(matrix(c(1,2,1,2,3,4,3,5),4,2,byrow=TRUE))
+        }
+        ##warning if the number of curves exceeds the number of colour values
+        if(length(col)<length(LnLx.curveID)){
+          cat("\n[Analyse_SAR.OSLdata()] Warning: Too many curves! Only the first",
+              length(col),"curves are plotted!")
+        }
+
+        ##==========================================================================
+        ##plot Ln,Lx Curves
+
+
+        ##get maximum value of LnLx curves
+        LnLx.curveMax<-max(unlist(sample.data@DATA[LnLx.curveID]))
+
+        ##get channel resolution (it should be the same for all values)
+        HIGH<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[1],"HIGH"]
+        NPOINTS<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[1],"NPOINTS"]
+
+        xaxt.values<-seq(HIGH/NPOINTS,HIGH,by=HIGH/NPOINTS)
+
+        ##open plot area LnLx
+        plot(NA,NA,
+             xlab="Time [s]",
+             ylab=paste("OSL [cts/",HIGH/NPOINTS," s]",sep=""),
+             xlim=c(HIGH/NPOINTS,HIGH),
+             ylim=c(1,max(unlist(sample.data@DATA[LnLx.curveID]))),
+             main=expression(paste(L[n],",",L[x]," curves",sep="")),
+             log=plot.settings$log
+        )
+        ##plot curves and get legend values
+        sapply(1:length(LnLx.curveID),function(x){
+          yaxt.values<-unlist(sample.data@DATA[LnLx.curveID[x]])
+          lines(xaxt.values,yaxt.values,col=col[x])
+        })
+
+        ##mark integration limits
+        abline(v=xaxt.values[min(signal.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[max(signal.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[min(background.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[max(background.integral)], lty=2, col="gray")
+
+
+        ##plot legend
+        legend("topright",as.character(LnLxTnTx$Name),lty=c(rep(1,length(LnLx.curveID))),
+               cex=0.8*cex.global,col=col, bg="gray")
+
+        ##sample name
+        mtext(side=3,plot.settings$mtext,cex=0.7*cex.global)
+
+        ##========================================================================
+        ##open plot area TnTx
+        plot(NA,NA,
+             xlab="Time [s]",
+             ylab=paste("OSL [cts/",HIGH/NPOINTS," s]",sep=""),
+             xlim=c(HIGH/NPOINTS,HIGH),
+             ylim=c(1,max(unlist(sample.data@DATA[TnTx.curveID]))),
+             main=expression(paste(T[n],",",T[x]," curves",sep="")),
+             log=plot.settings$log
+        )
+        ##plot curves and get legend values
+        sapply(1:length(TnTx.curveID),function(x){
+          yaxt.values<-unlist(sample.data@DATA[TnTx.curveID[x]])
+          lines(xaxt.values,yaxt.values,col=col[x])
+        })
+
+        ##mark integration limits
+        abline(v=xaxt.values[min(signal.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[max(signal.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[min(background.integral)], lty=2, col="gray")
+        abline(v=xaxt.values[max(background.integral)], lty=2, col="gray")
+
+
+        ##plot legend
+        legend("topright",as.character(LnLxTnTx$Name),lty=c(rep(1,length(TnTx.curveID))),
+               cex=0.8*cex.global,col=col, bg="gray")
+
+        ##sample name
+        mtext(side=3,plot.settings$mtext,cex=0.7*cex.global)
+
+        ##========================================================================
+        ##Print TL curves for TnTx -
+        sample.data@METADATA[,"SEL"]<-FALSE
+        sample.data@METADATA[sample.data@METADATA["LTYPE"]=="TL","SEL"]<-TRUE
+
+        ##check if any TL curves were measured within the sequence
+        if(length(sample.data@METADATA[sample.data@METADATA[,"SEL"]==TRUE,1])>0){
+
+
+          ##to ensure that the right TL curves are used, the run and set numbers
+          ##of the LnLx and TnTx curves are used
+          LnLx.SET<-sapply(LnLx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"SET"]})
+          LnLx.RUN<-sapply(LnLx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"RUN"]})
+          TnTx.SET<-sapply(TnTx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"SET"]})
+          TnTx.RUN<-sapply(TnTx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"RUN"]})
+
+          ##get TL curve IDs, considering the constraints above
+          TL.curveID<-sapply(1:length(TnTx.curveID),function(x){
+            sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE &
+                                   sample.data@METADATA["POSITION"]==i &
+                                   sample.data@METADATA["SET"]>=LnLx.SET[x] &
+                                   sample.data@METADATA["RUN"]>=LnLx.RUN[x] &
+                                   sample.data@METADATA["SET"]<=TnTx.SET[x] &
+                                   sample.data@METADATA["RUN"]<=TnTx.RUN[x],"ID"]})
+
+          ##get maximum value of TL curves
+          TL.curveMax<-max(unlist(sample.data@DATA[TL.curveID]))
+
+          ##get channel resolution (it should be the same for all values)
+          HIGH<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"HIGH"])
+          NPOINTS<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"NPOINTS"])
+          xaxt.values<-seq(HIGH/NPOINTS,HIGH,by=HIGH/NPOINTS)
+
+          ##get heating rate
+          RATE<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"RATE"])
+
+          ##open plot area for TL curves
+          plot(NA,NA,
+               xlab="T [\u00B0C]",
+               ylab=paste("TL [cts/",HIGH/NPOINTS," \u00B0C]",sep=""),
+               xlim=c(HIGH/NPOINTS,HIGH),
+               ylim=c(1,TL.curveMax),
+               main="Cutheat - TL curves",
+               sub=paste("(",RATE," K/s)",sep=""),
+               log=if(plot.settings$log=="y" | plot.settings$log=="xy"){"y"}else{""}
+          )
+
+          ##plot curves and get legend values
+          sapply(1:length(TL.curveID),function(x){
+            yaxt.values<-unlist(sample.data@DATA[TL.curveID[x]])
+            lines(xaxt.values,yaxt.values,col=col[x])
+          })
+
+          ##plot legend
+          legend("topleft",as.character(LnLxTnTx$Name),lty=c(rep(1,length(TL.curveID))),
+                 cex=0.8*cex.global,col=col, bg="white", bty="n")
+
+          ##sample name
+          mtext(side=3,plot.settings$mtext,cex=0.7*cex.global)
+
+
+        }else{
+          plot(NA,NA,xlim=c(0,100),ylim=c(0,100), main="Cutheat - TL curves")
+          text(50,50,"no cutheat as TL curve detected")
+        }
+
+        ##======================================================================##
+        ##Print IRSL Curves if IRSL curve is set
+
+        if(is.na(IRSL_BOSL) == FALSE){
+          ##get channel resolution (it should be the same for all values)
+          HIGH<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==IRSL.curveID,"HIGH"])
+          NPOINTS<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==IRSL.curveID,"NPOINTS"])
+          xaxt.values<-seq(HIGH/NPOINTS,HIGH,by=HIGH/NPOINTS)
+
+          ##open plot IRSL curve
+          plot(NA,NA,
+               xlab="Time [s]",
+               ylab=paste("OSL and IRSL [cts/",HIGH/NPOINTS," s]",sep=""),
+               xlim=c(0,HIGH),
+               ylim=c(0,max(unlist(sample.data@DATA[Reg1again.curveID]))),
+               main="IRSLT"
+          )
+
+          ##show integral limits
+          abline(v=xaxt.values[min(signal.integral)], lty=2, col="gray")
+          abline(v=xaxt.values[max(signal.integral)], lty=2, col="gray")
+
+          ##print(length(sample.data@DATA[IRSL.curveID]))
+          lines(xaxt.values,unlist(sample.data@DATA[IRSL.curveID]),col="red")
+          lines(xaxt.values,unlist(sample.data@DATA[Reg1again.curveID[1]]),col="blue")
+
+          ##legend
+          legend("topright",c("R1 again","IRSL"),lty=c(1,1),col=c("blue","red"), bty="n")
+
+
+          mtext(side=3,paste("IRSL/BOSL = ",IRSL_BOSL*100,"%",sep=""),
+                cex=.8*cex.global
+          )
+        }
+
+        ##plot the IRSL curve whenever one was detected, regardless of whether
+        ##the IRSL/BOSL ratio could be calculated
+        if(length(IRSL.curveID)>0){
+
+          ##plot only IRSL curve
+          plot(xaxt.values,unlist(sample.data@DATA[IRSL.curveID]),
+               xlab="Time [s]",
+               ylab=paste("IRSL [cts/",HIGH/NPOINTS," s]",sep=""),
+               xlim=c(0,10),
+               ylim=c(0,max(unlist(sample.data@DATA[IRSL.curveID]))),
+               main="IRSL curve (10 s)",
+               col="red",
+               type="l"
+          )
+        }else{
+          plot(NA,NA,xlim=c(0,10), ylim=c(0,10), main="IRSL curve")
+          text(5,5,"no IRSL curve detected")
+        }
+        ##=========================================================================
+        ##Plot header
+        if(output.plot.single==TRUE){
+          mtext(side=3,paste("ALQ Pos. ",i,sep=""))
+        }else{
+          mtext(side=3,paste("ALQ Pos. ",i,sep=""),outer=TRUE,line=-2.5)
+        }
+
+        ##Plot footer
+        mtext(side=4,info.measurement,outer=TRUE,line=-1.5,cex=0.6*cex.global, col="blue")
+
+        ##output on terminal for plot
+        writeLines(paste("\n[Analyse_SAR.OSLdata()] >> Figure for position ",i," produced.",sep=""))
+
+        ##reset mfrow
+        par(mfrow=c(1,1))
+
+
+
+      }#endif for output.plot
+      ##prepare output of values
+      ##==============================================================================
+
+      ##Add LnLxTnTx values to the list
+      if(exists("LnLxTnTx_List")==FALSE){LnLxTnTx_List<-list()}
+      LnLxTnTx_List[[i]]<-LnLxTnTx
+      rm(LnLxTnTx)
+
+
+    }else{writeLines(paste("[Analyse_SAR.OSLdata()] >> Position ",i," is not valid and has been omitted!",sep=""))} #end if position checking
+
+  }#end for loop
+
+  ##============================================================================##
+  ##OUTPUT OF FUNCTION
+  ##============================================================================##
+
+  ##get further information from the position used
+
+  ##this is what you get from the Risoe file
+  readTemp<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position) & sample.data@METADATA[,"LTYPE"]!="TL","TEMPERATURE"])
+
+  cutheat<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position) &
+                                         sample.data@METADATA[,"LTYPE"]=="TL","HIGH"])
+  if(length(cutheat)==0){cutheat<-NA}
+
+  systemID<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position),"SYSTEMID"])
+
+  SARParameters<-data.frame(readTemp=readTemp,cutheat=cutheat,systemID=systemID)
+
+  return(list(LnLxTnTx=LnLxTnTx_List,
+              RejectionCriteria=RejectionCriteria,
+              SARParameters=SARParameters))
+
+
+}
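+
+##hypothetical usage sketch (not part of the original file; the argument names
+##follow their use in the function body above, the file path is a placeholder):
+# sample.data <- readBIN2R("[path to BIN-file]")
+# results <- Analyse_SAR.OSLdata(sample.data,
+#                                signal.integral = 1:5,
+#                                background.integral = 900:1000,
+#                                position = 1,
+#                                output.plot = TRUE)
+# results$RejectionCriteria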
diff --git a/R/CW2pHMi.R b/R/CW2pHMi.R
new file mode 100644
index 0000000..08d7242
--- /dev/null
+++ b/R/CW2pHMi.R
@@ -0,0 +1,320 @@
+#' Transform a CW-OSL curve into a pHM-OSL curve via interpolation under
+#' hyperbolic modulation conditions
+#'
+#' This function transforms a conventionally measured continuous-wave (CW)
+#' OSL-curve to a pseudo hyperbolic modulated (pHM) curve under hyperbolic
+#' modulation conditions using the interpolation procedure described by Bos &
+#' Wallinga (2012).
+#'
+#' The complete procedure of the transformation is described in Bos & Wallinga
+#' (2012). The input \code{data.frame} consists of two columns: time (t) and
+#' count values (CW(t))\cr\cr
+#'
+#' \bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr\cr (2)
+#' Calculate t' which is the transformed time:\cr \deqn{t' =
+#' t-(1/\delta)*log(1+\delta*t)} (3) Interpolate CW(t'), i.e. use the
+#' log(CW(t)) to obtain the count values for the transformed time (t'). Values
+#' beyond \code{min(t)} and \code{max(t)} produce \code{NA} values.\cr\cr (4)
+#' Select all values for t' < \code{min(t)}, i.e. values beyond the time
+#' resolution of t. Select the first two values of the transformed data set
+#' which contain no \code{NA} values and use these values for a linear fit
+#' using \code{\link{lm}}.\cr\cr (5) Extrapolate values for t' < \code{min(t)}
+#' based on the previously obtained fit parameters.\cr\cr (6) Transform values
+#' using\cr \deqn{pHM(t) = (\delta*t/(1+\delta*t))*c*CW(t')} \deqn{c =
+#' (1+\delta*P)/\delta*P} \deqn{P = length(stimulation~period)} (7) Combine all
+#' values and truncate all values for t' > \code{max(t)} \cr\cr \emph{The
+#' number of values for t' < \code{min(t)} depends on the stimulation rate
+#' parameter \code{delta}. To avoid the production of too many artificial data
+#' at the raising tail of the determined pHM curve, it is recommended to use
+#' the automatic estimation routine for \code{delta}, i.e. provide no value for
+#' \code{delta}.}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}):
+#' \code{\linkS4class{RLum.Data.Curve}} or \code{\link{data.frame}} with
+#' measured curve data of type stimulation time (t) (\code{values[,1]}) and
+#' measured counts (cts) (\code{values[,2]}).
+#' @param delta \code{\link{vector}} (optional): stimulation rate parameter, if
+#' no value is given, the optimal value is estimated automatically (see
+#' details). Smaller values of delta produce more points in the rising tail of
+#' the curve.
+#' @return The function returns the same data type as the input data type with
+#' the transformed curve values. \item{RLum.Data.Curve}{package
+#' \code{\linkS4class{RLum}} object with two additional info elements:
+#' \tabular{rl}{ $CW2pHMi.x.t \tab: transformed time values \cr $CW2pHMi.method
+#' \tab: used method for the production of the new data points }}
+#' \item{data.frame}{with four columns: \tabular{rl}{ $x \tab:
+#' time\cr $y.t \tab: transformed count values\cr $x.t \tab: transformed time
+#' values \cr $method \tab: used method for the production of the new data
+#' points }}
+#' @note According to Bos & Wallinga (2012), the number of extrapolated points
+#' should be limited to avoid artificial intensity data. If \code{delta} is
+#' provided manually and more than two points are extrapolated, a warning
+#' message is returned. \cr\cr The function \code{\link{approx}} may produce
+#' some \code{Inf} and \code{NaN} data. The function tries to manually
+#' interpolate these values by calculating the \code{mean} using the adjacent
+#' channels. If two invalid values occur in succession, the values are removed
+#' and no further interpolation is attempted.\cr In every case a warning message is
+#' shown.
+#' @section Function version: 0.2.2
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France) \cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+#' Delft University of Technology, The Netherlands\cr
+#' @seealso \code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pPMi}},
+#' \code{\link{fit_LMCurve}}, \code{\link{lm}},
+#' \code{\linkS4class{RLum.Data.Curve}}
+#' @references Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+#' signal components. Radiation Measurements, 47, 752-758.\cr
+#'
+#' \bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+#' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+#' 26, 701-709.
+#'
+#' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+#' LM-OSL curves. Radiation Measurements, 32, 141-145.
+#' @keywords manip
+#' @examples
+#'
+#'
+#' ##(1) - simple transformation
+#'
+#' ##load CW-OSL curve data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##transform values
+#' values.transformed<-CW2pHMi(ExampleData.CW_OSL_Curve)
+#'
+#' ##plot
+#' plot(values.transformed$x, values.transformed$y.t, log = "x")
+#'
+#' ##(2) - load CW-OSL curve from BIN-file and plot transformed values
+#'
+#' ##load BINfile
+#' #BINfileData<-readBIN2R("[path to BIN-file]")
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##grep first CW-OSL curve from ALQ 1
+#' curve.ID<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"LTYPE"]=="OSL" &
+#'                                     CWOSL.SAR.Data@@METADATA[,"POSITION"]==1
+#'                                   ,"ID"]
+#'
+#' curve.HIGH<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"ID"]==curve.ID[1]
+#'                                     ,"HIGH"]
+#'
+#' curve.NPOINTS<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"ID"]==curve.ID[1]
+#'                                        ,"NPOINTS"]
+#'
+#' ##combine curve to data set
+#'
+#' curve<-data.frame(x = seq(curve.HIGH/curve.NPOINTS,curve.HIGH,
+#'                           by = curve.HIGH/curve.NPOINTS),
+#'                   y=unlist(CWOSL.SAR.Data@@DATA[curve.ID[1]]))
+#'
+#'
+#' ##transform values
+#'
+#' curve.transformed <- CW2pHMi(curve)
+#'
+#' ##plot curve
+#' plot(curve.transformed$x, curve.transformed$y.t, log = "x")
+#'
+#'
+#' ##(3) - produce Fig. 4 from Bos & Wallinga (2012)
+#'
+#' ##load data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#' values <- CW_Curve.BosWallinga2012
+#'
+#' ##open plot area
+#' plot(NA, NA,
+#'      xlim=c(0.001,10),
+#'      ylim=c(0,8000),
+#'      ylab="pseudo OSL (cts/0.01 s)",
+#'      xlab="t [s]",
+#'      log="x",
+#'      main="Fig. 4 - Bos & Wallinga (2012)")
+#'
+#' values.t<-CW2pLMi(values, P=1/20)
+#' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P=1/20)[,2],
+#'       col="red" ,lwd=1.3)
+#' text(0.03,4500,"LM", col="red" ,cex=.8)
+#'
+#' values.t<-CW2pHMi(values, delta=40)
+#' lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta=40)[,2],
+#'       col="black", lwd=1.3)
+#' text(0.005,3000,"HM", cex=.8)
+#'
+#' values.t<-CW2pPMi(values, P=1/10)
+#' lines(values[1:length(values.t[,1]),1],CW2pPMi(values, P=1/10)[,2],
+#'       col="blue", lwd=1.3)
+#' text(0.5,6500,"PM", col="blue" ,cex=.8)
+#'
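+#' ##(4) - a minimal sketch (not from the original examples): the time
+#' ##transformation t' = t - (1/delta)*log(1 + delta*t) from step (2)
+#' ##of the details section, for an arbitrary delta value
+#' t <- seq(0.004, 4, by = 0.004)
+#' delta <- 40
+#' t.transformed <- t - (1/delta) * log(1 + delta * t)
+#' plot(t, t.transformed, type = "l")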
+#'
+#' @export
+CW2pHMi<- function(
+  values,
+  delta
+){
+  
+  
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
+    
+    stop("[CW2pHMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+    
+  }
+  
+  ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
+  if(is(values, "RLum.Data.Curve") == TRUE){
+    
+    if(!grepl("OSL", values at recordType) & !grepl("IRSL", values at recordType)){
+      
+      stop(paste("[CW2pHMi()] Error: curve type ",values at recordType, "  is not allowed for the transformation!",
+                 sep=""))
+      
+    }else{
+      
+      temp.values <- as(values, "data.frame")
+      
+    }
+    
+  }else{
+    
+    temp.values <- values
+    
+  }
+  
+  
+  # (1) Transform values ------------------------------------------------------
+  
+  ##log transformation of the CW-OSL count values
+  CW_OSL.log<-log(temp.values[,2])
+  
+  ##time transformation t >> t'
+  t<-temp.values[,1]
+  
+  ##set delta
+  ##if no value for delta is given, select a delta value that yields a maximum
+  ##of two extrapolation points
+  if(missing(delta)==TRUE){
+    
+    i<-10
+    delta<-i
+    t.transformed<-t-(1/delta)*log(1+delta*t)
+    
+    while(length(t.transformed[t.transformed<min(t)])>2){
+      
+      delta<-i
+      t.transformed<-t-(1/delta)*log(1+delta*t)
+      i<-i+10
+      
+    }
+  }else{
+    
+    t.transformed<-t-(1/delta)*log(1+delta*t)
+    
+  }
+  
+  # (2) Interpolation ---------------------------------------------------------
+  
+  ##interpolate values, values beyond the range return NA values
+  CW_OSL.interpolated <- approx(t,CW_OSL.log, xout=t.transformed, rule=1)
+  
+  
+  ##combine t.transformed and CW_OSL.interpolated in a data.frame
+  temp <- data.frame(x=t.transformed, y=unlist(CW_OSL.interpolated$y))
+  
+  ##Problem: in some cases the interpolation algorithm does not work properly
+  ##and Inf or NaN values are returned
+  
+  ##fetch row number of the invalid values
+  invalid_values.id <- c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))
+  
+  if(length(invalid_values.id) > 0){
+    
+    warning(paste(length(invalid_values.id),"invalid values have been found and replaced by the mean of the nearest values."))
+    
+  }
+  
+  ##interpolate between the lower and the upper value
+  invalid_values.interpolated<-sapply(1:length(invalid_values.id),
+                                      function(x) {
+                                        
+                                        mean(c(temp[invalid_values.id[x]-1,2],
+                                               temp[invalid_values.id[x]+1,2]))
+                                        
+                                      }
+  )
+  
+  ##replace invalid values in data.frame with newly interpolated values
+  if(length(invalid_values.id)>0){
+    temp[invalid_values.id,2]<-invalid_values.interpolated
+  }
+  
+  # (3) Extrapolate first values of the curve ---------------------------------
+  
+  ##(a) - find the index of the first row that contains no NA value (needed for extrapolation)
+  temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
+  
+  ##(b) - fit linear function
+  fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
+  
+  ##select values to extrapolate and predict (extrapolate) values based on the fitted function
+  x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
+  y.i<-predict(fit.lm,x.i)
+  
+  ##replace NA values by extrapolated values
+  temp[1:length(y.i),2]<-y.i
+  
+  ##set method values
+  temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
+  
+  ##print a warning message for more than two extrapolation points
+  if(length(y.i)>2){warning("t' is beyond the time resolution and more than two data points have been extrapolated!")}
+  
+  # (4) Convert, transform and combine values ---------------------------------
+  
+  ##unlog CW-OSL count values, i.e. log(CW) >> CW
+  CW_OSL<-exp(temp$y)
+  
+  ##set values for c and P
+  
+  ##P is the stimulation period
+  P<-max(temp.values[,1])
+  
+  ##c is a dimensionless constant
+  c<-(1+(delta*P))/(delta*P)
+  
+  ##transform CW-OSL values to pHM-OSL values
+  pHM<-((delta*t)/(1+(delta*t)))*c*CW_OSL
+  
+  ##combine all values and exclude NA values
+  temp.values <- data.frame(x=t,y.t=pHM,x.t=t.transformed,method=temp.method)
+  temp.values <- na.exclude(temp.values)
+  
+  # (5) Return values ---------------------------------------------------------
+  
+  ##returns the same data type as the input
+  if(is(values, "data.frame") == TRUE){
+    
+    values <- temp.values
+    return(values)
+    
+  }else{
+    
+    
+    ##add old info elements to new info elements
+    temp.info <- c(values@info,
+                   CW2pHMi.x.t = list(temp.values$x.t),
+                   CW2pHMi.method = list(temp.values$method))
+    
+    newRLumDataCurves.CW2pHMi <- set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = values@recordType,
+      data = as.matrix(temp.values[,1:2]),
+      info = temp.info)
+    return(newRLumDataCurves.CW2pHMi)
+    
+  }
+  
+}
diff --git a/R/CW2pLM.R b/R/CW2pLM.R
new file mode 100644
index 0000000..117af49
--- /dev/null
+++ b/R/CW2pLM.R
@@ -0,0 +1,135 @@
+#' Transform a CW-OSL curve into a pLM-OSL curve
+#'
+#' Transforms a conventionally measured continuous-wave (CW) curve into a
+#' pseudo linearly modulated (pLM) curve using the equations given in Bulur
+#' (2000).
+#'
+#' According to Bulur (2000) the curve data are transformed by introducing two
+#' new parameters P (stimulation period) and u (transformed time):
+#' \deqn{P=2*max(t)} \deqn{u=\sqrt{(2*t*P)}} The new count values are then
+#' calculated by \deqn{ctsNEW = cts(u/P)} and the returned \code{data.frame} is
+#' produced by: \code{data.frame(u,ctsNEW)}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}): \code{RLum.Data.Curve} data
+#' object. Alternatively, a \code{data.frame} of the measured curve data of
+#' type stimulation time (t) (\code{values[,1]}) and measured counts (cts)
+#' (\code{values[,2]}) can be provided.
+#' @return The function returns the same data type as the input data type with
+#' the transformed curve values.
+#'
+#' \item{list(list("data.frame"))}{generic R data structure}
+#' \item{list(list("RLum.Data.Curve"))}{package \code{\linkS4class{RLum}
+#' object}}
+#' @note The transformation is recommended for curves recorded with a channel
+#' resolution of at least 0.05 s/channel.
+#' @section Function version: 0.4.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso \code{\link{CW2pHMi}}, \code{\link{CW2pLMi}},
+#' \code{\link{CW2pPMi}}, \code{\link{fit_LMCurve}}, \code{\link{lm}},
+#' \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' The output of the function can be further used for LM-OSL fitting:
+#' \code{\link{CW2pLMi}}, \code{\link{CW2pHMi}}, \code{\link{CW2pPMi}},
+#' \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\link{plot_RLum}}
+#' @references Bulur, E., 2000. A simple transformation for converting CW-OSL
+#' curves to LM-OSL curves. Radiation Measurements, 32, 141-145.
+#'
+#' \bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+#' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+#' 26, 701-709.
+#' @keywords manip
+#' @examples
+#'
+#'
+#' ##read curve from CWOSL.SAR.Data transform curve and plot values
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##read id for the 1st OSL curve
+#' id.OSL <- CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"LTYPE"] == "OSL","ID"]
+#'
+#' ##produce x and y (time and count data for the data set)
+#' x<-seq(CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@@METADATA[id.OSL[1],"NPOINTS"],
+#'        CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"],
+#'        by = CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@@METADATA[id.OSL[1],"NPOINTS"])
+#' y <- unlist(CWOSL.SAR.Data@@DATA[id.OSL[1]])
+#' values <- data.frame(x,y)
+#'
+#' ##transform values
+#' values.transformed <- CW2pLM(values)
+#'
+#' ##plot
+#' plot(values.transformed)
+#'
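+#' ##minimal sketch (not from the original examples) of the underlying
+#' ##transformation for a synthetic CW-OSL curve: P = 2*max(t),
+#' ##u = sqrt(2*t*P), ctsNEW = cts*(u/P)
+#' t <- seq(0.04, 40, by = 0.04)
+#' cts <- 1000 * exp(-0.5 * t)
+#' P <- 2 * max(t)
+#' u <- sqrt(2 * t * P)
+#' plot(u, cts * (u/P), type = "l", xlab = "u [s]", ylab = "pLM-OSL [a.u.]")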
+#'
+#' @export
+CW2pLM <- function(
+  values
+){
+
+  # Integrity Checks --------------------------------------------------------
+
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
+
+    stop("[CW2pLM] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+
+  }
+
+  ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
+  if(is(values, "RLum.Data.Curve") == TRUE){
+
+    if(!grepl("OSL", values at recordType) & !grepl("IRSL", values at recordType)){
+
+      stop(paste("[CW2pLM] Error: curve type ",values at recordType, "  is not allowed for the transformation!",
+                 sep=""))
+
+    }else{
+
+      temp.values <- as(values, "data.frame")
+
+    }
+
+  }else{
+
+    temp.values <- values
+
+
+  }
+
+
+  # Calculation -------------------------------------------------------------
+
+
+  ##curve transformation
+  P<-2*max(temp.values[,1])
+  u<-((2*temp.values[,1]*P)^0.5)
+
+  ##CW >> pLM conversion, according to Bulur (2000)
+  temp.values[,2]<-temp.values[,2]*(u/P)
+  temp.values<-data.frame(u,temp.values[,2])
+
+
+  # Return values -----------------------------------------------------------
+
+  ##returns the same data type as the input
+
+  if(is(values, "data.frame") == TRUE){
+
+    values <- temp.values
+    return(values)
+
+  }else{
+
+    newRLumDataCurves.CW2pLM <- set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = values@recordType,
+      data = as.matrix(temp.values),
+      info = values@info)
+    return(newRLumDataCurves.CW2pLM)
+
+  }
+
+}
diff --git a/R/CW2pLMi.R b/R/CW2pLMi.R
new file mode 100644
index 0000000..c3489eb
--- /dev/null
+++ b/R/CW2pLMi.R
@@ -0,0 +1,263 @@
+#' Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear
+#' modulation conditions
+#'
+#' Transforms a conventionally measured continuous-wave (CW) OSL-curve into a
+#' pseudo linearly modulated (pLM) curve under linear modulation conditions
+#' using the interpolation procedure described by Bos & Wallinga (2012).
+#'
+#' The complete procedure of the transformation is given in Bos & Wallinga
+#' (2012). The input \code{data.frame} consists of two columns: time (t) and
+#' count values (CW(t))\cr\cr
+#'
+#' \bold{Nomenclature}\cr\cr P = stimulation time (s)\cr 1/P = stimulation rate
+#' (1/s)\cr\cr
+#'
+#' \bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr (2)
+#' Calculate t' which is the transformed time: \deqn{t' = 1/2*1/P*t^2}
+#'
+#' (3) Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values
+#' for the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)}
+#' produce \code{NA} values.\cr\cr (4) Select all values for t' <
+#' \code{min(t)}, i.e. values beyond the time resolution of t. Select the first
+#' two values of the transformed data set which contain no \code{NA} values and
+#' use these values for a linear fit using \code{\link{lm}}.\cr\cr (5)
+#' Extrapolate values for t' < \code{min(t)} based on the previously obtained
+#' fit parameters.\cr\cr (6) Transform values using \deqn{pLM(t) = t/P*CW(t')}
+#' (7) Combine values and truncate all values for t' > \code{max(t)}\cr\cr
+#' \emph{The number of values for t' < \code{min(t)} depends on the stimulation
+#' period (P) and therefore on the stimulation rate 1/P. To avoid the
+#' production of too many artificial data at the raising tail of the determined
+#' pLM curves it is recommended to use the automatic estimation routine for
+#' \code{P}, i.e. provide no own value for \code{P}.}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}):
+#' \code{\linkS4class{RLum.Data.Curve}} or \code{data.frame} with measured
+#' curve data of type stimulation time (t) (\code{values[,1]}) and measured
+#' counts (cts) (\code{values[,2]})
+#' @param P \code{\link{vector}} (optional): stimulation time in seconds. If no
+#' value is given the optimal value is estimated automatically (see details).
+#' Greater values of P produce more points in the rising tail of the curve.
+#' @return The function returns the same data type as the input data type with
+#' the transformed curve values. \item{RLum.Data.Curve}{package
+#' \code{\linkS4class{RLum}} object with two additional info elements:}
+#' \tabular{rl}{ $CW2pLMi.x.t \tab: transformed time values \cr $CW2pLMi.method
+#' \tab: used method for the production of the new data points}
+#' @note According to Bos & Wallinga (2012) the number of extrapolated points
+#' should be limited to avoid artificial intensity data. If \code{P} is
+#' provided manually and more than two points are extrapolated, a warning
+#' message is returned.
+#' @section Function version: 0.3.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux
+#' Montaigne\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+#' Delft University of Technology, The Netherlands\cr
+#' @seealso \code{\link{CW2pLM}}, \code{\link{CW2pHMi}}, \code{\link{CW2pPMi}},
+#' \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
+#' @references Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+#' signal components. Radiation Measurements, 47, 752-758.\cr
+#'
+#' \bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+#' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+#' 26, 701-709.
+#'
+#' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+#' LM-OSL curves. Radiation Measurements, 32, 141-145.
+#' @keywords manip
+#' @examples
+#'
+#'
+#' ##(1)
+#' ##load CW-OSL curve data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##transform values
+#' values.transformed <- CW2pLMi(ExampleData.CW_OSL_Curve)
+#'
+#' ##plot
+#' plot(values.transformed$x, values.transformed$y.t, log = "x")
+#'
+#' ##(2) - produce Fig. 4 from Bos & Wallinga (2012)
+#' ##load data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#' values <- CW_Curve.BosWallinga2012
+#'
+#' ##open plot area
+#' plot(NA, NA,
+#'      xlim = c(0.001,10),
+#'      ylim = c(0,8000),
+#'      ylab = "pseudo OSL (cts/0.01 s)",
+#'      xlab = "t [s]",
+#'      log = "x",
+#'      main = "Fig. 4 - Bos & Wallinga (2012)")
+#'
+#'
+#' values.t <- CW2pLMi(values, P = 1/20)
+#' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2],
+#'       col = "red", lwd = 1.3)
+#' text(0.03,4500,"LM", col = "red", cex = .8)
+#'
+#' values.t <- CW2pHMi(values, delta = 40)
+#' lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta = 40)[,2],
+#'       col = "black", lwd = 1.3)
+#' text(0.005,3000,"HM", cex =.8)
+#'
+#' values.t <- CW2pPMi(values, P = 1/10)
+#' lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
+#'       col = "blue", lwd = 1.3)
+#' text(0.5,6500,"PM", col = "blue", cex = .8)
+#'
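+#' ##(3) - minimal sketch (not from the original examples): count the points
+#' ##that would require extrapolation for a manually chosen P, cf. step (4)
+#' ##of the details section
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#' t <- ExampleData.CW_OSL_Curve[,1]
+#' P <- 1/20
+#' t.transformed <- 0.5 * 1/P * t^2
+#' sum(t.transformed < min(t))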
+#'
+#' @export
+CW2pLMi<- function(
+  values,
+  P
+){
+  
+  # (0) Integrity checks -------------------------------------------------------
+  
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
+    
+    stop("[CW2pLMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+    
+  }
+  
+  ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
+  if(is(values, "RLum.Data.Curve") == TRUE){
+    
+    if(!grepl("OSL", values at recordType) & !grepl("IRSL", values at recordType)){
+      
+      stop(paste("[CW2pLMi()] Error: curve type ",values at recordType, "  is not allowed for the transformation!",
+                 sep=""))
+      
+    }else{
+      
+      temp.values <- as(values, "data.frame")
+      
+    }
+    
+  }else{
+    
+    temp.values <- values
+    
+  }
+  
+  
+  # (1) Transform values ------------------------------------------------------------------------
+  
+  
+  ##(a) log transformation of the CW-OSL count values
+  CW_OSL.log<-log(temp.values[,2])
+  
+  ##(b) time transformation t >> t'
+  t<-temp.values[,1]
+  
+  ##set P
+  ##if no value for P is given, select a P value that yields a maximum
+  ##of two extrapolation points
+  if(missing(P)==TRUE){
+    
+    i<-10
+    P<-1/i
+    t.transformed<-0.5*1/P*t^2
+    
+    while(length(t.transformed[t.transformed<min(t)])>2){
+      
+      P<-1/i
+      t.transformed<-0.5*1/P*t^2
+      i<-i+10
+      
+    }#end::while
+  }else{
+    
+    if(P==0){stop("[CW2pLMi] Error: P has to be > 0!")}
+    t.transformed<-0.5*1/P*t^2
+    
+  }
+  #endif
+  
+  # (2) Interpolation ---------------------------------------------------------------------------
+  
+  ##interpolate values, values beyond the range return NA values
+  CW_OSL.interpolated<-approx(t,CW_OSL.log, xout=t.transformed, rule=1 )
+  
+  ##combine t.transformed and CW_OSL.interpolated in a data.frame
+  temp<-data.frame(x=t.transformed, y=unlist(CW_OSL.interpolated$y))
+  
+  ##Problem: in rare cases the interpolation does not work properly and Inf or NaN values are returned
+  
+  ##Fetch row number of the invalid values
+  invalid_values.id<-c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))
+  
+  ##interpolate between the lower and the upper value
+  invalid_values.interpolated<-sapply(1:length(invalid_values.id),
+                                      function(x) {
+                                        mean(c(temp[invalid_values.id[x]-1,2],temp[invalid_values.id[x]+1,2]))
+                                      }
+  )
+  
+  ##replace invalid values in data.frame with newly interpolated values
+  if(length(invalid_values.id)>0){
+    temp[invalid_values.id,2]<-invalid_values.interpolated
+  }
+  
+  # (3) Extrapolate first values of the curve ---------------------------------------------------
+  
+  
+  ##(a) - find the index of the first row that contains no NA value (needed for extrapolation)
+  temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
+  
+  ##(b) - fit linear function
+  fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
+  
+  ##select values to extrapolate and predict (extrapolate) values based on the fitted function
+  x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
+  y.i<-predict(fit.lm,x.i)
+  
+  ##replace NA values by extrapolated values
+  temp[1:length(y.i),2]<-y.i
+  
+  ##set method values
+  temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
+  
+  ##print a warning message for more than two extrapolation points
+  if(length(y.i)>2){warning("t' is beyond the time resolution and more than two data points have been extrapolated!")}
+  
+  # (4) Convert, transform and combine values ---------------------------------------------------
+  
+  ##unlog CW-OSL count values, i.e. log(CW) >> CW
+  CW_OSL<-exp(temp$y)
+  
+  ##transform CW-OSL values to pLM-OSL values
+  pLM<-1/P*t*CW_OSL
+  
+  ##combine all values and exclude NA values
+  temp.values <- data.frame(x=t,y.t=pLM,x.t=t.transformed, method=temp.method)
+  temp.values <- na.exclude(temp.values)
+  
+  # (5) Return values ---------------------------------------------------------------------------
+  
+  ##returns the same data type as the input
+  if(is(values, "data.frame") == TRUE){
+    
+    values <- temp.values
+    return(values)
+    
+  }else{
+    
+    
+    ##add old info elements to new info elements
+    temp.info <- c(values@info,
+                   CW2pLMi.x.t = list(temp.values$x.t),
+                   CW2pLMi.method = list(temp.values$method))
+    
+    newRLumDataCurves.CW2pLMi <- set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = values@recordType,
+      data = as.matrix(temp.values[,1:2]),
+      info = temp.info)
+    return(newRLumDataCurves.CW2pLMi)
+    
+  }
+  
+}
diff --git a/R/CW2pPMi.R b/R/CW2pPMi.R
new file mode 100644
index 0000000..244b854
--- /dev/null
+++ b/R/CW2pPMi.R
@@ -0,0 +1,253 @@
+#' Transform a CW-OSL curve into a pPM-OSL curve via interpolation under
+#' parabolic modulation conditions
+#'
+#' Transforms a conventionally measured continuous-wave (CW) OSL-curve into a
+#' pseudo parabolic modulated (pPM) curve under parabolic modulation conditions
+#' using the interpolation procedure described by Bos & Wallinga (2012).
+#'
+#' The complete procedure of the transformation is given in Bos & Wallinga
+#' (2012). The input \code{data.frame} consists of two columns: time (t) and
+#' count values (CW(t))\cr\cr
+#'
+#' \bold{Nomenclature}\cr\cr P = stimulation time (s)\cr 1/P = stimulation rate
+#' (1/s)\cr\cr
+#'
+#' \bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr\cr (2)
+#' Calculate t' which is the transformed time: \deqn{t' = (1/3)*(1/P^2)t^3} (3)
+#' Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for
+#' the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)}
+#' produce \code{NA} values.\cr\cr (4) Select all values for t' <
+#' \code{min(t)}, i.e. values beyond the time resolution of t. Select the first
+#' two values of the transformed data set which contain no \code{NA} values and
+#' use these values for a linear fit using \code{\link{lm}}.\cr\cr (5)
+#' Extrapolate values for t' < \code{min(t)} based on the previously obtained
+#' fit parameters. The extrapolation is limited to two values. Other values at
+#' the beginning of the transformed curve are set to 0.\cr\cr (6) Transform
+#' values using \deqn{pLM(t) = t^2/P^2*CW(t')} (7) Combine all values and
+#' truncate all values for t' > \code{max(t)}\cr\cr
+#'
+#' \emph{The number of values for t' < \code{min(t)} depends on the stimulation
+#' period \code{P}. To avoid producing too many artificial data points at the
+#' rising tail of the determined pPM curve, it is recommended to use the
+#' automatic estimation routine for \code{P}, i.e. provide no value for
+#' \code{P}.}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}):
+#' \code{\linkS4class{RLum.Data.Curve}} or \code{data.frame} with measured
+#' curve data of type stimulation time (t) (\code{values[,1]}) and measured
+#' counts (cts) (\code{values[,2]})
+#' @param P \code{\link{vector}} (optional): stimulation period in seconds. If
+#' no value is given, the optimal value is estimated automatically (see
+#' details). Greater values of P produce more points in the rising tail of the
+#' curve.
+#' @return The function returns the same data type as the input data type with
+#' the transformed curve values. \item{RLum.Data.Curve}{package
+#' \code{\linkS4class{RLum}} object with two additional info elements:
+#' \tabular{rl}{ $CW2pPMi.x.t \tab: transformed time values \cr $CW2pPMi.method
+#' \tab: used method for the production of the new data points }}
+#'
+#' \item{list(list("data.frame"))}{with four columns: \tabular{rl}{ $x \tab:
+#' time\cr $y.t \tab: transformed count values\cr $x.t \tab: transformed time
+#' values \cr $method \tab: used method for the production of the new data
+#' points }}
+#' @note According to Bos & Wallinga (2012), the number of extrapolated points
+#' should be limited to avoid artificial intensity data. If \code{P} is
+#' provided manually, not more than two points are extrapolated.
+#' @section Function version: 0.2.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+#' Delft University of Technology, The Netherlands\cr
+#' @seealso \code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pHMi}},
+#' \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
+#' @references Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+#' signal components. Radiation Measurements, 47, 752-758.\cr
+#'
+#' \bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+#' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+#' 26, 701-709.
+#'
+#' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+#' LM-OSL curves. Radiation Measurements, 32, 141-145.
+#' @keywords manip
+#' @examples
+#'
+#'
+#' ##(1)
+#' ##load CW-OSL curve data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##transform values
+#' values.transformed <- CW2pPMi(ExampleData.CW_OSL_Curve)
+#'
+#' ##plot
+#' plot(values.transformed$x,values.transformed$y.t, log = "x")
+#'
+#' ##(2) - produce Fig. 4 from Bos & Wallinga (2012)
+#'
+#' ##load data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#' values <- CW_Curve.BosWallinga2012
+#'
+#' ##open plot area
+#' plot(NA, NA,
+#'      xlim = c(0.001,10),
+#'      ylim = c(0,8000),
+#'      ylab = "pseudo OSL (cts/0.01 s)",
+#'      xlab = "t [s]",
+#'      log = "x",
+#'      main = "Fig. 4 - Bos & Wallinga (2012)")
+#'
+#' values.t <- CW2pLMi(values, P = 1/20)
+#' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2],
+#'       col = "red",lwd = 1.3)
+#' text(0.03,4500,"LM", col = "red", cex = .8)
+#'
+#' values.t <- CW2pHMi(values, delta = 40)
+#' lines(values[1:length(values.t[,1]),1], CW2pHMi(values, delta = 40)[,2],
+#'       col = "black", lwd = 1.3)
+#' text(0.005,3000,"HM", cex = .8)
+#'
+#' values.t <- CW2pPMi(values, P = 1/10)
+#' lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
+#'       col = "blue", lwd = 1.3)
+#' text(0.5,6500,"PM", col = "blue", cex = .8)
+#'
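+#' ##(3) - minimal sketch (not from the original examples) of the parabolic
+#' ##time transformation t' = (1/3)*(1/P^2)*t^3 from step (2) of the details
+#' t <- seq(0.004, 4, by = 0.004)
+#' P <- 1/10
+#' t.transformed <- (1/3) * (1/P^2) * t^3
+#' plot(t, t.transformed, type = "l")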
+#'
+#' @export
+CW2pPMi<- function(
+  values,
+  P
+){
+  
+  # (0) Integrity checks ------------------------------------------------------
+  
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
+    
+    stop("[CW2pPMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+    
+  }
+  
+  ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
+  if(is(values, "RLum.Data.Curve") == TRUE){
+    
+    if(!grepl("OSL", values at recordType) & !grepl("IRSL", values at recordType)){
+      
+      stop(paste("[CW2pPMi()] Error: curve type ",values at recordType, "  is not allowed for the transformation!",
+                 sep=""))
+      
+    }else{
+      
+      temp.values <- as(values, "data.frame")
+      
+    }
+    
+  }else{
+    
+    temp.values <- values
+    
+  }
+  
+  
+  # (3) Transform values ------------------------------------------------------
+  
+  ##log transformation of the CW-OSL count values
+  CW_OSL.log<-log(temp.values[,2])
+  
+  ##time transformation t >> t'
+  t<-temp.values[,1]
+  
+  ##set P
+  ##if no value for P is given, select a P value that yields a maximum
+  ##of two extrapolation points
+  if(missing(P)==TRUE){
+    
+    i<-1
+    P<-1/i
+    t.transformed<-(1/3)*(1/P^2)*t^3
+    
+    while(length(t.transformed[t.transformed<min(t)])>2){
+      
+      P<-1/i
+      t.transformed<-(1/3)*(1/P^2)*t^3
+      i<-i+1
+      
+    }
+  }else{
+    
+    t.transformed<-(1/3)*(1/P^2)*t^3
+    
+  }
+  
+  # (4) Interpolation ---------------------------------------------------------
+  
+  
+  ##interpolate values, values beyond the range return NA values
+  CW_OSL.interpolated <- approx(t, CW_OSL.log, xout=t.transformed, rule=1 )
+  
+  ##combine t.transformed and CW_OSL.interpolated in a data.frame
+  temp<-data.frame(x=t.transformed, y = unlist(CW_OSL.interpolated$y))
+  
+  
+  # (5) Extrapolate first values of the curve ---------------------------------
+  
+  ##(a) - find the index of the first row that contains no NA value (needed for extrapolation)
+  temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
+  
+  ##(b) - fit linear function
+  fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
+  
+  ##select values to extrapolate and predict (extrapolate) values based on the fitted function
+  x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
+  y.i<-predict(fit.lm,x.i)
+  
+  ##replace NA values by extrapolated values
+  temp[1:length(y.i),2]<-y.i
+  
+  ##set method values
+  temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
+  
+  
+  ##print a warning message for more than two extrapolation points
+  if(temp.sel.id>2){warning("t' is beyond the time resolution. Only two data points have been extrapolated, the first ",temp.sel.id-3, " points have been set to 0!")}
+  
+  # (6) Convert, transform and combine values ---------------------------------
+  
+  ##unlog CW-OSL count values, i.e. log(CW) >> CW
+  CW_OSL<-exp(temp$y)
+  
+  ##transform CW-OSL values to pPM-OSL values
+  
+  pPM<-(t^2/P^2)*CW_OSL
+  
+  ##combine all values and exclude NA values
+  temp.values <- data.frame(x=t, y.t=pPM, x.t=t.transformed, method=temp.method)
+  temp.values <- na.exclude(temp.values)
+  
+  # (7) Return values ---------------------------------------------------------
+  
+  ##returns the same data type as the input
+  if(is(values, "data.frame") == TRUE){
+    
+    values <- temp.values
+    return(values)
+    
+  }else{
+    
+    
+    ##add old info elements to new info elements
+    temp.info <- c(values@info,
+                   CW2pPMi.x.t = list(temp.values$x.t),
+                   CW2pPMi.method = list(temp.values$method))
+    
+    newRLumDataCurves.CW2pPMi <- set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = values@recordType,
+      data = as.matrix(temp.values[,1:2]),
+      info = temp.info)
+    return(newRLumDataCurves.CW2pPMi)
+    
+  }
+  
+}
diff --git a/R/Luminescence-package.R b/R/Luminescence-package.R
new file mode 100644
index 0000000..2bed4ca
--- /dev/null
+++ b/R/Luminescence-package.R
@@ -0,0 +1,607 @@
+#' Comprehensive Luminescence Dating Data Analysis
+#'
+#' A collection of various R functions for the purpose of Luminescence dating
+#' data analysis. This includes, amongst others, data import, export,
+#' application of age models, curve deconvolution, sequence analysis and
+#' plotting of equivalent dose distributions.
+#'
+#' \tabular{ll}{ Package: \tab Luminescence\cr Type: \tab Package\cr Version:
+#' \tab 0.6.4 \cr Date: \tab 2016-09-09 \cr License: \tab GPL-3\cr }
+#'
+#' @name Luminescence-package
+#' @aliases Luminescence-package Luminescence
+#' @docType package
+#' @author \bold{Authors} (alphabetic order)
+#'
+#' \tabular{ll}{
+#' Christoph Burow \tab University of Cologne, Germany \cr
+#' Michael Dietze \tab GFZ Helmholtz Centre Potsdam, Germany \cr
+#' Julie Durcan \tab University of Oxford, United Kingdom \cr
+#' Manfred Fischer\tab University of Bayreuth, Germany \cr
+#' Margret C. Fuchs \tab Helmholtz-Zentrum Dresden-Rossendorf, Helmholtz-Institute Freiberg for Resource Technology,
+#' Freiberg, Germany \cr
+#' Johannes Friedrich \tab University of Bayreuth, Germany \cr
+#' Georgina King \tab University of Cologne, Germany \cr
+#' Sebastian Kreutzer \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+#' Norbert Mercier \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+#' Christoph Schmidt \tab University of Bayreuth, Germany \cr
+#' Rachel K. Smedley \tab Aberystwyth University, United Kingdom
+#'
+#' }
+#'
+#' \bold{Beta-Tester}
+#'
+#' Thomas Kolb, University of Bayreuth, Germany\cr
+#'
+#' \bold{Supervisor}
+#'
+#' Markus Fuchs, Justus-Liebig-University Giessen, Germany\cr
+#'
+#' \bold{Support contact}
+#'
+#' \email{developers@@r-luminescence.de}\cr
+#'
+#' We further encourage the use of our support forum. For this, please
+#' visit our project website (link below).
+#'
+#' \bold{Bug reporting}
+#'
+#' \email{bugtracker@@r-luminescence.de} \cr
+#'
+#' \bold{Project website}
+#'
+#' \url{http://www.r-luminescence.de}\cr
+#'
+#' \bold{Project source code repository}\cr
+#' \url{https://github.com/R-Lum/Luminescence}\cr
+#'
+#' \bold{Related package projects}\cr
+#' \url{https://cran.r-project.org/package=RLumShiny}\cr
+#' \url{http://shiny.r-luminescence.de}\cr
+#' \url{https://cran.r-project.org/package=RLumModel}\cr
+#' \url{http://model.r-luminescence.de}\cr
+#'
+#' \bold{Package maintainer}
+#'
+#' Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne, Pessac,
+#' France, \cr \email{sebastian.kreutzer@@u-bordeaux-montaigne.fr}
+#'
+#' \bold{Acknowledgement}
+#'
+#' Cooperation and personal exchange between the developers is gratefully
+#' funded by the DFG (SCHM 3051/3-1) in the framework of the program
+#' "Scientific Networks". Project title: "Lum.Network: Ein
+#' Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2017)
+#'
+#' @references Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M.,
+#' Schmidt, C., 2013. A practical guide to the R package Luminescence.
+#' Ancient TL, 31, 11-18.
+#'
+#' Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot:
+#' visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7.
+#' http://dx.doi.org/10.1016/j.quageo.2015.09.003
+#'
+#' Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C.,
+#' Fuchs, M., 2015. Data processing in luminescence dating analysis: An
+#' exemplary workflow using the R package 'Luminescence'. Quaternary
+#' International, 362,8-13. http://dx.doi.org/10.1016/j.quaint.2014.06.034
+#'
+#' Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M.,
+#' 2012. Introducing an R package for luminescence dating analysis. Ancient TL,
+#' 30, 1-8.
+#'
+#' Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
+#' Ancient TL 33, 16-21.
+#'
+#' @keywords package
+#' @import utils methods data.table bbmle
+#' @importFrom raster nlayers raster contour plotRGB brick
+#' @importFrom graphics plot.default frame abline mtext text lines par layout lines arrows axTicks axis barplot box boxplot contour curve grconvertX grconvertY hist legend persp points polygon rug segments title grid
+#' @importFrom grDevices adjustcolor axisTicks colorRampPalette gray.colors rgb topo.colors dev.off
+#' @importFrom stats approx as.formula complete.cases density dnorm glm lm median na.exclude na.omit nls nls.control pchisq pnorm quantile rnorm runif sd smooth smooth.spline spline t.test uniroot var weighted.mean setNames
+#' @importFrom parallel parLapply makeCluster stopCluster
+#' @importFrom Rcpp evalCpp
+#' @useDynLib Luminescence
+NULL
+
+
+#' Base data set for cosmic dose rate calculation
+#'
+#' Collection of data from various sources needed for cosmic dose rate
+#' calculation
+#'
+#'
+#' @format
+#'
+#' \tabular{ll}{
+#'
+#' \code{values.cosmic.Softcomp}: \tab data frame containing cosmic dose rates
+#' for shallow depths (< 167 g cm^-2) obtained using the "AGE" program by
+#' Rainer Gruen (cf. Gruen 2009). These data essentially reproduce the graph
+#' shown in Fig. 1 of Prescott & Hutton (1988). \cr
+#'
+#' \code{values.factor.Altitude}: \tab data frame containing altitude factors
+#' for adjusting geomagnetic field-change factors. Values were read from Fig. 1
+#' in Prescott & Hutton (1994). \cr
+#'
+#' \code{values.par.FJH}: \tab data frame containing values for parameters F, J
+#' and H (read from Fig. 2 in Prescott & Hutton 1994) used in the expression }
+#'
+#' \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))}
+#' @section Version: 0.1
+#' @references
+#' Gruen, R., 2009. The "AGE" program for the calculation of luminescence age estimates.
+#' Ancient TL, 27, pp. 45-46.
+#'
+#' Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for
+#' TL and ESR. Nuclear Tracks and Radiation Measurements, 14, pp. 223-227.
+#'
+#' Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates
+#' for luminescence and ESR dating: large depths and long-term time variations.
+#' Radiation Measurements, 23, pp. 497-500.
+#' @source The following data were carefully read from figures in mentioned
+#' sources and used for fitting procedures. The derived expressions are used in
+#' the function \code{calc_CosmicDoseRate}.
+#'
+#' \bold{values.cosmic.Softcomp}
+#'
+#' \tabular{ll}{
+#'
+#' Program: \tab "AGE"\cr Reference: \tab Gruen (2009) \cr Fit: \tab
+#' Polynomials in the form of
+#'
+#' }
+#'
+#' For depths between 40 and 167 g cm^-2:
+#'
+#' \deqn{y = 2*10^-6*x^2-0.0008*x+0.2535}
+#'
+#' For depths < 40 g cm^-2:
+#'
+#' \deqn{y = -6*10^-8*x^3+2*10^-5*x^2-0.0025*x+0.2969}
+#'
+#' \bold{values.factor.Altitude}
+#'
+#' \tabular{ll}{
+#'
+#' Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 499 \cr Figure: \tab
+#' 1 \cr Fit: \tab 2-degree polynomial in the form of
+#'
+#' }
+#'
+#' \deqn{y = -0.026*x^2 + 0.6628*x + 1.0435}
+#'
+#' \bold{values.par.FJH}
+#'
+#' \tabular{ll}{
+#'
+#' Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 500 \cr Figure: \tab
+#' 2 \cr Fits: \tab 3-degree polynomials and linear fits
+#'
+#' }
+#'
+#' F (non-linear part, \eqn{\lambda} < 36.5 deg.):
+#'
+#' \deqn{y = -7*10^-7*x^3-8*10^-5*x^2-0.0009*x+0.3988}
+#'
+#' F (linear part, \eqn{\lambda} > 36.5 deg.):
+#'
+#' \deqn{y = -0.0001*x + 0.2347}
+#'
+#' J (non-linear part, \eqn{\lambda} < 34 deg.):
+#'
+#' \deqn{y = 5*10^-6*x^3-5*10^-5*x^2+0.0026*x+0.5177}
+#'
+#' J (linear part, \eqn{\lambda} > 34 deg.):
+#'
+#' \deqn{y = 0.0005*x + 0.7388}
+#'
+#' H (non-linear part, \eqn{\lambda} < 36 deg.):
+#'
+#' \deqn{y = -3*10^-6*x^3-5*10^-5*x^2-0.0031*x+4.398}
+#'
+#' H (linear part, \eqn{\lambda} > 36 deg.):
+#'
+#' \deqn{y = 0.0002*x + 4.0914}
+#' @keywords datasets
+#' @examples
+#'
+#' ##load data
+#' data(BaseDataSet.CosmicDoseRate)
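+#'
+#' ##minimal sketch (not from the original examples): evaluate the documented
+#' ##2-degree polynomial fit for the altitude factor (Prescott & Hutton 1994,
+#' ##Fig. 1) at a hypothetical input value x (axis units as read from the figure)
+#' x <- 1.5
+#' -0.026 * x^2 + 0.6628 * x + 1.0435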
+#' @name BaseDataSet.CosmicDoseRate
+NULL
+
+
+#' Example data from a SAR OSL and SAR TL measurement for the package
+#' Luminescence
+#'
+#' Example data from a SAR OSL and TL measurement for package Luminescence
+#' directly extracted from a Risoe BIN-file and provided in an object of type
+#' \link{Risoe.BINfileData-class}
+#'
+#'
+#' @format
+#'
+#' \code{CWOSL.SAR.Data}: SAR OSL measurement data
+#'
+#' \code{TL.SAR.Data}: SAR TL measurement data
+#'
+#' Each class object contains two slots: (a) \code{METADATA} is a
+#' \link{data.frame} with all metadata stored in the BIN file of the
+#' measurements and (b) \code{DATA} contains a list of vectors of the measured
+#' data (usually count values).
+#' @section Version: 0.1
+#' @references
+#' \bold{CWOSL.SAR.Data}: unpublished data \cr
+#'
+#' \bold{TL.SAR.Data}: unpublished data
+#' @source \bold{CWOSL.SAR.Data}
+#'
+#' \tabular{ll}{
+#'
+#' Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT607\cr
+#' Location: \tab Saxony/Germany\cr Material: \tab Middle grain quartz measured
+#' \cr \tab on aluminum cups on a Risoe TL/OSL DA-15 reader\cr Reference: \tab
+#' unpublished }
+#'
+#' \bold{TL.SAR.Data}
+#'
+#' \tabular{ll}{
+#'
+#' Lab: \tab Luminescence Laboratory of Cologne\cr Lab-Code: \tab LP1_5\cr
+#' Location: \tab Spain\cr Material: \tab Flint \cr Setup: \tab Risoe TL/OSL
+#' DA-20 reader \cr \tab (Filter: Semrock Brightline, \cr \tab HC475/50, N2,
+#' unpolished steel discs) \cr Reference: \tab unpublished \cr Remarks: \tab
+#' dataset limited to one position\cr }
+#'
+#' @note Please note that this example data cannot be exported to a BIN-file using the function
+#' \code{writeR2BIN}, as it was generated and implemented in the package a long time ago and
+#' the BIN-file format has changed in the meantime.
+#'
+#' @keywords datasets
+#' @examples
+#'
+#' ##show first 5 elements of the METADATA and DATA elements in the terminal
+#' data(ExampleData.BINfileData, envir = environment())
+#' CWOSL.SAR.Data@@METADATA[1:5,]
+#' CWOSL.SAR.Data@@DATA[1:5]
+#'
+#' @name ExampleData.BINfileData
+NULL
+
+
+#' Example CW-OSL curve data for the package Luminescence
+#'
+#' \code{data.frame} containing CW-OSL curve data (time, counts)
+#'
+#' @name ExampleData.CW_OSL_Curve
+#' @docType data
+#' @format Data frame with 1000 observations on the following 2 variables:
+#' \describe{ \item{list("x")}{a numeric vector, time} \item{list("y")}{a
+#' numeric vector, counts} }
+#' @references Baartman, J.E.M., Veldkamp, A., Schoorl, J.M., Wallinga, J.,
+#' Cammeraat, L.H., 2011. Unravelling Late Pleistocene and Holocene landscape
+#' dynamics: The Upper Guadalentin Basin, SE Spain. Geomorphology, 125,
+#' 172-185.
+#'
+#' Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal
+#' components. Radiation Measurements, 47, 752-758.
+#'
+#' @source \bold{ExampleData.CW_OSL_Curve}
+#'
+#' \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+#' BT607\cr Location: \tab Saxony/Germany\cr Material: \tab Middle grain quartz
+#' measured on aluminum cups on a Risoe TL/OSL DA-15 reader.\cr Reference: \tab
+#' unpublished data }
+#'
+#' \bold{CW_Curve.BosWallinga2012}
+#'
+#' \tabular{ll}{ Lab: \tab Netherlands Centre for Luminescence Dating (NCL)\cr
+#' Lab-Code: \tab NCL-2108077\cr Location: \tab Guadalentin Basin, Spain\cr
+#' Material: \tab Coarse grain quartz\cr Reference: \tab Bos & Wallinga (2012)
+#' and Baartman et al. (2011) }
+#'
+#' @keywords datasets
+#' @examples
+#'
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#' plot(ExampleData.CW_OSL_Curve)
+#'
+NULL
+
+
+
+
+
+#' Example data for fit_LMCurve() in the package Luminescence
+#'
+#' Linearly modulated (LM) measurement data from a quartz sample from Norway
+#' including background measurement. Measurements carried out in the
+#' luminescence laboratory at the University of Bayreuth.
+#'
+#'
+#' @format Two objects (data.frames) with two columns (time and counts).
+#' @references
+#' Fuchs, M., Kreutzer, S., Fischer, M., Sauer, D., Soerensen, R., 2012. OSL and IRSL
+#' dating of raised beach sand deposits along the southeastern coast of Norway.
+#' Quaternary Geochronology, 10, 195-200.
+#' @source
+#' \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+#' BT900\cr Location: \tab Norway\cr Material: \tab Beach deposit, coarse grain
+#' quartz measured on aluminum discs on a Risoe TL/OSL DA-15 reader\cr }
+#' @examples
+#'
+#' ##show LM data
+#' data(ExampleData.FittingLM, envir = environment())
+#' plot(values.curve,log="x")
+#'
+#' @name ExampleData.FittingLM
+NULL
+
+
+#' Example Lx/Tx data from CW-OSL SAR measurement
+#'
+#' LxTx data from a SAR measurement for the package Luminescence.
+#'
+#'
+#' @format A \code{data.frame} with 4 columns (Dose, LxTx, LxTx.Error, TnTx).
+#' @references unpublished data
+#' @source
+#' \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+#' BT607\cr Location: \tab Ostrau (Saxony-Anhalt/Germany)\cr Material: \tab
+#' Middle grain (38-63 \eqn{\mu}m) quartz measured on a Risoe TL/OSL DA-15
+#' reader.\cr }
+#' @examples
+#'
+#' ##plot Lx/Tx data vs dose [s]
+#' data(ExampleData.LxTxData, envir = environment())
+#' plot(LxTxData$Dose,LxTxData$LxTx)
+#'
+#' @name ExampleData.LxTxData
+NULL
+
+
+#' Example Lx and Tx curve data from an artificial OSL measurement
+#'
+#' Lx and Tx data of continuous wave (CW-) OSL signal curves.
+#'
+#'
+#' @format Two \code{data.frames} containing time and count values.
+#' @references unpublished data
+#' @source
+#' Arbitrary OSL measurement.
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.LxTxOSLData, envir = environment())
+#'
+#' ##plot data
+#' plot(Lx.data)
+#' plot(Tx.data)
+#'
+#' @name ExampleData.LxTxOSLData
+NULL
+
+
+#' Example data as \code{\linkS4class{RLum.Analysis}} objects
+#'
+#' Collection of different \code{\linkS4class{RLum.Analysis}} objects for
+#' protocol analysis.
+#'
+#'
+#' @format
+#'
+#' \code{IRSAR.RF.Data}: IRSAR RF measurement data on coarse grain feldspar
+#'
+#' Each object contains data needed for the given protocol analysis.
+#' @section Version: 0.1
+#' @references
+#' \bold{IRSAR.RF.Data}
+#'
+#' Kreutzer, S., Lauer, T., Meszner, S., Krbetschek, M.R., Faust, D., Fuchs,
+#' M., 2014. Chronology of the Quaternary profile Zeuchfeld in Saxony-Anhalt /
+#' Germany - a preliminary luminescence dating study. Zeitschrift fuer
+#' Geomorphologie 58, 5-26. doi: 10.1127/0372-8854/2012/S-00112
+#' @source \bold{IRSAR.RF.Data}
+#'
+#' These data were kindly provided by Tobias Lauer and Matthias Krbetschek.
+#'
+#' \tabular{ll}{
+#'
+#' Lab: \tab Luminescence Laboratory TU Bergakademie Freiberg\cr Lab-Code: \tab
+#' ZEU/SA1\cr Location: \tab Zeuchfeld (Zeuchfeld Sandur;
+#' Saxony-Anhalt/Germany)\cr Material: \tab K-feldspar (130-200 \eqn{\mu}m)\cr
+#' Reference: \tab Kreutzer et al. (2014)\cr
+#'
+#' }
+#' @keywords datasets
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.RLum.Analysis, envir = environment())
+#'
+#' ##plot data
+#' plot_RLum(IRSAR.RF.Data)
+#'
+#' @name ExampleData.RLum.Analysis
+NULL
+
+
+#' Example data as \code{\linkS4class{RLum.Data.Image}} objects
+#'
+#' Measurement of a Princeton Instruments camera imported with the function
+#' \code{\link{read_SPE2R}} into R to produce an
+#' \code{\linkS4class{RLum.Data.Image}} object.
+#'
+#'
+#' @format Object of class \code{\linkS4class{RLum.Data.Image}}
+#' @section Version: 0.1
+#' @source \bold{ExampleData.RLum.Data.Image}
+#'
+#' These data were kindly provided by Regina DeWitt.
+#'
+#' \tabular{ll}{
+#'
+#' Lab.: \tab Department of Physics, East-Carolina University, NC, USA\cr
+#' Lab-Code: \tab -\cr Location: \tab - \cr Material: \tab - \cr Reference:
+#' \tab - \cr
+#'
+#' }
+#'
+#' Image data is a measurement of fluorescent ceiling lights with a cooled
+#' Princeton Instruments (TM) camera fitted on Risoe DA-20 TL/OSL reader.
+#' @keywords datasets
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.RLum.Data.Image, envir = environment())
+#'
+#' ##plot data
+#' plot_RLum(ExampleData.RLum.Data.Image)
+#'
+#' @name ExampleData.RLum.Data.Image
+NULL
+
+
+#' Example data for a SAR OSL measurement and a TL spectrum using a lexsyg
+#' reader
+#'
+#' Example data from a SAR OSL measurement and a TL spectrum for package
+#' Luminescence imported from a Freiberg Instruments XSYG file using the
+#' function \code{\link{read_XSYG2R}}.
+#'
+#'
+#' @format
+#'
+#' \code{OSL.SARMeasurement}: SAR OSL measurement data
+#'
+#' The data contain two elements: (a) \code{$Sequence.Header} is a
+#' \link{data.frame} with metadata from the measurement and (b)
+#' \code{$Sequence.Object} contains an \code{\linkS4class{RLum.Analysis}} object
+#' for further analysis.\cr
+#'
+#' \code{TL.Spectrum}: TL spectrum data
+#'
+#' \code{\linkS4class{RLum.Data.Spectrum}} object for further analysis. The
+#' spectrum was cleaned from cosmic-rays using the function
+#' \code{apply_CosmicRayRemoval}. Note that no quantum efficiency calibration
+#' was performed.
+#' @section Version: 0.1
+#' @seealso \code{\link{read_XSYG2R}}, \code{\linkS4class{RLum.Analysis}},\cr
+#' \code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot_RLum}},\cr
+#' \code{\link{plot_RLum.Analysis}}, \code{\link{plot_RLum.Data.Spectrum}}
+#' @references Unpublished data measured to serve as example data for that
+#' package. Location origin of sample BT753 is given here:
+#'
+#' Fuchs, M., Kreutzer, S., Rousseau, D.D., Antoine, P., Hatte, C., Lagroix,
+#' F., Moine, O., Gauthier, C., Svoboda, J., Lisa, L., 2013. The loess sequence
+#' of Dolni Vestonice, Czech Republic: A new OSL-based chronology of the Last
+#' Climatic Cycle. Boreas, 42, 664--677.
+#' @source \bold{OSL.SARMeasurement}
+#'
+#' \tabular{ll}{
+#'
+#' Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab no code\cr
+#' Location: \tab not specified\cr Material: \tab Coarse grain quartz \cr \tab
+#' on steel cups on lexsyg research reader\cr Reference: \tab unpublished }
+#'
+#' \bold{TL.Spectrum}
+#'
+#' \tabular{ll}{
+#'
+#' Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab BT753\cr
+#' Location: \tab Dolni Vestonice/Czech Republic\cr Material: \tab Fine grain
+#' polymineral \cr \tab on steel cups on lexsyg research reader\cr Reference:
+#' \tab Fuchs et al., 2013 \cr Spectrum: \tab Integration time 19 s, channel
+#' time 20 s\cr Heating: \tab 1 K/s, up to 500 deg. C }
+#' @keywords datasets
+#' @examples
+#'
+#' ##show data
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ## =========================================
+#' ##(1) OSL.SARMeasurement
+#' OSL.SARMeasurement
+#'
+#' ##show $Sequence.Object
+#' OSL.SARMeasurement$Sequence.Object
+#'
+#' ##grep OSL curves and plot the first curve
+#' OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object,
+#' recordType="OSL")[[1]]
+#' plot_RLum(OSLcurve)
+#'
+#' ## =========================================
+#' ##(2) TL.Spectrum
+#' TL.Spectrum
+#'
+#' ##plot simple spectrum (2D)
+#' plot_RLum.Data.Spectrum(TL.Spectrum,
+#'                         plot.type="contour",
+#'                         xlim = c(310,750),
+#'                         ylim = c(0,300),
+#'                         bin.rows=10,
+#'                         bin.cols = 1)
+#'
+#' ##plot 3d spectrum (uncomment for usage)
+#' # plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp",
+#' # xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+#' # bin.cols = 1)
+#'
+#' @name ExampleData.XSYG
+NULL
+
+
+#' Example De data sets for the package Luminescence
+#'
+#' Equivalent dose (De) values measured for a fine grain quartz sample from a
+#' loess section in Rottewitz (Saxony/Germany) and for a coarse grain quartz
+#' sample from a fluvial deposit in the rock shelter of Cueva Anton
+#' (Murcia/Spain).
+#'
+#'
+#' @format A \code{\link{list}} with two elements, each containing a two column
+#' \code{\link{data.frame}}:
+#'
+#' \describe{ \code{$BT998}: De and De error values for a fine grain quartz
+#' sample from a loess section in Rottewitz.\cr\cr \code{$CA1}: Single grain De
+#' and De error values for a coarse grain quartz sample from a fluvial deposit
+#' in the rock shelter of Cueva Anton }
+#' @references \bold{BT998} \cr\cr Unpublished data \cr\cr
+#' \bold{CA1} \cr\cr
+#' Burow, C., Kehl, M., Hilgers, A., Weniger, G.-C., Angelucci, D., Villaverde,
+#' V., Zapata, J. and Zilhao, J.  (2015). Luminescence dating of fluvial
+#' deposits in the rock shelter of Cueva Anton, Spain. Geochronometria 52, 107-125.
+#'
+#' \bold{BT998} \cr
+#' \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr
+#' Lab-Code: \tab BT998\cr Location: \tab Rottewitz (Saxony/Germany)\cr
+#' Material: \tab Fine grain quartz measured on aluminum discs on a Risoe
+#' TL/OSL DA-15 reader\cr Units: \tab Values are given in seconds \cr Dose
+#' Rate: \tab Dose rate of the beta-source at measurement ca. 0.0438 Gy/s +/-
+#' 0.0019 Gy/s\cr Measurement Date: \tab 2012-01-27 }
+#' \bold{CA1} \cr
+#' \tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory (CLL)\cr Lab-Code:
+#' \tab C-L2941\cr Location: \tab Cueva Anton (Murcia/Spain)\cr Material: \tab
+#' Coarse grain quartz (200-250 microns) measured on single grain discs on a
+#' Risoe TL/OSL DA-20 reader\cr Units: \tab Values are given in Gray \cr
+#' Measurement Date: \tab 2012 }
+#' @keywords datasets
+#' @examples
+#'
+#' ##(1) plot values as histogram
+#' data(ExampleData.DeValues, envir = environment())
+#' plot_Histogram(ExampleData.DeValues$BT998, xlab = "De [s]")
+#'
+#' ##(2) plot values as histogram (with second to gray conversion)
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' De.values <- Second2Gray(ExampleData.DeValues$BT998,
+#'                          dose.rate = c(0.0438, 0.0019))
+#'
+#'
+#' plot_Histogram(De.values, xlab = "De [Gy]")
+#'
+#' @name ExampleData.DeValues
+NULL
diff --git a/R/RLum-class.R b/R/RLum-class.R
new file mode 100644
index 0000000..22d9d3f
--- /dev/null
+++ b/R/RLum-class.R
@@ -0,0 +1,87 @@
+#' @include replicate_RLum.R RcppExports.R
+NULL
+
+#' Class \code{"RLum"}
+#'
+#' Abstract class for data in the package Luminescence
+#'
+#'
+#' @name RLum-class
+#'
+#' @docType class
+#'
+#' @slot originator Object of class \code{\link{character}} containing the name of the producing
+#' function for the object. Set automatically by using the function \code{\link{set_RLum}}.
+#'
+#' @slot info Object of class \code{\link{list}} for additional information on the object itself
+#'
+#' @slot .uid Object of class \code{\link{character}} for a unique object identifier. This id is
+#' usually calculated using the internal function \code{.create_UID()} if the function \code{\link{set_RLum}}
+#' is called.
+#'
+#' @slot .pid Object of class \code{\link{character}} for a parent id. This allows nesting RLum-objects
+#' at will. The parent id can be the uid of another object.
+#'
+#' @note \code{RLum} is a virtual class.
+#'
+#' @section Objects from the Class: A virtual Class: No objects can be created
+#' from it.
+#'
+#' @section Class version: 0.4.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Analysis}}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("RLum")
+#'
+#' @export
+setClass("RLum",
+           slots = list(
+             originator = "character",
+             info = "list",
+             .uid = "character",
+             .pid = "character"
+             ),
+           contains = "VIRTUAL",
+           prototype = prototype(
+             originator = NA_character_,
+             info = list(),
+             .uid = NA_character_,
+             .pid = NA_character_
+           )
+         )
+
+
+# replication method for object class ------------------------------------------
+
+#' @describeIn RLum
+#' Replication method for RLum-objects
+#'
+#' @param object an object of class \code{\linkS4class{RLum}} (\bold{required})
+#'
+#' @param times \code{\link{integer}} (optional): number of times each element is repeated
+#'
+#' @export
+setMethod(
+  "replicate_RLum",
+  "RLum",
+  definition = function(object, times = NULL) {
+
+    ##The case this is NULL
+    if (is.null(times)) {
+      times <- 1
+    }
+
+    lapply(1:times, function(x) {
+      object
+
+    })
+
+  }
+)
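+
+## A minimal usage sketch, assuming the package is loaded: replicating an
+## (empty) RLum.Data.Curve object three times returns a plain list of
+## identical objects.
+# curve <- set_RLum(class = "RLum.Data.Curve")
+# replicate_RLum(curve, times = 3)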
diff --git a/R/RLum.Analysis-class.R b/R/RLum.Analysis-class.R
new file mode 100644
index 0000000..d61f86b
--- /dev/null
+++ b/R/RLum.Analysis-class.R
@@ -0,0 +1,681 @@
+#' @include get_RLum.R set_RLum.R length_RLum.R structure_RLum.R names_RLum.R
+NULL
+
+#' Class \code{"RLum.Analysis"}
+#'
+#' Object class to represent analysis data for protocol analysis, i.e. all curves, spectra etc.
+#' from one measurement. Objects of this class are produced by, e.g., \code{\link{read_XSYG2R}} and
+#' \code{\link{read_Daybreak2R}}
+#'
+#'
+#' @name RLum.Analysis-class
+#'
+#' @docType class
+#'
+#' @slot protocol Object of class \code{\link{character}} describing the applied measurement protocol
+#'
+#' @slot records Object of class \code{\link{list}} containing objects of class \code{\linkS4class{RLum.Data}}
+#'
+#' @note The method \code{\link{structure_RLum}} is currently only available for objects
+#' containing \code{\linkS4class{RLum.Data.Curve}}.
+#'
+#' @section Objects from the Class: Objects can be created by calls of the form
+#' \code{set_RLum("RLum.Analysis", ...)}.
+#'
+#' @section Class version: 0.4.6
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{Risoe.BINfileData2RLum.Analysis}},
+#' \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum}}
+#'
+#' @keywords classes methods
+#'
+#' @examples
+#'
+#' showClass("RLum.Analysis")
+#'
+#' ##set empty object
+#' set_RLum(class = "RLum.Analysis")
+#'
+#' ###use example data
+#' ##load data
+#' data(ExampleData.RLum.Analysis, envir = environment())
+#'
+#' ##show curves in object
+#' get_RLum(IRSAR.RF.Data)
+#'
+#' ##show only the first object, but by keeping the object
+#' get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE)
+#'
+#' @export
+setClass("RLum.Analysis",
+         slots = list(
+           protocol = "character",
+           records = "list"
+         ),
+         contains = "RLum",
+         prototype = list (
+           protocol = NA_character_,
+           records = list()
+         )
+)
+
+
+####################################################################################################
+###as()
+####################################################################################################
+##LIST
+##COERCE RLum.Analyse >> list AND list >> RLum.Analysis
+#' as() - RLum-object coercion
+#'
+#' for \code{[RLum.Analysis]}
+#'
+#' \bold{[RLum.Analysis]}\cr
+#'
+#' \tabular{ll}{
+#'  \bold{from} \tab \bold{to}\cr
+#'   \code{list} \tab \code{list}\cr
+#' }
+#'
+#' Given that the \code{\link{list}} consists of \code{\linkS4class{RLum.Analysis}} objects.
+#'
+#' @name as
+#'
+#'
+setAs("list", "RLum.Analysis",
+      function(from,to){
+
+        new(to,
+            protocol = NA_character_,
+            records = from)
+      })
+
+setAs("RLum.Analysis", "list",
+      function(from){
+
+        lapply(1:length(from@records), function(x){
+          from@records[[x]]
+
+        })
+
+      })
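+
+## A minimal usage sketch, assuming the list elements are RLum.Data records:
+## coerce a list to an RLum.Analysis object and back.
+# records <- list(set_RLum(class = "RLum.Data.Curve"))
+# obj <- as(records, "RLum.Analysis")
+# as(obj, "list")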
+
+
+####################################################################################################
+###show()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Show structure of \code{RLum.Analysis} object
+#' @export
+setMethod("show",
+          signature(object = "RLum.Analysis"),
+          function(object){
+
+            ##print
+            cat("\n [RLum.Analysis]")
+
+            ##show slot originator; for compatibility reasons with old example data,
+            ##here a check
+            if(.hasSlot(object, "originator")){cat("\n\t originator:", paste0(object@originator,"()"))}
+
+            cat("\n\t protocol:", object@protocol)
+            cat("\n\t additional info elements: ", if(.hasSlot(object, "info")){length(object@info)}else{0})
+            cat("\n\t number of records:", length(object@records))
+
+            #skip this part if nothing is included in the object
+            if(length(object@records) > 0){
+
+              ##get object class types
+              temp <- sapply(1:length(object@records), function(x){
+
+                is(object@records[[x]])[1]
+
+              })
+
+              ##print object class types
+              sapply(1:length(table(temp)), function(x){
+
+                ##show RLum class type
+                cat("\n\t .. :",names(table(temp)[x]),":",table(temp)[x])
+
+
+                ##show structure
+                ##set width option ... just an implementation for the tutorial output
+                ifelse(getOption("width")<=50, temp.width <- 4, temp.width  <- 10)
+
+                cat("\n\t .. .. : ",
+                    unlist(sapply(1:length(object@records),  function(i) {
+
+                      if(names(table(temp)[x]) == is(object@records[[i]])[1]){
+                        paste(object@records[[i]]@recordType,
+                              if(i%%temp.width==0 & i!=length(object@records)){"\n\t .. .. : "})
+                      }
+                    })))
+
+              })
+
+            }else{
+
+              cat("\n\t >> This is an empty object and cannot be used for further analysis! <<")
+
+            }
+
+
+          }
+)##end show method
+
+
+####################################################################################################
+###set_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Construction method for \code{\linkS4class{RLum.Analysis}} objects.
+#'
+#' @param class [\code{set_RLum}] \code{\link{character}} (\bold{required}): name of the \code{RLum} class to be created
+#' @param originator [\code{set_RLum}] \code{\link{character}} (automatic): contains the name
+#' of the calling function (the function that produces this object); can be set manually.
+#' @param .uid [\code{set_RLum}] \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#' @param .pid [\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#' @param protocol [\code{set_RLum}] \code{\link{character}} (optional): sets protocol type for
+#' analysis object. Value may be used by subsequent analysis functions.
+#' @param records [\code{set_RLum}] \code{\link{list}} (\bold{required}): list of \code{\linkS4class{RLum.Data}} objects
+#' @param info [\code{set_RLum}] \code{\link{list}} (optional): a list containing additional
+#' info data for the object
+#'
+#' \bold{\code{set_RLum}}:\cr
+#'
+#' Returns an \code{\linkS4class{RLum.Analysis}} object.
+#'
+#' @export
+setMethod(
+  "set_RLum",
+  signature = "RLum.Analysis",
+
+  definition = function(class,
+                        originator,
+                        .uid,
+                        .pid,
+                        protocol = NA_character_,
+                        records = list(),
+                        info = list()
+                        ) {
+
+    ##produce empty class object
+    newRLumAnalysis <- new(Class = "RLum.Analysis")
+
+    ##allow self set to reset an RLum.Analysis object
+    if(is(records, "RLum.Analysis")){
+
+      #fill slots (this is much faster than the old code!)
+      newRLumAnalysis@protocol <- if(missing(protocol)){records@protocol}else{protocol}
+      newRLumAnalysis@originator <- originator
+      newRLumAnalysis@records <- records@records
+      newRLumAnalysis@info <- if(missing(info)){records@info}else{c(records@info, info)}
+      newRLumAnalysis@.uid <- .uid
+      newRLumAnalysis@.pid <- if(missing(.pid)){records@.uid}else{.pid}
+
+
+    }else{
+
+      #fill slots (this is much faster than the old code!)
+      newRLumAnalysis@protocol <- protocol
+      newRLumAnalysis@originator <- originator
+      newRLumAnalysis@records <- records
+      newRLumAnalysis@info <- info
+      newRLumAnalysis@.uid <- .uid
+      newRLumAnalysis@.pid <- .pid
+
+    }
+
+    return(newRLumAnalysis)
+
+  }
+)
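+
+## A minimal usage sketch: building an RLum.Analysis object around a single
+## (empty) curve record; "testProtocol" is an arbitrary label.
+# a <- set_RLum(class = "RLum.Analysis",
+#               protocol = "testProtocol",
+#               records = list(set_RLum(class = "RLum.Data.Curve")))
+# length_RLum(a)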
+
+####################################################################################################
+###get_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Accessor method for RLum.Analysis object.
+#'
+#' The arguments record.id, recordType, curveType and RLum.type are optional and allow
+#' limiting the returned records by their id (list index number), their record type
+#' (e.g. recordType = "OSL") or their object type.
+#'
+#' Example: curve type (e.g. curveType = "predefined" or curveType = "measured")
+#'
+#' The selection of a specific RLum.type object supersedes the default selection.
+#' Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum
+#'
+#' @param object \code{[show_RLum]}\code{[get_RLum]}\code{[names_RLum]}\code{[length_RLum]}
+#' \code{[structure_RLum]} an object of class \code{\linkS4class{RLum.Analysis}}
+#' (\bold{required})
+#'
+#' @param record.id [\code{get_RLum}] \code{\link{numeric}} or \code{\link{logical}} (optional): IDs of specific records.
+#' If of type \code{logical} the entire id range is assumed and \code{TRUE} and \code{FALSE} indicate the selection.
+#'
+#' @param recordType [\code{get_RLum}] \code{\link{character}} (optional): record type (e.g., "OSL").
+#' Can be also a vector, for multiple matching, e.g., \code{recordType = c("OSL", "IRSL")}
+#'
+#' @param curveType [\code{get_RLum}] \code{\link{character}} (optional): curve
+#' type (e.g. "predefined" or "measured")
+#'
+#' @param RLum.type [\code{get_RLum}] \code{\link{character}} (optional): RLum object type.
+#' Defaults to "RLum.Data.Curve" and "RLum.Data.Spectrum".
+#'
+#' @param get.index [\code{get_RLum}] \code{\link{logical}} (optional): return a numeric
+#' vector with the index of each element in the RLum.Analysis object.
+#'
+#' @param recursive [\code{get_RLum}] \code{\link{logical}} (with default): if \code{TRUE} (the default)
+#' and the result of the 'get_RLum' request is a single object, this object will be unlisted, i.e.
+#' only the object itself and not a list containing exactly one object is returned. Mostly this makes
+#' things easier; however, if this method is used within a loop this might be undesired.
+#'
+#' @param drop [\code{get_RLum}] \code{\link{logical}} (with default): coerce to the next possible layer
+#' (which are \code{RLum.Data}-objects), \code{drop = FALSE} keeps the original \code{RLum.Analysis}
+#'
+#' @param info.object [\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+#' element
+#'
+#' @return
+#'
+#' \bold{\code{get_RLum}}:\cr
+#'
+#' Returns: \cr
+#' (1) \code{\link{list}} of \code{\linkS4class{RLum.Data}} objects or \cr
+#' (2) Single \code{\linkS4class{RLum.Data}} object, if only one object is contained and
+#' \code{recursive = FALSE} or\cr
+#' (3) \code{\linkS4class{RLum.Analysis}} objects for \code{drop = FALSE} \cr
+#'
+#' @export
+setMethod("get_RLum",
+          signature = ("RLum.Analysis"),
+
+          function(object, record.id = NULL, recordType = NULL, curveType = NULL, RLum.type = NULL,
+                   protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE, info.object = NULL){
+
+            ##if info.object is set, only the info objects are returned
+            if(!is.null(info.object)) {
+
+              if(info.object %in% names(object@info)){
+
+                unlist(object@info[info.object])
+
+              }else{
+
+                ##check for entries
+                if(length(object@info) == 0){
+
+                  warning("[get_RLum] This RLum.Analysis object has no info objects! NULL returned!")
+                  return(NULL)
+
+                }else{
+
+                  ##grep names
+                  temp.element.names <- paste(names(object@info), collapse = ", ")
+
+                  warning.text <- paste("[get_RLum] Invalid info.object name. Valid names are:", temp.element.names)
+
+                  warning(warning.text, call. = FALSE)
+                  return(NULL)
+
+                }
+
+              }
+
+
+            } else{
+              ##record.id
+              if (is.null(record.id)) {
+                record.id <- c(1:length(object@records))
+
+              } else if (!is(record.id, "numeric") &
+                         !is(record.id, "logical")) {
+                stop("[get_RLum()] 'record.id' has to be of type 'numeric' or 'logical'!")
+
+              }
+              ##logical needs a slightly different treatment
+              ##Why do we need this? Because a lot of standard R functions work with logical
+              ##values instead of numerical indices
+              if (is(record.id, "logical")) {
+                record.id <- c(1:length(object@records))[record.id]
+
+              }
+
+              ##check if record.id exists
+              if (FALSE %in% (abs(record.id) %in% (1:length(object@records)))) {
+                stop("[get_RLum()] At least one 'record.id' is invalid!")
+
+              }
+
+              ##recordType
+              if (is.null(recordType)) {
+                recordType <- unique(unlist(lapply(1:length(object@records),
+                                                   function(x) {
+                                                     object@records[[x]]@recordType
+                                                   })))
+
+              } else{
+                if (!is(recordType, "character")) {
+                  stop("[get_RLum()] 'recordType' has to be of type 'character'!")
+
+                }
+
+              }
+
+              ##curveType
+              if (is.null(curveType)) {
+                curveType <- unique(unlist(lapply(1:length(object@records),
+                                                  function(x) {
+                                                    object@records[[x]]@curveType
+                                                  })))
+
+              } else if (!is(curveType, "character")) {
+                stop("[get_RLum()] 'curveType' has to be of type 'character'!")
+
+              }
+
+              ##RLum.type
+              if (is.null(RLum.type)) {
+                RLum.type <- c("RLum.Data.Curve", "RLum.Data.Spectrum")
+
+              } else if (!is(RLum.type, "character")) {
+                stop("[get_RLum()] 'RLum.type' has to be of type 'character'!")
+
+              }
+
+              ##get.index
+              if (is.null(get.index)) {
+                get.index <- FALSE
+
+              } else if (!is(get.index, "logical")) {
+                stop("[get_RLum()] 'get.index' has to be of type 'logical'!")
+
+              }
+
+              ##get originator
+              if (.hasSlot(object, "originator")) {
+                originator <- object@originator
+
+              } else{
+                originator <- NA_character_
+
+              }
+
+
+              ##-----------------------------------------------------------------##
+
+              ##a pre-selection is necessary to support negative index selection
+              object@records <- object@records[record.id]
+              record.id <- 1:length(object@records)
+
+
+              ##select curves according to the chosen parameter
+              if (length(record.id) > 1) {
+                temp <- lapply(record.id, function(x) {
+                  if (is(object@records[[x]])[1] %in% RLum.type == TRUE) {
+                    ##as input a vector is allowed
+                    temp <- lapply(1:length(recordType), function(k) {
+                      ##translate input to regular expression
+                      recordType[k] <- glob2rx(recordType[k])
+                      recordType[k] <- substr(recordType[k],
+                                              start = 2,
+                                              stop = nchar(recordType[k]) -
+                                                1)
+
+                      if (grepl(recordType[k], object@records[[x]]@recordType) == TRUE &
+                          object@records[[x]]@curveType %in% curveType) {
+                        if (!get.index) {
+                          object@records[[x]]
+
+                        } else{
+                          x
+                        }
+
+                      }
+
+                    })
+
+                    ##remove empty entries and select just one to unlist
+                    temp <- temp[!sapply(temp, is.null)]
+
+                    ##if list has length 0 skip entry
+                    if (length(temp) != 0) {
+                      temp[[1]]
+                    } else{
+                      temp <- NULL
+                    }
+
+                  }
+
+                })
+
+
+                ##remove empty list element
+                temp <- temp[!sapply(temp, is.null)]
+
+                ##check if the produced object is empty and show warning message
+                if (length(temp) == 0) {
+                  warning("[get_RLum()] This request produced an empty list of records!")
+
+                }
+
+                ##remove list for get.index
+                if (get.index) {
+                  return(unlist(temp))
+
+                } else{
+                  if (!drop) {
+                    temp <- set_RLum(
+                      class = "RLum.Analysis",
+                      originator = originator,
+                      records = temp,
+                      protocol = object@protocol
+                    )
+                    return(temp)
+
+                  } else{
+                    if (length(temp) == 1 & recursive == TRUE) {
+                      return(temp[[1]])
+
+                    } else{
+                      return(temp)
+
+                    }
+
+                  }
+
+                }
+
+              } else{
+                if (get.index == FALSE) {
+                  if (drop == FALSE) {
+                    ##needed to keep the argument drop == TRUE
+                    temp <- set_RLum(
+                      class = "RLum.Analysis",
+                      originator = originator,
+                      records = list(object@records[[record.id]]),
+                      protocol = object@protocol
+                    )
+                    return(temp)
+
+                  } else{
+                    return(object@records[[record.id]])
+
+                  }
+
+
+                } else{
+                  return(record.id)
+
+                }
+              }
+
+            }
+
+          })
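+
+## A minimal usage sketch with the package example data; it assumes the
+## records in IRSAR.RF.Data carry recordType "RF".
+# data(ExampleData.RLum.Analysis, envir = environment())
+# get_RLum(IRSAR.RF.Data, recordType = "RF")                    # matching records
+# get_RLum(IRSAR.RF.Data, recordType = "RF", get.index = TRUE)  # their indices
+# get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE)          # keep RLum.Analysis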
+
+
+####################################################################################################
+###structure_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Method to show the structure of an \code{\linkS4class{RLum.Analysis}} object.
+#'
+#' @param fullExtent [structure_RLum] \code{\link{logical}} (with default): extends the returned \code{data.frame}
+#' to its full extent, i.e. all info elements are part of the return as well. The default value
+#' is \code{FALSE} as the data frame might become rather big.
+#'
+#' @return
+#'
+#' \bold{\code{structure_RLum}}:\cr
+#'
+#' Returns \code{\linkS4class{data.frame}} showing the structure.
+#'
+#' @export
+setMethod("structure_RLum",
+          signature= "RLum.Analysis",
+          definition = function(object, fullExtent = FALSE) {
+
+            ##check if the object containing other elements than allowed
+            if(length(grep(FALSE, sapply(object@records, is, class="RLum.Data.Curve")))!=0){
+
+              stop("[structure_RLum()]  Only 'RLum.Data.Curve' objects are allowed!" )
+
+            }
+
+            ##get length object
+            temp.object.length <- length(object@records)
+
+            ##ID
+            temp.id <- 1:temp.object.length
+
+            ##OBJECT TYPE
+            temp.recordType <- c(NA)
+            length(temp.recordType) <- temp.object.length
+            temp.recordType <- sapply(1:temp.object.length,
+                                      function(x){object@records[[x]]@recordType})
+
+            ##PROTOCOL STEP
+            temp.protocol.step <- c(NA)
+            length(temp.protocol.step) <- temp.object.length
+
+            ##n.channels
+            temp.n.channels <- sapply(1:temp.object.length,
+                                      function(x){length(object@records[[x]]@data[,1])})
+
+            ##X.MIN
+            temp.x.min <- sapply(1:temp.object.length,
+                                 function(x){min(object@records[[x]]@data[,1])})
+
+            ##X.MAX
+            temp.x.max <- sapply(1:temp.object.length,
+                                 function(x){max(object@records[[x]]@data[,1])})
+
+            ##y.MIN
+            temp.y.min <- sapply(1:temp.object.length,
+                                 function(x){min(object@records[[x]]@data[,2])})
+
+            ##Y.MAX
+            temp.y.max <- sapply(1:temp.object.length,
+                                 function(x){max(object@records[[x]]@data[,2])})
+
+            ##.uid
+            temp.uid <- unlist(lapply(object@records, function(x){x@.uid}))
+
+            ##.pid
+            temp.pid <- unlist(lapply(object@records, function(x){x@.pid}))
+
+            ##originator
+            temp.originator <- unlist(lapply(object@records, function(x){x@originator}))
+
+            ##curveType
+            temp.curveType <- unlist(lapply(object@records, function(x){x@curveType}))
+
+            ##info elements as character value
+            if (fullExtent) {
+              temp.info.elements <- as.data.frame(data.table::rbindlist(lapply(object@records, function(x) {
+                x@info
+              }), fill = TRUE))
+
+            } else{
+              temp.info.elements <-
+                unlist(sapply(1:temp.object.length, function(x) {
+                  if (length(object@records[[x]]@info) != 0) {
+                    do.call(paste, as.list(names(object@records[[x]]@info)))
+                  } else{
+                    NA
+                  }
+
+                }))
+
+            }
+
+            ##combine output to a data.frame
+            return(
+              data.frame(
+                id = temp.id,
+                recordType = temp.recordType,
+                curveType = temp.curveType,
+                protocol.step = temp.protocol.step,
+                n.channels = temp.n.channels,
+                x.min = temp.x.min,
+                x.max = temp.x.max,
+                y.min = temp.y.min,
+                y.max = temp.y.max,
+                originator = temp.originator,
+                .uid = temp.uid,
+                .pid = temp.pid,
+                info = temp.info.elements,
+                stringsAsFactors = FALSE
+              )
+            )
+
+          })
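+
+## A minimal usage sketch with the package example data; all records in
+## IRSAR.RF.Data are RLum.Data.Curve objects, so structure_RLum() applies.
+# data(ExampleData.RLum.Analysis, envir = environment())
+# structure_RLum(IRSAR.RF.Data)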
+
+
+####################################################################################################
+###length_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Returns the length of the object, i.e., number of stored records.
+#'
+#' @return
+#'
+#' \bold{\code{length_RLum}}\cr
+#'
+#' Returns the number records in this object.
+#'
+#' @export
+setMethod("length_RLum",
+          "RLum.Analysis",
+          function(object){
+            length(object@records)
+
+          })
+
+####################################################################################################
+###names_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#' Returns the names of the \code{\linkS4class{RLum.Data}} objects (same as shown with the show method)
+#'
+#' @return
+#'
+#' \bold{\code{names_RLum}}\cr
+#'
+#' Returns the names of the record types (recordType) in this object.
+#'
+#' @export
+setMethod("names_RLum",
+          "RLum.Analysis",
+          function(object){
+            sapply(1:length(object@records), function(x){
+              object@records[[x]]@recordType})
+
+          })
+
diff --git a/R/RLum.Data-class.R b/R/RLum.Data-class.R
new file mode 100644
index 0000000..98c3685
--- /dev/null
+++ b/R/RLum.Data-class.R
@@ -0,0 +1,32 @@
+#' Class \code{"RLum.Data"}
+#'
+#' Generalized virtual data class for luminescence data.
+#'
+#'
+#' @name RLum.Data-class
+#'
+#' @docType class
+#'
+#' @note Just a virtual class.
+#'
+#' @section Objects from the Class: A virtual Class: No objects can be created
+#' from it.
+#'
+#' @section Class version: 0.2.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Spectrum}}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("RLum.Data")
+#'
+#' @export
+setClass("RLum.Data",
+         contains = c("RLum", "VIRTUAL")
+)
+
diff --git a/R/RLum.Data.Curve-class.R b/R/RLum.Data.Curve-class.R
new file mode 100644
index 0000000..1cd3292
--- /dev/null
+++ b/R/RLum.Data.Curve-class.R
@@ -0,0 +1,458 @@
+#' @include get_RLum.R set_RLum.R names_RLum.R length_RLum.R bin_RLum.Data.R
+NULL
+
+#' Class \code{"RLum.Data.Curve"}
+#'
+#' Class for representing luminescence curve data.
+#'
+#' @name RLum.Data.Curve-class
+#'
+#' @docType class
+#'
+#' @slot recordType Object of class "character" containing the type of the curve (e.g. "TL" or "OSL")
+#'
+#' @slot curveType Object of class "character" containing curve type, allowed values are measured or predefined
+#'
+#' @slot data Object of class \code{\link{matrix}} containing curve x and y data.
+#' 'data' can also be of type \code{RLum.Data.Curve} to change object values without deconstructing the object.
+#' For example: \code{set_RLum(class = 'RLum.Data.Curve',
+#' data = Your.RLum.Data.Curve, recordType = 'never seen before')}
+#' would just change the recordType. For missing arguments the value is taken from the input object
+#' in 'data' (which is already an RLum.Data.Curve object in this example).
+#'
+#'
+#' @note The class should only contain data for a single curve. For additional
+#' elements the slot \code{info} can be used (e.g. providing additional heating
+#' ramp curve). Objects from the class \code{RLum.Data.Curve} are produced by other
+#' functions (partly within \code{\linkS4class{RLum.Analysis}} objects),
+#' namely: \code{\link{Risoe.BINfileData2RLum.Analysis}}, \code{\link{read_XSYG2R}}
+#'
+#' @section Create objects from this Class: Objects can be created by calls of the form
+#' \code{set_RLum(class = "RLum.Data.Curve", ...)}.
+#'
+#' @section Class version: 0.4.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+#' \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("RLum.Data.Curve")
+#'
+#' ##set empty curve object
+#' set_RLum(class = "RLum.Data.Curve")
+#'
+#' @export
+setClass("RLum.Data.Curve",
+         slots = list(
+           recordType = "character",
+           curveType = "character",
+           data = "matrix"
+           ),
+         contains = "RLum.Data",
+         prototype = list (
+           recordType = NA_character_,
+           curveType = NA_character_,
+           data = matrix(data = 0, ncol = 2)
+           )
+         )
+
+####################################################################################################
+###as()
+####################################################################################################
+##LIST
+##COERCE RLum.Data.Curve >> list AND list >> RLum.Data.Curve
+#' as() - RLum-object coercion
+#'
+#' for \code{[RLum.Data.Curve]}
+#'
+#' \bold{[RLum.Data.Curve]}\cr
+#'
+#' \tabular{ll}{
+#'  \bold{from} \tab \bold{to}\cr
+#'   \code{list} \tab \code{list} \cr
+#'   \code{data.frame} \tab \code{data.frame}\cr
+#'   \code{matrix} \tab \code{matrix}
+#'
+#' }
+#'
+#' @param from \code{\linkS4class{RLum}} or \code{\link{list}}, \code{\link{data.frame}}, \code{\link{matrix}}
+#' (\bold{required}): object to be coerced from
+#'
+#' @param to \code{\link{character}} (\bold{required}): class name to be coerced to
+#'
+#' @seealso \code{\link[methods]{as}}
+#'
+#' @note Due to the complex structure of \code{RLum} objects, a coercion to standard
+#' R data structures will always be lossy!
+#'
+#' @name as
+#'
+setAs("list", "RLum.Data.Curve",
+      function(from,to){
+
+        new(to,
+            recordType = "unkown curve type",
+            curveType = NA_character_,
+            data = matrix(unlist(from), ncol = 2),
+            info = list())
+      })
+
+
+setAs("RLum.Data.Curve", "list",
+      function(from){
+
+          list(x = from@data[,1], y = from@data[,2])
+
+      })
+
+##DATA.FRAME
+##COERCE RLum.Data.Curve >> data.frame AND data.frame >> RLum.Data.Curve
+setAs("data.frame", "RLum.Data.Curve",
+      function(from,to){
+
+              new(to,
+                  recordType = "unkown curve type",
+                  curveType = NA_character_,
+                  data = as.matrix(from),
+                  info = list())
+            })
+
+setAs("RLum.Data.Curve", "data.frame",
+      function(from){
+
+        data.frame(x = from@data[,1],
+                   y = from@data[,2])
+
+      })
+
+
+##MATRIX
+##COERCE RLum.Data.Curve >> matrix AND matrix >> RLum.Data.Curve
+setAs("matrix", "RLum.Data.Curve",
+      function(from,to){
+
+        new(to,
+            recordType = "unkown curve type",
+            curveType = NA_character_,
+            data = from,
+            info = list())
+
+      })
+
+
+setAs("RLum.Data.Curve", "matrix",
+      function(from){
+        from@data
+
+      })
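+
+## A minimal usage sketch: round-trip a two-column matrix through
+## RLum.Data.Curve; the recordType defaults to "unknown curve type".
+# m <- matrix(c(1:10, rnorm(10)), ncol = 2)
+# curve <- as(m, "RLum.Data.Curve")
+# head(as(curve, "data.frame"))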
+
+
+####################################################################################################
+###show()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Show structure of \code{RLum.Data.Curve} object
+#' @export
+setMethod("show",
+          signature(object = "RLum.Data.Curve"),
+          function(object){
+
+
+            ##print information
+
+            cat("\n [RLum.Data.Curve]")
+            cat("\n\t recordType:", object at recordType)
+            cat("\n\t curveType:",  object at curveType)
+            cat("\n\t measured values:", length(object at data[,1]))
+            cat("\n\t .. range of x-values:", suppressWarnings(range(object at data[,1])))
+            cat("\n\t .. range of y-values:",  suppressWarnings(range(object at data[,2])))
+            cat("\n\t additional info elements:", length(object at info))
+            #cat("\n\t\t >> names:", names(object at info))
+          }
+)
+
+
+
+
+####################################################################################################
+###set_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Construction method for RLum.Data.Curve object. The slot info is optional
+#' and predefined as empty list by default.
+#'
+#' @param class [\code{set_RLum}] \code{\link{character}} (\bold{required}): name of the \code{RLum} class to create
+#' @param originator [\code{set_RLum}] \code{\link{character}} (automatic): contains the name of the calling function
+#' (the function that produces this object); can be set manually.
+#' @param .uid [\code{set_RLum}] \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#' @param .pid [\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#' @param recordType [\code{set_RLum}] \code{\link{character}} (optional): record type (e.g., "OSL")
+#' @param curveType [\code{set_RLum}] \code{\link{character}} (optional): curve type (e.g., "predefined" or "measured")
+#' @param data [\code{set_RLum}] \code{\link{matrix}} (\bold{required}): raw curve data.
+#' If \code{data} itself is a \code{RLum.Data.Curve}-object this can be used to re-construct the object
+#' (s. Details)
+#' @param info [\code{set_RLum}] \code{\link{list}} (optional): info elements
+#'
+#' @return
+#'
+#' \bold{\code{set_RLum}}\cr
+#'
+#' Returns an \code{\linkS4class{RLum.Data.Curve}} object.
+#'
+#' @export
+setMethod(
+  "set_RLum",
+  signature = signature("RLum.Data.Curve"),
+
+  definition = function(class,
+                        originator,
+                        .uid,
+                        .pid,
+                        recordType = NA_character_,
+                        curveType = NA_character_,
+                        data = matrix(0, ncol = 2),
+                        info = list()) {
+
+    ##The case where an RLum.Data.Curve object is provided in 'data';
+    ##this allows the object to be re-constructed
+    if (is(data, "RLum.Data.Curve")) {
+
+      ##check for missing curveType
+      if (missing(curveType)) {
+        curveType <- data@curveType
+
+      }
+
+      ##check for missing recordType
+      if(missing(recordType)){
+        recordType <- data@recordType
+
+      }
+
+      ##check for missing data ... not possible as data is the object itself
+
+      ##check for missing info
+      if(missing(info)){
+       info <- data@info
+
+      }
+
+      ##check for missing .uid
+      if(missing(.uid)){
+        .uid <- data@.uid
+
+      }
+
+      ##check for missing .pid
+      if(missing(.pid)){
+        .pid <- data@.pid
+
+      }
+
+      ##set empty class object
+      newRLumDataCurve <- new("RLum.Data.Curve")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataCurve@recordType = recordType
+      newRLumDataCurve@curveType = curveType
+      newRLumDataCurve@data = data@data
+      newRLumDataCurve@info = info
+      newRLumDataCurve@.uid = .uid
+      newRLumDataCurve@.pid = .pid
+
+      return(newRLumDataCurve)
+
+    }else{
+
+      ##set empty class object
+      newRLumDataCurve <- new("RLum.Data.Curve")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataCurve@originator = originator
+      newRLumDataCurve@recordType = recordType
+      newRLumDataCurve@curveType = curveType
+      newRLumDataCurve@data = data
+      newRLumDataCurve@info = info
+      newRLumDataCurve@.uid = .uid
+      newRLumDataCurve@.pid = .pid
+
+      return(newRLumDataCurve)
+
+    }
+
+  }
+)
+
+####################################################################################################
+###get_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Accessor method for RLum.Data.Curve object. The argument info.object is
+#' optional to directly access the info elements. If no info element name is
+#' provided, the raw curve data (matrix) will be returned.
+#'
+#' @param object [\code{show_RLum}][\code{get_RLum}][\code{length_RLum}][\code{names_RLum}] an object of
+#' class \code{\linkS4class{RLum.Data.Curve}} (\bold{required})
+#' @param info.object [\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+#' element
+#'
+#' @return
+#'
+#' \bold{\code{get_RLum}}\cr
+#'
+#' (1) A \code{\link{matrix}} with the curve values or \cr
+#' (2) only the info object if \code{info.object} was set.\cr
+#'
+#' @export
+setMethod("get_RLum",
+          signature("RLum.Data.Curve"),
+          definition = function(object, info.object = NULL) {
+
+           ##Check if function is of type RLum.Data.Curve
+           if(is(object, "RLum.Data.Curve") == FALSE){
+
+              stop("[get_RLum] Function valid for 'RLum.Data.Curve' objects only!")
+
+           }
+
+           ##if info.object == NULL just show the curve values
+          if(!is.null(info.object)) {
+
+              if(info.object %in% names(object@info)){
+
+                unlist(object@info[info.object])
+
+              }else{
+
+                ##check for entries
+                if(length(object@info) == 0){
+
+                  warning("[get_RLum] This RLum.Data.Curve object has no info objects! NULL returned!")
+                  return(NULL)
+
+                }else{
+
+                  ##grep names
+                  temp.element.names <- paste(names(object@info), collapse = ", ")
+
+                  warning.text <- paste("[get_RLum] Invalid info.object name. Valid names are:", temp.element.names)
+
+                  warning(warning.text, call. = FALSE)
+                  return(NULL)
+
+                }
+
+              }
+
+
+             }else{
+
+                    object@data
+
+             }
+          })
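+
+## A minimal usage sketch: access the raw matrix and a (hypothetical) info
+## element named "position".
+# curve <- set_RLum(class = "RLum.Data.Curve",
+#                   data = matrix(c(1:5, 5:1), ncol = 2),
+#                   info = list(position = 1))
+# get_RLum(curve)                            # the raw curve matrix
+# get_RLum(curve, info.object = "position")  # the info element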
+
+####################################################################################################
+###length_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Returns the length of the curve object, which is the maximum of the
+#' time/temperature values of the curve (corresponding to the stimulation length)
+#'
+#' @return
+#' \bold{\code{length_RLum}}\cr
+#'
+#' The maximum time/temperature value of the curve (column 1 of the data matrix)
+#'
+#' @export
+setMethod("length_RLum",
+          "RLum.Data.Curve",
+          function(object){
+            max(object@data[,1])
+
+          })
+
+####################################################################################################
+###names_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Returns the names of the info elements coming along with this curve object
+#'
+#' @return
+#'
+#' \bold{\code{names_RLum}}\cr
+#'
+#' Names of the info elements (slot \code{info})
+#'
+#' @export
+setMethod("names_RLum",
+          "RLum.Data.Curve",
+          function(object){
+            names(object@info)
+
+          })
+
+####################################################################################################
+###bin_RLum.Data()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Allows binning of specific objects
+#'
+#' @param bin_size [\code{bin_RLum}] \code{\link{integer}} (with default): set number of channels
+#' used for each bin, e.g. \code{bin_size = 2} means that two channels are binned.
+#'
+#' @return
+#'
+#' \bold{\code{bin_RLum.Data}}\cr
+#'
+#' Same object as input, after applying the binning.
+#'
+#' @export
+setMethod(f = "bin_RLum.Data",
+          signature = "RLum.Data.Curve",
+          function(object, bin_size = 2) {
+
+            ##check for invalid bin_size values
+            if (!is.null(bin_size) && bin_size > 0) {
+              ##set stepping vector
+              stepping <- seq(1, nrow(object@data), by = bin_size)
+
+              ##get bin vector
+              bin_vector <- object@data[, 2]
+
+              ##set desired length of the vector
+              ##to avoid unwanted effects later
+              length(bin_vector) <-
+                suppressWarnings(prod(dim(matrix(
+                  bin_vector, ncol = length(stepping)
+                ))))
+
+              ##define new matrix for binning
+              bin_matrix <-
+                matrix(bin_vector, ncol = length(stepping))
+
+              ##calculate column sums and replace matrix
+              ##this is much faster than any apply loop
+              object@data <-
+                matrix(c(object@data[stepping], colSums(bin_matrix, na.rm = TRUE)), ncol = 2)
+
+              ##set matrix
+              return(set_RLum(class = "RLum.Data.Curve",
+                              data = object))
+            } else{
+              warning("Argument 'bin_size' invald, nothing was done!")
+
+              ##set matrix
+              return(set_RLum(class = "RLum.Data.Curve",
+                              data = object))
+
+            }
+
+          })
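+
+## A minimal usage sketch: bin a synthetic 100-channel curve by a factor of
+## two; x-values are taken from every second channel, y-values summed pairwise.
+# curve <- set_RLum(class = "RLum.Data.Curve",
+#                   data = matrix(c(1:100, rpois(100, 10)), ncol = 2))
+# binned <- bin_RLum.Data(curve, bin_size = 2)
+# nrow(get_RLum(binned))  # 50 channels left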
diff --git a/R/RLum.Data.Image-class.R b/R/RLum.Data.Image-class.R
new file mode 100644
index 0000000..35311bc
--- /dev/null
+++ b/R/RLum.Data.Image-class.R
@@ -0,0 +1,345 @@
+#' @include get_RLum.R set_RLum.R names_RLum.R
+NULL
+
+#' Class \code{"RLum.Data.Image"}
+#'
+#' Class for representing luminescence image data (TL/OSL/RF). Such data are for example produced
+#' by the function \code{\link{read_SPE2R}}
+#'
+#' @name RLum.Data.Image-class
+#'
+#' @docType class
+#'
+#' @slot recordType Object of class \code{\link{character}}
+#' containing the type of the curve (e.g. "OSL image", "TL image")
+#'
+#' @slot curveType Object of class \code{\link{character}} containing curve type, allowed values
+#' are measured or predefined
+#'
+#' @slot data Object of class \code{\link[raster]{brick}} containing images (raster data).
+#'
+#' @slot info Object of class \code{\link{list}} containing further meta information objects
+#'
+#' @note The class should only contain data for a set of images. For additional
+#' elements the slot \code{info} can be used.
+#'
+#' @section Objects from the Class: Objects can be created by calls of the form
+#' \code{set_RLum("RLum.Data.Image", ...)}.
+#'
+#' @section Class version: 0.4.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+#' \code{\link{plot_RLum}}, \code{\link{read_SPE2R}}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("RLum.Data.Image")
+#'
+#' ##create empty RLum.Data.Image object
+#' set_RLum(class = "RLum.Data.Image")
+#'
+#' @importClassesFrom raster RasterBrick
+#' @export
+setClass(
+  "RLum.Data.Image",
+  slots = list(
+    recordType = "character",
+    curveType = "character",
+    data = "RasterBrick",
+    info = "list"
+  ),
+  contains = "RLum.Data",
+  prototype = list (
+    recordType = character(),
+    curveType = character(),
+    data = raster::brick(raster::raster(matrix())),
+    info = list()
+  )
+)
+
+
+####################################################################################################
+###as()
+####################################################################################################
+
+##DATA.FRAME
+##COERCE RLum.Data.Image >> data.frame AND data.frame >> RLum.Data.Image
+#' as()
+#'
+#' for \code{[RLum.Data.Image]}
+#'
+#' \bold{[RLum.Data.Image]}\cr
+#'
+#' \tabular{ll}{
+#'  \bold{from} \tab \bold{to}\cr
+#'   \code{data.frame} \tab \code{data.frame}\cr
+#'   \code{matrix} \tab \code{matrix}
+#'
+#' }
+#'
+#' @name as
+#'
+#'
+setAs("data.frame", "RLum.Data.Image",
+      function(from,to){
+
+        new(to,
+            recordType = "unkown curve type",
+            curveType = "NA",
+            data = as.matrix(from),
+            info = list())
+      })
+
+setAs("RLum.Data.Image", "data.frame",
+      function(from){
+
+        data.frame(x = from@data@values[seq(1,length(from@data@values), by = 2)],
+                   y = from@data@values[seq(2,length(from@data@values), by = 2)])
+
+      })
+
+
+##MATRIX
+##COERCE RLum.Data.Image >> matrix AND matrix >> RLum.Data.Image
+setAs("matrix", "RLum.Data.Image",
+      function(from,to){
+
+        new(to,
+            recordType = "unknown curve type",
+            curveType = "NA",
+            data = raster::brick(raster::raster(as.matrix(from))),
+            info = list())
+      })
+
+setAs("RLum.Data.Image", "matrix",
+      function(from){
+
+        ##only the first object is converted
+        as.matrix(from[[1]])
+
+      })
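+
+## Illustrative sketch (not part of the upstream code): how the coercion
+## methods above are used in practice. The matrix 'm' is a hypothetical single
+## image frame; note the back-coercion returns only the first frame.
+if (FALSE) {
+  m <- matrix(runif(100), nrow = 10)   # hypothetical 10 x 10 image frame
+  img <- as(m, "RLum.Data.Image")      # stored internally as a raster::brick
+  img@data                             # the RasterBrick holding the frame
+  as(img, "matrix")                    # back-coercion of the first frame only
+}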
+
+
+####################################################################################################
+###show()
+####################################################################################################
+#' @describeIn RLum.Data.Image
+#' Show structure of \code{RLum.Data.Image} object
+#' @export
+setMethod("show",
+          signature(object = "RLum.Data.Image"),
+          function(object){
+
+            x.rows <- object@data@ncols
+            y.cols <- object@data@nrows
+            z.range <- paste(min(object@data@data@min),":",max(object@data@data@max))
+
+            ##print information
+
+            cat("\n [RLum.Data.Image]")
+            cat("\n\t recordType:", object@recordType)
+            cat("\n\t curveType:",  object@curveType)
+            cat("\n\t .. recorded frames:", length(object@data@data@names))
+            cat("\n\t .. .. pixel per frame:", x.rows*y.cols)
+            cat("\n\t .. .. x dimension [px]:", x.rows)
+            cat("\n\t .. .. y dimension [px]:", y.cols)
+            cat("\n\t .. .. full pixel value range:", z.range)
+            cat("\n\t additional info elements:", length(object@info))
+            #cat("\n\t\t >> names:", names(object at info))
+          }
+)
+
+
+####################################################################################################
+###set_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Image
+#' Construction method for RLum.Data.Image object. The slot info is optional
+#' and predefined as empty list by default.
+#'
+#' @param class [\code{set_RLum}] \code{\link{character}}: name of the \code{RLum} class to create
+#' @param originator [\code{set_RLum}] \code{\link{character}} (automatic):
+#' contains the name of the calling function (the function that produces this object); can be set manually.
+#' @param .uid [\code{set_RLum}] \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#' @param .pid [\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#' @param recordType [\code{set_RLum}] \code{\link{character}}: record type (e.g. "OSL")
+#' @param curveType [\code{set_RLum}] \code{\link{character}}: curve type (e.g. "predefined" or "measured")
+#' @param data [\code{set_RLum}] \code{\link{matrix}}: raw image data. If data is of type \code{RLum.Data.Image}
+#' this can be used to re-construct the object.
+#' @param info [\code{set_RLum}] \code{\link{list}}: info elements
+#'
+#' @return
+#'
+#' \bold{\code{set_RLum}}\cr
+#'
+#' Returns an object from class \code{RLum.Data.Image}
+#'
+#' @export
+setMethod(
+  "set_RLum",
+  signature = signature("RLum.Data.Image"),
+
+  definition = function(class,
+                        originator,
+                        .uid,
+                        .pid,
+                        recordType = "Image",
+                        curveType = NA_character_,
+                        data = raster::brick(raster::raster(matrix())),
+                        info = list()) {
+    ##The case where an RLum.Data.Image object is provided as 'data';
+    ##it is then used to re-construct the object
+
+    if (is(data, "RLum.Data.Image")) {
+      ##check for missing curveType
+      if (missing(curveType)) {
+        curveType <- data@curveType
+
+      }
+
+      ##check for missing recordType
+      if (missing(recordType)) {
+        recordType <- data@recordType
+
+      }
+
+      ##check for missing data ... not possible as data is the object itself
+
+      ##check for missing info
+      if (missing(info)) {
+        info <- data@info
+
+      }
+
+      ##check for missing .uid
+      if (missing(.uid)) {
+        .uid <- data@.uid
+
+      }
+
+      ##check for missing .pid
+      if (missing(.pid)) {
+        .pid <- data@.pid
+
+      }
+
+      ##create an empty object of this class
+      newRLumDataImage <- new("RLum.Data.Image")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataImage@recordType = recordType
+      newRLumDataImage@curveType = curveType
+      newRLumDataImage@data = data@data
+      newRLumDataImage@info = info
+      newRLumDataImage@.uid = data@.uid
+      newRLumDataImage@.pid = data@.pid
+
+      return(newRLumDataImage)
+
+    } else{
+      ##create an empty object of this class
+      newRLumDataImage <- new("RLum.Data.Image")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataImage@originator = originator
+      newRLumDataImage@recordType = recordType
+      newRLumDataImage@curveType = curveType
+      newRLumDataImage@data = data
+      newRLumDataImage@info = info
+      newRLumDataImage@.uid = .uid
+      newRLumDataImage@.pid = .pid
+
+      return(newRLumDataImage)
+
+    }
+
+  }
+)
+
+####################################################################################################
+###get_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Image
+#' Accessor method for RLum.Data.Image object. The argument info.object is
+#'  optional to directly access the info elements. If no info element name is
+#'  provided, the raw image data (RasterBrick) will be returned.
+#'
+#' @param object [\code{show_RLum}] [\code{get_RLum}] [\code{names_RLum}] an object
+#' of class \code{\linkS4class{RLum.Data.Image}}
+#' @param info.object [\code{get_RLum}] \code{\link{character}} name of the info object to be returned
+#'
+#' @return
+#'
+#' \bold{\code{get_RLum}}\cr
+#'
+#' (1) Returns the data object (\code{\link[raster]{brick}})\cr
+#' (2) only the info object if \code{info.object} was set.\cr
+#'
+#' @export
+setMethod("get_RLum",
+          signature("RLum.Data.Image"),
+          definition = function(object, info.object) {
+
+            ##Check if function is of type RLum.Data.Image
+            if(is(object, "RLum.Data.Image") == FALSE){
+
+              stop("[get_RLum] Function valid for 'RLum.Data.Image' objects only!")
+
+            }
+
+            ##if missing info.object just show the curve values
+
+            if(missing(info.object) == FALSE){
+
+              if(is(info.object, "character") == FALSE){
+                stop("[get_RLum] 'info.object' has to be a character!")
+              }
+
+              if(info.object %in% names(object@info) == TRUE){
+
+                unlist(object@info[info.object])
+
+              }else{
+
+                ##grep names
+                temp.element.names <- paste(names(object@info), collapse = ", ")
+
+                stop.text <- paste("[get_RLum] Invalid element name. Valid names are:", temp.element.names)
+
+                stop(stop.text)
+
+              }
+
+
+            }else{
+
+              object@data
+
+            }
+          })
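+
+## Illustrative sketch (not part of the upstream code): the two access paths
+## of get_RLum() above. The info element 'exposure' is hypothetical.
+if (FALSE) {
+  img <- set_RLum(class = "RLum.Data.Image", info = list(exposure = 0.1))
+  get_RLum(img)                            # no info.object: the RasterBrick
+  get_RLum(img, info.object = "exposure")  # a single info element
+}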
+
+####################################################################################################
+###names_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Image
+#' Returns the names of the info elements coming along with this image object
+#'
+#' @return
+#'
+#' \bold{\code{names_RLum}}\cr
+#'
+#' Returns the names of the info elements
+#'
+#' @export
+setMethod("names_RLum",
+          "RLum.Data.Image",
+          function(object) {
+            names(object@info)
+
+          })
diff --git a/R/RLum.Data.Spectrum-class.R b/R/RLum.Data.Spectrum-class.R
new file mode 100644
index 0000000..a2150e4
--- /dev/null
+++ b/R/RLum.Data.Spectrum-class.R
@@ -0,0 +1,350 @@
+#' @include get_RLum.R set_RLum.R names_RLum.R
+NULL
+
+#' Class \code{"RLum.Data.Spectrum"}
+#'
+#' Class for representing luminescence spectra data (TL/OSL/RF).
+#'
+#' @name RLum.Data.Spectrum-class
+#'
+#' @docType class
+#'
+#' @slot recordType Object of class \code{\link{character}} containing the type of the curve (e.g. "TL" or "OSL")
+#'
+#' @slot curveType Object of class \code{\link{character}} containing curve type, allowed values
+#' are measured or predefined
+#'
+#' @slot data Object of class \code{\link{matrix}} containing spectrum (count) values.
+#' Row labels indicate wavelength/pixel values, column labels are temperature or time values.
+#'
+#' @slot info Object of class \code{\link{list}} containing further meta information objects
+#'
+#' @note The class should only contain data for a single spectrum data set. For
+#' additional elements the slot \code{info} can be used. Objects from this class are automatically
+#' created by, e.g., \code{\link{read_XSYG2R}}
+#'
+#' @section Objects from the Class: Objects can be created by calls of the form
+#' \code{set_RLum("RLum.Data.Spectrum", ...)}.
+#'
+#' @section Class version: 0.4.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+#' \code{\link{plot_RLum}}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("RLum.Data.Spectrum")
+#'
+#' ##show example data
+#' data(ExampleData.XSYG, envir = environment())
+#' TL.Spectrum
+#'
+#' ##show data matrix
+#' get_RLum(TL.Spectrum)
+#'
+#' ##plot spectrum
+#' \dontrun{
+#' plot_RLum(TL.Spectrum)
+#' }
+#' @export
+setClass(
+  "RLum.Data.Spectrum",
+  slots = list(
+    recordType = "character",
+    curveType = "character",
+    data = "matrix",
+    info = "list"
+  ),
+  contains = "RLum.Data",
+  prototype = list (
+    recordType = NA_character_,
+    curveType = NA_character_,
+    data = matrix(),
+    info = list()
+  )
+)
+
+
+####################################################################################################
+###as()
+####################################################################################################
+##data.frame
+##COERCE RLum.Data.Spectrum >> data.frame AND data.frame >> RLum.Data.Spectrum
+#' as()
+#'
+#' for \code{[RLum.Data.Spectrum]}
+#'
+#'
+#' \bold{[RLum.Data.Spectrum]}\cr
+#'
+#' \tabular{ll}{
+#'  \bold{from} \tab \bold{to}\cr
+#'   \code{data.frame} \tab \code{data.frame}\cr
+#'   \code{matrix} \tab \code{matrix}
+#'
+#' }
+#'
+#'
+#' @name as
+#'
+#'
+setAs("data.frame", "RLum.Data.Spectrum",
+      function(from,to){
+
+        new(to,
+            recordType = NA_character_,
+            curveType = NA_character_,
+            data = as.matrix(from),
+            info = list())
+      })
+
+setAs("RLum.Data.Spectrum", "data.frame",
+      function(from){
+        as.data.frame(from at data)
+
+      })
+
+
+##MATRIX
+##COERCE RLum.Data.Spectrum >> matrix AND matrix >> RLum.Data.Spectrum
+setAs("matrix", "RLum.Data.Spectrum",
+      function(from,to){
+        new(to,
+            recordType = NA_character_,
+            curveType = NA_character_,
+            data = from,
+            info = list())
+      })
+
+setAs("RLum.Data.Spectrum", "matrix",
+      function(from){
+        from at data
+
+      })
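+
+## Illustrative sketch (not part of the upstream code): building a spectrum
+## from a labelled matrix, following the slot documentation above (row names =
+## wavelength/pixel, column names = time/temperature). All values below are
+## made up for illustration.
+if (FALSE) {
+  m <- matrix(rpois(200 * 10, lambda = 50), nrow = 200, ncol = 10)
+  rownames(m) <- seq(300, 698, by = 2)   # wavelength axis [nm]
+  colnames(m) <- 1:10                    # frame time/temperature axis
+  spec <- as(m, "RLum.Data.Spectrum")    # matrix >> RLum.Data.Spectrum
+  identical(as(spec, "matrix"), m)       # lossless back-coercion via slot data
+}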
+
+
+####################################################################################################
+###show()
+####################################################################################################
+#' @describeIn RLum.Data.Spectrum
+#' Show structure of \code{RLum.Data.Spectrum} object
+#' @export
+setMethod("show",
+          signature(object = "RLum.Data.Spectrum"),
+          function(object){
+
+            x.range <- suppressWarnings(range(as.numeric(rownames(object@data))))
+            y.range <- suppressWarnings(range(as.numeric(colnames(object@data))))
+            z.range <- range(object@data)
+
+            ##print information
+
+            cat("\n [RLum.Data.Spectrum]")
+            cat("\n\t recordType:", object@recordType)
+            cat("\n\t curveType:",  object@curveType)
+            cat("\n\t .. recorded frames:", length(object@data[1,]))
+            cat("\n\t .. .. measured values per frame:", length(object@data[,1]))
+            cat("\n\t .. .. range wavelength/pixel:", x.range)
+            cat("\n\t .. .. range time/temp.:", y.range)
+            cat("\n\t .. .. range count values:", z.range)
+            cat("\n\t additional info elements:", length(object@info))
+            #cat("\n\t\t >> names:", names(object at info))
+          }
+)
+
+
+####################################################################################################
+###set_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Spectrum
+#' Construction method for RLum.Data.Spectrum object. The slot info is optional
+#'  and predefined as empty list by default.
+#'
+#' @param class [\code{set_RLum}] \code{\link{character}} (automatic): name of the \code{RLum} class to create.
+#' @param originator \code{\link{character}} (automatic): contains the name of the calling function
+#' (the function that produces this object); can be set manually.
+#' @param .uid [\code{set_RLum}] \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#' @param .pid [\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#' @param recordType [\code{set_RLum}] \code{\link{character}}: record type (e.g. "OSL")
+#' @param curveType [\code{set_RLum}] \code{\link{character}}: curve type (e.g. "predefined" or "measured")
+#' @param data [\code{set_RLum}] \code{\link{matrix}}: raw curve data. If data is of
+#' type \code{RLum.Data.Spectrum}, this can be used to re-construct the object.
+#' @param info [\code{set_RLum}] \code{\link{list}}: info elements
+#'
+#' @return
+#'
+#' \bold{\code{set_RLum}}\cr
+#'
+#' An object from the class \code{RLum.Data.Spectrum}
+#'
+#' @export
+setMethod(
+  "set_RLum",
+  signature = signature("RLum.Data.Spectrum"),
+  definition = function(class,
+                        originator,
+                        .uid,
+                        .pid,
+                        recordType = "Spectrum",
+                        curveType = NA_character_,
+                        data = matrix(),
+                        info = list()) {
+
+    ##The case where an RLum.Data.Spectrum object is provided as 'data';
+    ##it is then used to re-construct the object
+
+    if (is(data, "RLum.Data.Spectrum")) {
+      ##check for missing curveType
+      if (missing(curveType)) {
+        curveType <- data@curveType
+
+      }
+
+      ##check for missing recordType
+      if (missing(recordType)) {
+        recordType <- data@recordType
+
+      }
+
+      ##check for missing data ... not possible as data is the object itself
+
+      ##check for missing info
+      if (missing(info)) {
+        info <- data@info
+
+      }
+
+      ##check for missing .uid
+      if (missing(.uid)) {
+        .uid <- data@.uid
+
+      }
+
+      ##check for missing .pid
+      if (missing(.pid)) {
+        .pid <- data@.pid
+
+      }
+
+      ##create an empty object of this class
+      newRLumDataSpectrum <- new("RLum.Data.Spectrum")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataSpectrum@recordType = recordType
+      newRLumDataSpectrum@curveType = curveType
+      newRLumDataSpectrum@data = data@data
+      newRLumDataSpectrum@info = info
+      newRLumDataSpectrum@.uid = data@.uid
+      newRLumDataSpectrum@.pid = data@.pid
+
+      return(newRLumDataSpectrum)
+
+    } else{
+      ##create an empty object of this class
+      newRLumDataSpectrum <- new("RLum.Data.Spectrum")
+
+      ##fill - this is the faster way, filling in new() costs ...
+      newRLumDataSpectrum@originator = originator
+      newRLumDataSpectrum@recordType = recordType
+      newRLumDataSpectrum@curveType = curveType
+      newRLumDataSpectrum@data = data
+      newRLumDataSpectrum@info = info
+      newRLumDataSpectrum@.uid = .uid
+      newRLumDataSpectrum@.pid = .pid
+
+      return(newRLumDataSpectrum)
+
+    }
+
+  }
+)
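+
+## Illustrative sketch (not part of the upstream code): the re-construction
+## behaviour documented for set_RLum() above. Whether an explicitly supplied
+## argument overrides the copied slot depends on missing() as used in the
+## method; treat the expected output as an assumption, not a guarantee.
+if (FALSE) {
+  spec <- set_RLum(class = "RLum.Data.Spectrum", recordType = "TL")
+  ## passing the existing object as 'data' copies data, info, .uid and .pid,
+  ## while the explicitly supplied recordType replaces the old value
+  spec2 <- set_RLum(class = "RLum.Data.Spectrum", data = spec, recordType = "OSL")
+  spec2@recordType   # expected: "OSL"
+}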
+
+####################################################################################################
+###get_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Spectrum
+#' Accessor method for RLum.Data.Spectrum object. The argument info.object
+#' is optional to directly access the info elements. If no info element name
+#' is provided, the raw curve data (matrix) will be returned
+#'
+#' @param object [\code{show_RLum}][\code{get_RLum}][\code{names_RLum}] an object of
+#'  class \code{\linkS4class{RLum.Data.Spectrum}}
+#' @param info.object [\code{get_RLum}] \code{\link{character}} (optional): the name of the info
+#' object to be called
+#'
+#' @return
+#'
+#' \bold{\code{get_RLum}}\cr
+#'
+#' (1) A \code{\link{matrix}} with the spectrum values or \cr
+#' (2) only the info object if \code{info.object} was set.\cr
+#'
+#' @export
+setMethod("get_RLum",
+          signature("RLum.Data.Spectrum"),
+          definition = function(object, info.object) {
+
+            ##Check if function is of type RLum.Data.Spectrum
+            if(is(object, "RLum.Data.Spectrum") == FALSE){
+
+              stop("[get_RLum] Function valid for 'RLum.Data.Spectrum' objects only!")
+
+            }
+
+            ##if missing info.object just show the curve values
+
+            if(missing(info.object) == FALSE){
+
+              if(is(info.object, "character") == FALSE){
+                stop("[get_RLum] 'info.object' has to be a character!")
+              }
+
+              if(info.object %in% names(object@info) == TRUE){
+
+                unlist(object@info[info.object])
+
+              }else{
+
+                ##grep names
+                temp.element.names <- paste(names(object@info), collapse = ", ")
+
+                stop.text <- paste("[get_RLum] Invalid element name. Valid names are:", temp.element.names)
+
+                stop(stop.text)
+
+              }
+
+
+            }else{
+
+              object@data
+
+            }
+          })
+
+
+####################################################################################################
+###names_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Spectrum
+#' Returns the names of the info elements coming along with this spectrum object
+#'
+#' @return
+#'
+#' \bold{\code{names_RLum}}\cr
+#'
+#' The names of the info objects
+#'
+#' @export
+setMethod("names_RLum",
+          "RLum.Data.Spectrum",
+          function(object){
+            names(object@info)
+
+          })
diff --git a/R/RLum.Results-class.R b/R/RLum.Results-class.R
new file mode 100644
index 0000000..2fcdc1c
--- /dev/null
+++ b/R/RLum.Results-class.R
@@ -0,0 +1,391 @@
+#' @include get_RLum.R set_RLum.R length_RLum.R names_RLum.R
+NULL
+
+#' Class \code{"RLum.Results"}
+#'
+#' Object class contains results data from functions (e.g., \code{\link{analyse_SAR.CWOSL}}).
+#'
+#' @name RLum.Results-class
+#'
+#' @docType class
+#'
+#' @slot data Object of class "list" containing output data
+#'
+#' @note The class is intended to store results from functions to be used by
+#' other functions. The data in the object should always be accessed by the
+#' method \code{get_RLum}.
+#'
+#' @section Objects from the Class: Objects can be created by calls of the form
+#' \code{new("RLum.Results", ...)}.
+#'
+#' @section Class version: 0.5.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum}}, \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
+#'
+#' @keywords classes methods
+#'
+#' @examples
+#'
+#' showClass("RLum.Results")
+#'
+#' ##create an empty object from this class
+#' set_RLum(class = "RLum.Results")
+#'
+#' ##use another function to show how it works
+#'
+#' ##Basic calculation of the dose rate for a specific date
+#'  dose.rate <-  calc_SourceDoseRate(
+#'    measurement.date = "2012-01-27",
+#'    calib.date = "2014-12-19",
+#'    calib.dose.rate = 0.0438,
+#'    calib.error = 0.0019)
+#'
+#' ##show object
+#' dose.rate
+#'
+#' ##get results
+#' get_RLum(dose.rate)
+#'
+#' ##get parameters used for the calculation from the same object
+#' get_RLum(dose.rate, data.object = "parameters")
+#'
+#' ##alternatively objects can be accessed using S3 generics, such as
+#' dose.rate$parameters
+#'
+#' @export
+setClass(
+  Class = "RLum.Results",
+  slots = list(data = "list"),
+  contains = "RLum",
+  prototype = list (data = list())
+)
+
+
+####################################################################################################
+###as()
+####################################################################################################
+##LIST
+##COERCE RLum.Results >> list AND list >> RLum.Results
+#' as() - RLum-object coercion
+#'
+#' for \code{[RLum.Results]}
+#'
+#' \bold{[RLum.Results]}\cr
+#'
+#' \tabular{ll}{
+#'  \bold{from} \tab \bold{to}\cr
+#'   \code{list} \tab \code{list}\cr
+#' }
+#'
+#' Provided that the \code{\link{list}} consists of \code{\linkS4class{RLum.Results}} objects.
+#'
+#' @name as
+#'
+#'
+setAs("list", "RLum.Results",
+      function(from,to){
+
+        new(to,
+            originator = "coercion",
+            data = from)
+
+      })
+
+setAs("RLum.Results", "list",
+      function(from){
+
+        from@data
+
+      })
+
+####################################################################################################
+###show()
+####################################################################################################
+#' @describeIn RLum.Results
+#' Show structure of \code{RLum.Results} object
+#' @export
+setMethod("show",
+          signature(object = "RLum.Results"),
+          function(object) {
+            ##data elements
+            temp.names <- names(object@data)
+
+            if (length(object) > 0) {
+              temp.type <- sapply(1:length(object@data),
+                                  function(x) {
+                                    paste("\t .. $", temp.names[x],
+                                          " : ",
+                                          is(object@data[[x]])[1],
+                                          sep = "")
+
+
+                                  })
+            } else{
+              temp.type <- paste0("\t .. $", temp.names, " : ", is(object@data)[1])
+
+            }
+
+            temp.type <- paste(temp.type, collapse = "\n")
+
+
+            ##print information
+            cat("\n [RLum.Results]")
+            cat("\n\t originator: ", object@originator, "()", sep = "")
+            cat("\n\t data:", length(object@data))
+            cat("\n", temp.type)
+            cat("\n\t additional info elements: ", length(object@info))
+
+          })
+
+
+
+####################################################################################################
+###set_RLum()
+####################################################################################################
+#' @describeIn RLum.Results
+#' Construction method for an RLum.Results object.
+#'
+#' @param class [\code{set_RLum}] \code{\link{character}} \bold{(required)}: name of the \code{RLum} class to create
+#' @param originator [\code{set_RLum}] \code{\link{character}} (automatic): contains the
+#' name of the calling function
+#' (the function that produces this object); can be set manually.
+#' @param .uid [\code{set_RLum}] \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#' @param .pid [\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#' @param data [\code{set_RLum}] \code{\link{list}} (optional): a list containing the data to
+#' be stored in the object
+#' @param info [\code{set_RLum}] \code{\link{list}} (optional): a list containing additional
+#' info data for the object
+#' @return
+#'
+#' \bold{\code{set_RLum}}:\cr
+#'
+#' Returns an object from the class \code{\linkS4class{RLum.Results}}\cr
+#'
+#' @export
+setMethod("set_RLum",
+          signature = signature("RLum.Results"),
+
+          function(class,
+                   originator,
+                   .uid,
+                   .pid,
+                   data = list(),
+                   info = list()) {
+
+            ##create new class
+            newRLumResults <- new("RLum.Results")
+
+            ##fill object
+            newRLumResults@originator <- originator
+            newRLumResults@data <- data
+            newRLumResults@info <- info
+            newRLumResults@.uid <- .uid
+            newRLumResults@.pid <- .pid
+
+            return(newRLumResults)
+
+          })
+
+
+####################################################################################################
+###get_RLum()
+####################################################################################################
+#' @describeIn RLum.Results
+#' Accessor method for RLum.Results object. The argument data.object allows
+#' directly accessing objects delivered within the slot data. The default
+#' return object depends on the object originator (e.g., \code{fit_LMCurve}).
+#' If nothing is specified, the first \code{data.object} is always returned.
+#'
+#' Note: Detailed specification should be made in combination with the originator slot in the
+#' receiving function if results are piped.
+#'
+#' @param object [\code{get_RLum}] \code{\linkS4class{RLum.Results}} (required): an object of class
+#' \code{\linkS4class{RLum.Results}} to be evaluated
+#'
+#' @param data.object [\code{get_RLum}] \code{\link{character}} or
+#' \code{\link{numeric}}: name or index of the data slot to be returned
+#'
+#' @param info.object [\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+#' element
+#'
+#' @param drop [\code{get_RLum}] \code{\link{logical}} (with default): coerce to the next possible layer
+#' (which are the data objects); \code{drop = FALSE} keeps the original \code{RLum.Results}
+#'
+#' @return
+#'
+#' \bold{\code{get_RLum}}:\cr
+#'
+#' Returns: \cr
+#' (1) Data object from the specified slot \cr
+#' (2) a \code{\link{list}} of data objects from the slots if 'data.object' is a vector, or \cr
+#' (3) an \code{\linkS4class{RLum.Results}} for \code{drop = FALSE}.\cr
+#'
+#'
+#' @export
+setMethod(
+  "get_RLum",
+  signature = signature("RLum.Results"),
+  definition = function(object, data.object, info.object = NULL, drop = TRUE) {
+    ##if info.object is set, only the info objects are returned
+    if (!is.null(info.object)) {
+      if (info.object %in% names(object@info)) {
+        unlist(object@info[info.object])
+
+      } else{
+        ##check for entries
+        if (length(object@info) == 0) {
+          warning("[get_RLum] This RLum.Results object has no info objects! NULL returned!")
+          return(NULL)
+
+        } else{
+          ##grep names
+          temp.element.names <-
+            paste(names(object@info), collapse = ", ")
+
+          warning.text <-
+            paste("[get_RLum] Invalid info.object name. Valid names are:",
+                  temp.element.names)
+
+          warning(warning.text, call. = FALSE)
+          return(NULL)
+
+        }
+      }
+
+    } else{
+      if (!missing(data.object)) {
+        ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        ##CASE1: data.object is of type 'character'
+        if (is(data.object, "character")) {
+          #check if the provided names are available
+          if (all(data.object %in% names(object@data))) {
+            ##account for multiple inputs
+            if (length(data.object) > 1) {
+              temp.return <- sapply(data.object, function(x) {
+                object@data[[x]]
+
+              })
+
+            } else{
+              temp.return <- list(data.object = object@data[[data.object]])
+
+            }
+
+
+          } else{
+            error.message <- paste0(
+              "[get_RLum()] data.object(s) unknown, valid names are: ",
+              paste(names(object@data), collapse = ", ")
+
+            )
+            stop(error.message)
+
+          }
+
+        }
+
+        ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        ##CASE2: data.object is of type 'numeric'
+        else if (is(data.object, "numeric")) {
+          ##check if index is valid
+          if (max(data.object) > length(object@data)) {
+            stop("[get_RLum] 'data.object' index out of bounds!")
+
+          } else if (length(data.object) > 1) {
+            temp.return <- lapply(data.object, function(x) {
+              object@data[[x]]
+
+            })
+
+
+          } else{
+            temp.return <- list(object@data[[data.object]])
+
+          }
+
+          ##restore names as they get lost with this method
+          names(temp.return) <-
+            names(object@data)[data.object]
+
+        }
+        ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        ##CASE3: data.object is of an unsupported type
+        else{
+          stop("[get_RLum] 'data.object' has to be of type character or numeric!")
+        }
+
+        ##the CASE data.object is missing
+      } else{
+        ##return always the first object if nothing is specified
+        temp.return <- object@data[1]
+
+      }
+
+      ##CHECK whether an RLum.Results object needs to be produced ...
+      ##This will only be the case if the function has not returned something before
+      if (drop) {
+        ##we need to access the list here, otherwise we get unexpected behaviour as drop = TRUE
+        ##should always return the lowest possible element here
+        return(temp.return[[1]])
+
+      } else{
+        return(set_RLum(
+          "RLum.Results",
+          originator = object@originator,
+          data = temp.return
+        ))
+
+
+      }
+
+    }
+  }
+)
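+
+## Illustrative sketch (not part of the upstream code): name/index access and
+## the effect of 'drop' in get_RLum() above. The data elements 'De' and
+## 'parameters' are hypothetical.
+if (FALSE) {
+  res <- set_RLum(class = "RLum.Results",
+                  data = list(De = data.frame(DE = 10, DE.ERROR = 1),
+                              parameters = list(n = 100)))
+  get_RLum(res)                                    # first element, dropped
+  get_RLum(res, data.object = "parameters")        # access by name
+  get_RLum(res, data.object = 2)                   # access by index
+  get_RLum(res, data.object = "De", drop = FALSE)  # keeps the RLum.Results
+}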
+
+
+
+####################################################################################################
+###length_RLum()
+####################################################################################################
+#' @describeIn RLum.Results
+#' Returns the length of the object, i.e., number of stored data.objects
+#'
+#' @return
+#'
+#' \bold{\code{length_RLum}}\cr
+#'
+#' Returns the number of data elements in the \code{RLum.Results} object.
+#'
+#' @export
+setMethod("length_RLum",
+          "RLum.Results",
+          function(object){
+
+            length(object@data)
+
+          })
+
+####################################################################################################
+###names_RLum()
+####################################################################################################
+#' @describeIn RLum.Results
+#' Returns the names of the data.objects
+#'
+#' @return
+#'
+#' \bold{\code{names_RLum}}\cr
+#'
+#' Returns the names of the data elements in the object.
+#'
+#' @export
+setMethod("names_RLum",
+          "RLum.Results",
+          function(object){
+             names(object@data)
+
+          })
diff --git a/R/RcppExports.R b/R/RcppExports.R
new file mode 100644
index 0000000..3dce775
--- /dev/null
+++ b/R/RcppExports.R
@@ -0,0 +1,11 @@
+# Generated by using Rcpp::compileAttributes() -> do not edit by hand
+# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+.analyse_IRSARRF_SRS <- function(values_regenerated_limited, values_natural_limited, n_MC) {
+    .Call('Luminescence_analyse_IRSARRF_SRS', PACKAGE = 'Luminescence', values_regenerated_limited, values_natural_limited, n_MC)
+}
+
+.create_UID <- function() {
+    .Call('Luminescence_create_UID', PACKAGE = 'Luminescence')
+}
+
diff --git a/R/Risoe.BINfileData2RLum.Analysis.R b/R/Risoe.BINfileData2RLum.Analysis.R
new file mode 100644
index 0000000..d95b24b
--- /dev/null
+++ b/R/Risoe.BINfileData2RLum.Analysis.R
@@ -0,0 +1,315 @@
+#' Convert Risoe.BINfileData object to an RLum.Analysis object
+#'
+#' Converts values from one specific position of a Risoe.BINfileData S4-class
+#' object to an RLum.Analysis object.
+#'
+#' The \code{\linkS4class{RLum.Analysis}} object requires a set of curves for
+#' specific further protocol analyses. However, the
+#' \code{\linkS4class{Risoe.BINfileData}} usually contains a set of curves for
+#' different aliquots and different protocol types that may be mixed up.
+#' Therefore, a conversion is needed.
+#'
+#' @param object \code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
+#' \code{Risoe.BINfileData} object
+#'
+#' @param pos \code{\link{numeric}} (optional): position number of the
+#' \code{Risoe.BINfileData} object for which the curves are stored in the
+#' \code{RLum.Analysis} object. If \code{length(pos) > 1} a list of \code{RLum.Analysis} objects
+#' is returned. If nothing is provided every position will be converted. If the position is not valid \code{NA} is
+#' returned.
+#'
+#' @param grain \code{\link{vector}, \link{numeric}} (optional): grain number from
+#' the measurement to limit the converted data set (e.g., \code{grain =
+#' c(1:48)}). Please be aware that this option may lead to unwanted effects, as the output
+#' is strictly limited to the chosen grain number for all position numbers
+#'
+#' @param run \code{\link{vector}, \link{numeric}} (optional): run number from
+#' the measurement to limit the converted data set (e.g., \code{run =
+#' c(1:48)}).
+#'
+#' @param set \code{\link{vector}, \link{numeric}} (optional): set number from
+#' the measurement to limit the converted data set (e.g., \code{set =
+#' c(1:48)}).
+#'
+#' @param ltype \code{\link{vector}, \link{character}} (optional): curve type
+#' to limit the converted data. Commonly allowed values are: \code{IRSL}, \code{OSL},
+#' \code{TL}, \code{RIR}, \code{RBR} and \code{USER} (see also \code{\linkS4class{Risoe.BINfileData}})
+#'
+#' @param dtype \code{\link{vector}, \link{character}} (optional): data type to
+#' limit the converted data. Commonly allowed values are listed in \code{\linkS4class{Risoe.BINfileData}}
+#'
+#' @param protocol \code{\link{character}} (optional): sets protocol type for
+#' analysis object. Value may be used by subsequent analysis functions.
+#'
+#' @param txtProgressBar \link{logical} (with default): enables or disables
+#' \code{\link{txtProgressBar}}.
+#'
+#' @return Returns an \code{\linkS4class{RLum.Analysis}} object.
+#'
+#' @note The \code{protocol} argument of the \code{\linkS4class{RLum.Analysis}}
+#' object is set to 'unknown' if not stated otherwise.
+#'
+#' @section Function version: 0.4.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}}, \code{\link{read_BIN2R}}
+#'
+#' @references #
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##convert values for position 1
+#' Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1)
+#'
+#' @export
+Risoe.BINfileData2RLum.Analysis <- function(
+  object,
+  pos = NULL,
+  grain = NULL,
+  run = NULL,
+  set = NULL,
+  ltype = NULL,
+  dtype = NULL,
+  protocol = "unknown",
+  txtProgressBar = FALSE
+){
+
+
+  # Integrity Check ---------------------------------------------------------
+
+  if (!is(object,"Risoe.BINfileData")){
+    stop("[Risoe.BINfileData2RLum.Analysis()] Input object is not of type 'Risoe.BINfileData'.")
+  }
+
+  if (!is.null(pos) && !is(pos,"numeric")){
+    stop("[Risoe.BINfileData2RLum.Analysis()] Argument 'pos' has to be of type numeric.")
+  }
+
+  if (is.null(pos)) {
+    pos <- unique(object@METADATA[["POSITION"]])
+  } else{
+    ##get and check valid positions and remove invalid numbers from the input
+    positions.valid <- unique(object@METADATA[, "POSITION"])
+
+    if (length(setdiff(pos, positions.valid)) > 0) {
+      warning(
+        paste0(
+          "[Risoe.BINfileData2RLum.Analysis()] invalid position number skipped: ",
+          paste(setdiff(pos, positions.valid), collapse = ", ")
+        ),
+        call. = FALSE
+      )
+
+      pos <- intersect(pos, positions.valid)
+
+    }
+  }
+
+
+  # Grep run and set data ---------------------------------------------------
+
+    ##grain
+    if (is.null(grain)) {
+      grain <- unique(object@METADATA[,"GRAIN"])
+
+    }else{
+      grain.valid <- unique(object@METADATA[,"GRAIN"])
+
+      if(length(setdiff(grain, grain.valid)) > 0){
+        warning(paste0("[Risoe.BINfileData2RLum.Analysis()] invalid grain number skipped: ",
+                       paste(setdiff(grain, grain.valid), collapse = ", ")), call. = FALSE)
+
+        grain <- intersect(grain, grain.valid)
+
+      }
+
+    }
+
+    ##run
+    if (is.null(run)) {
+      run <- unique(object@METADATA[["RUN"]])
+    } else{
+      if (TRUE %in% unique(unique(object@METADATA[["RUN"]]) %in% run) != TRUE) {
+        ##get and check valid runs
+        run.valid <-
+          paste(as.character(unique(object@METADATA[, "RUN"])), collapse = ", ")
+
+        stop(
+          paste(
+            "[Risoe.BINfileData2RLum.Analysis()] run = ",
+            run,
+            " contain invalid run(s).
+            Valid runs are: ",
+            run.valid,
+            sep = ""
+          )
+        )
+
+      }
+
+    }
+
+    #set
+    if(is.null(set)){set <- unique(object@METADATA[["SET"]])
+    } else{
+
+      if(TRUE %in% unique(unique(object@METADATA[["SET"]]) %in% set) != TRUE){
+
+        ##get and check valid sets
+        set.valid <- paste(as.character(unique(object@METADATA[,"SET"])), collapse=", ")
+
+        stop(paste("[Risoe.BINfileData2RLum.Analysis] set = ", set, " contain invalid set(s).
+                   Valid sets are: ", set.valid, sep=""))
+
+      }
+
+    }
+
+    ##ltype
+    if (is.null(ltype)) {
+      ltype <- unique(object@METADATA[["LTYPE"]])
+    } else{
+      if (TRUE %in% unique(unique(object@METADATA[, "LTYPE"]) %in% ltype) != TRUE) {
+        ##get and check valid ltypes
+        ltype.valid <-
+          paste(as.character(unique(object@METADATA[, "LTYPE"])), collapse = ", ")
+
+        stop(
+          paste(
+            "[Risoe.BINfileData2RLum.Analysis] ltype = ",
+            ltype,
+            " contain invalid ltype(s).
+            Valid ltypes are: ",
+            ltype.valid,
+            sep = ""
+          )
+        )
+
+      }
+
+    }
+
+    ##dtype
+    if (is.null(dtype)) {
+      dtype <- unique(object@METADATA[["DTYPE"]])
+    } else{
+      if (TRUE %in% unique(unique(object@METADATA[, "DTYPE"]) %in% dtype) != TRUE) {
+        ##get and check valid dtypes
+        dtype.valid <-
+          paste(as.character(unique(object@METADATA[, "DTYPE"])), collapse = ", ")
+
+        stop(
+          paste(
+            "[Risoe.BINfileData2RLum.Analysis] dtype = ",
+            dtype,
+            " contain invalid dtype(s).
+            Valid dtypes are: ",
+            dtype.valid,
+            sep = ""
+          )
+        )
+
+      }
+
+    }
+
+
+    # Select values and convert them-----------------------------------------------------------
+
+    ##set progressbar to false if only one position is provided
+    if(txtProgressBar & length(pos)<2){
+      txtProgressBar <- FALSE
+
+    }
+
+    ##This loop does:
+    ## (a) iterating over all possible positions
+    ## (b) consider grains in all possible positions
+    ## (c) consider other selections
+    ## (d) create the RLum.Analysis objects
+
+    ##set progress bar
+    if(txtProgressBar){
+      pb<-txtProgressBar(min=min(pos),max=max(pos), char="=", style=3)
+    }
+
+    object <- lapply(pos, function(pos){
+
+      ##update progress bar
+      if(txtProgressBar){
+        setTxtProgressBar(pb, value = pos)
+      }
+
+      ##if no grain information is given, we select all grains in the particular position
+      if(is.null(grain)){
+        grain <- unique(object@METADATA[object@METADATA[["POSITION"]] == pos, "GRAIN"])
+
+      }
+
+      ##loop over the grains and produce RLum.Analysis objects
+      object <- lapply(grain, function(grain){
+
+        ##select data
+        ##the NA is necessary, as FI readers like to write a NA instead of 0 in the column
+        ##and this causes some trouble
+
+        if(is.na(grain)){
+          temp_id <- object@METADATA[
+            object@METADATA[["POSITION"]] == pos &
+              object@METADATA[["RUN"]] %in% run &
+              object@METADATA[["SET"]] %in% set &
+              object@METADATA[["LTYPE"]] %in% ltype &
+              object@METADATA[["DTYPE"]] %in% dtype
+            , "ID"]
+
+
+        }else{
+          temp_id <- object@METADATA[
+            object@METADATA[["POSITION"]] == pos &
+              object@METADATA[["GRAIN"]] == grain &
+              object@METADATA[["RUN"]] %in% run &
+              object@METADATA[["SET"]] %in% set &
+              object@METADATA[["LTYPE"]] %in% ltype &
+              object@METADATA[["DTYPE"]] %in% dtype
+            , "ID"]
+
+
+        }
+
+        ##create curve object
+        object <- set_RLum(
+          class = "RLum.Analysis",
+          records = lapply(temp_id,function(x) {
+            .Risoe.BINfileData2RLum.Data.Curve(object, id = x)
+          }),
+          protocol = protocol,
+          originator = "Risoe.BINfileData2RLum.Analysis"
+        )
+
+        ##add unique id of RLum.Analysis object to each curve object as .pid using internal function
+        .set_pid(object)
+
+      })
+
+      return(object)
+
+    })
+
+    ##this is necessary to not break with previous code, i.e. if only one element is included
+    ##the output is RLum.Analysis and not a list of it
+    if(length(object) == 1){
+      invisible(object[[1]][[1]])
+
+    }else{
+
+      invisible(unlist(object))
+
+    }
+
+}
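+
+## Illustrative sketch (not part of the upstream code): limiting the
+## conversion with the filter arguments, which are combined by AND. Builds on
+## the package example data used in the @examples section above.
+if (FALSE) {
+  data(ExampleData.BINfileData, envir = environment())
+  ## no 'pos': every position is converted, giving a list of RLum.Analysis objects
+  all_pos <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)
+  ## position 1, OSL curves only, with a protocol label for later analyses
+  osl_pos1 <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1,
+                                              ltype = "OSL", protocol = "SAR")
+}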
diff --git a/R/Risoe.BINfileData2RLum.Data.Curve.R b/R/Risoe.BINfileData2RLum.Data.Curve.R
new file mode 100644
index 0000000..5ede362
--- /dev/null
+++ b/R/Risoe.BINfileData2RLum.Data.Curve.R
@@ -0,0 +1,141 @@
+#' Convert an element from a Risoe.BINfileData object to an RLum.Data.Curve
+#' object
+#'
+#' The function converts one specified single record from a Risoe.BINfileData
+#' object to an RLum.Data.Curve object.
+#'
+#' The function extracts all \code{METADATA} from the \code{Risoe.BINfileData}
+#' object and stores them in the \code{RLum.Data.Curve} object. This function
+#' can be used stand-alone, but is the base function for \code{\link{Risoe.BINfileData2RLum.Analysis}}.
+#'
+#' @param object \code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
+#' \code{Risoe.BINfileData} object
+#'
+#' @param id \code{\link{integer}} (\bold{required}): record id in the
+#' \code{Risoe.BINfileData} object of the curve that is to be stored in the
+#' \code{RLum.Data.Curve} object. If no value for id is provided, the record
+#' has to be specified by \code{pos}, \code{set} and \code{run}.
+#'
+#' @param pos \code{\link{integer}} (optional): record position number in the
+#' \code{Risoe.BINfileData} object of the curve that is to be stored in the
+#' \code{RLum.Data.Curve} object. If a value for \code{id} is provided, this
+#' argument is ignored.
+#'
+#' @param run \code{\link{integer}} (optional): record run number in the
+#' \code{Risoe.BINfileData} object of the curve that is to be stored in the
+#' \code{RLum.Data.Curve} object. If a value for \code{id} is provided, this
+#' argument is ignored.
+#'
+#' @param set \code{\link{integer}} (optional): record set number in the
+#' \code{Risoe.BINfileData} object of the curve that is to be stored in the
+#' \code{RLum.Data.Curve} object. If a value for \code{id} is provided, this
+#' argument is ignored.
+#'
+#' @return Returns an \code{\linkS4class{RLum.Data.Curve}} object.
+#'
+#' @note Due to changes in the BIN-file format (version 3 to version 4), the recalculation of TL-curves might not
+#' be entirely correct for cases where the TL measurement is combined with a preheat.
+#'
+#' @section Function version: 0.4.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France),
+#' Christoph Burow, University of Cologne (Germany)
+#'
+#' @seealso \code{\link{Risoe.BINfileData2RLum.Analysis}},
+#' \code{\link{set_RLum}}, \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{Risoe.BINfileData}},
+#' \code{\link{plot_RLum}}
+#'
+#' @references #
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#' ##get package example data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##convert one record
+#' Risoe.BINfileData2RLum.Data.Curve(CWOSL.SAR.Data, id = 1)
+#'
+#' @noRd
+.Risoe.BINfileData2RLum.Data.Curve <- function(
+  object,
+  id,
+  pos,
+  run,
+  set
+){
+
+
+  # grep id of record -------------------------------------------------------
+  ##if id is set, no input for pos, set and run is necessary
+  if (missing(id)) {
+    id <- object@METADATA[object@METADATA[, "POSITION"] == pos &
+                            object@METADATA[, "SET"] == set &
+                            object@METADATA[, "RUN"] == run,
+                          "ID"]
+
+  }
+
+
+  # Select values -----------------------------------------------------------
+
+  ##build matrix
+  if(object@METADATA[id,"NPOINTS"][1] != 0){
+
+    if(object@METADATA[id, "LTYPE"] == "TL" && as.numeric(object@METADATA[id, "VERSION"]) >= 4){
+
+      temp.x <- c(
+        seq(
+          from = object@METADATA[["LOW"]][id],
+          to = object@METADATA[["AN_TEMP"]][id],
+          length.out = object@METADATA[["TOLDELAY"]][id]
+        ),
+        seq(
+          from = object@METADATA[["AN_TEMP"]][id],
+          to = object@METADATA[["AN_TEMP"]][id],
+          length.out = object@METADATA[["TOLON"]][id]
+        ),
+        seq(
+          from = object@METADATA[["AN_TEMP"]][id],
+          to = object@METADATA[["HIGH"]][id],
+          length.out = object@METADATA[["TOLOFF"]][id]
+        )
+      )
+
+    }else{
+
+      temp.x <- seq(
+        from = object@METADATA[["LOW"]][id],
+        to = object@METADATA[["HIGH"]][id],
+        length.out = object@METADATA[["NPOINTS"]][id]
+      )
+
+    }
+
+    temp.y <- unlist(object@DATA[id], use.names = FALSE)
+
+
+  }else{
+    temp.x <- NA
+    temp.y <- NA
+
+    warning("[.Risoe.BINfileData2RLum.Data.Curve()] NPOINTS was 0, RLum.Data.Curve-object with NA-values produced.")
+
+  }
+
+  ##convert info elements to list ... this procedure halves the time needed
+  ##in comparison to simply as.list(object@METADATA)
+  info <- lapply(1:length(names(object@METADATA)), function(x){.subset2(object@METADATA, x)[id]})
+  names(info) <- names(object@METADATA)
+
+  # Build object ------------------------------------------------------------
+  set_RLum(
+    class = "RLum.Data.Curve",
+    recordType = as.character(object@METADATA[id, "LTYPE"]),
+    data = matrix(c(temp.x, temp.y), ncol = 2),
+    info = info
+  )
+
+}
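+
+## Illustrative sketch (not part of the upstream code): the three-segment
+## temperature axis built above for TL records of BIN version >= 4, with
+## hypothetical metadata values (heat-up, preheat plateau, final TL ramp).
+if (FALSE) {
+  LOW <- 20; AN_TEMP <- 200; HIGH <- 450        # temperatures [deg. C]
+  TOLDELAY <- 50; TOLON <- 100; TOLOFF <- 250   # channels per segment
+  temp.x <- c(
+    seq(LOW, AN_TEMP, length.out = TOLDELAY),   # ramp up to the plateau
+    seq(AN_TEMP, AN_TEMP, length.out = TOLON),  # hold at AN_TEMP
+    seq(AN_TEMP, HIGH, length.out = TOLOFF)     # TL ramp to HIGH
+  )
+  length(temp.x) == TOLDELAY + TOLON + TOLOFF   # channel count matches NPOINTS
+}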
diff --git a/R/RisoeBINfileData-class.R b/R/RisoeBINfileData-class.R
new file mode 100644
index 0000000..9a63c7e
--- /dev/null
+++ b/R/RisoeBINfileData-class.R
@@ -0,0 +1,276 @@
+#' @include get_Risoe.BINfileData.R set_Risoe.BINfileData.R
+NULL
+
+#' Class \code{"Risoe.BINfileData"}
+#'
+#' S4 class object for luminescence data in R. The object is produced as output
+#' of the function \code{\link{read_BIN2R}}.
+#'
+#'
+#' @name Risoe.BINfileData-class
+#'
+#' @docType class
+#'
+#' @slot METADATA Object of class "data.frame" containing the meta information for each curve.
+#'
+#' @slot DATA Object of class "list" containing numeric vector with count data.
+#'
+#' @slot .RESERVED Object of class "list" containing list of undocumented raw values for internal use only.
+#'
+#' @note
+#'
+#' \bold{Internal METADATA - object structure}
+#'
+#' \tabular{rllll}{
+#' \bold{#} \tab \bold{Name} \tab \bold{Data Type} \tab \bold{V} \tab \bold{Description} \cr
+#' [,1]  \tab ID  \tab \code{numeric} \tab RLum \tab Unique record ID (same ID as in slot \code{DATA})\cr
+#' [,2]  \tab SEL \tab \code{logic} \tab RLum \tab Record selection, not part of the official BIN-format, triggered by TAG\cr
+#' [,3]  \tab VERSION \tab \code{raw} \tab 03-08 \tab BIN-file version number \cr
+#' [,4]  \tab LENGTH \tab \code{integer} \tab 03-08 \tab Length of this record\cr
+#' [,5]  \tab PREVIOUS \tab \code{integer} \tab 03-08 \tab Length of previous record\cr
+#' [,6]  \tab NPOINTS \tab \code{integer} \tab 03-08 \tab Number of data points in the record\cr
+#' [,7]  \tab RECTYPE \tab \code{integer} \tab 08 \tab Record type \cr
+#' [,8]  \tab RUN \tab \code{integer} \tab 03-08 \tab Run number\cr
+#' [,9]  \tab SET \tab \code{integer} \tab 03-08 \tab Set number\cr
+#' [,10]  \tab POSITION \tab  \code{integer} \tab 03-08 \tab Position number\cr
+#' [,11] \tab GRAIN \tab \code{integer} \tab 03-04 \tab Grain number\cr
+#' [,12] \tab GRAINNUMBER \tab \code{integer} \tab 06-08 \tab Grain number\cr
+#' [,13] \tab CURVENO \tab \code{integer} \tab 06-08 \tab Curve number\cr
+#' [,14] \tab XCOORD \tab \code{integer} \tab 03-08 \tab X position of a single grain\cr
+#' [,15] \tab YCOORD \tab \code{integer} \tab 03-08 \tab Y position of a single grain\cr
+#' [,16] \tab SAMPLE \tab \code{factor} \tab 03-08 \tab Sample name\cr
+#' [,17] \tab COMMENT \tab \code{factor} \tab 03-08 \tab Comment name\cr
+#' [,18] \tab SYSTEMID \tab \code{integer} \tab 03-08 \tab Risoe system id\cr
+#' [,19] \tab FNAME \tab \code{factor} \tab 06-08 \tab File name (*.bin/*.binx)\cr
+#' [,20] \tab USER \tab \code{factor} \tab 03-08 \tab User name\cr
+#' [,21] \tab TIME \tab \code{character} \tab 03-08 \tab Data collection time (hh-mm-ss)\cr
+#' [,22] \tab DATE \tab \code{factor} \tab 03-08 \tab Data collection date (ddmmyy)\cr
+#' [,23] \tab DTYPE \tab \code{character} \tab 03-08 \tab Data type\cr
+#' [,24] \tab BL_TIME \tab \code{numeric} \tab 03-08 \tab Bleaching time\cr
+#' [,25] \tab BL_UNIT \tab \code{integer} \tab 03-08 \tab Bleaching unit (mJ, J, secs, mins, hrs)\cr
+#' [,26] \tab NORM1 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (1)\cr
+#' [,27] \tab NORM2 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (2)\cr
+#' [,28] \tab NORM3 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (3)\cr
+#' [,29] \tab BG \tab \code{numeric} \tab 03-08 \tab Background level\cr
+#' [,30] \tab SHIFT \tab \code{integer} \tab 03-08 \tab Number of channels to shift data\cr
+#' [,31] \tab TAG \tab \code{integer} \tab 03-08 \tab Tag, triggers SEL\cr
+#' [,32] \tab LTYPE \tab \code{character} \tab 03-08 \tab Luminescence type\cr
+#' [,33] \tab LIGHTSOURCE \tab \code{character} \tab 03-08 \tab Light source\cr
+#' [,34] \tab LPOWER \tab \code{numeric} \tab 03-08 \tab Optical stimulation power\cr
+#' [,35] \tab LIGHTPOWER \tab \code{numeric} \tab 06-08 \tab Optical stimulation power\cr
+#' [,36] \tab LOW \tab \code{numeric} \tab 03-08 \tab Low (temperature, time, wavelength)\cr
+#' [,37] \tab HIGH \tab \code{numeric} \tab 03-08 \tab High (temperature, time, wavelength)\cr
+#' [,38] \tab RATE \tab \code{numeric} \tab 03-08 \tab Rate (heating rate, scan rate)\cr
+#' [,39] \tab TEMPERATURE \tab \code{integer} \tab 03-08 \tab Sample temperature\cr
+#' [,40] \tab MEASTEMP \tab \code{integer} \tab 06-08 \tab Measured temperature\cr
+#' [,41] \tab AN_TEMP \tab \code{numeric} \tab 03-08 \tab Annealing temperature\cr
+#' [,42] \tab AN_TIME \tab \code{numeric} \tab 03-08 \tab Annealing time\cr
+#' [,43] \tab TOLDELAY \tab \code{integer} \tab 03-08 \tab TOL 'delay' channels\cr
+#' [,44] \tab TOLON \tab \code{integer} \tab 03-08 \tab TOL 'on' channels\cr
+#' [,45] \tab TOLOFF \tab \code{integer} \tab 03-08 \tab TOL 'off' channels\cr
+#' [,46] \tab IRR_TIME \tab \code{numeric} \tab 03-08 \tab Irradiation time\cr
+#' [,47] \tab IRR_TYPE \tab \code{integer} \tab 03-08 \tab Irradiation type (alpha, beta or gamma)\cr
+#' [,48] \tab IRR_UNIT \tab \code{integer} \tab 03-04 \tab Irradiation unit (Gy, Rads, secs, mins, hrs)\cr
+#' [,49] \tab IRR_DOSERATE \tab \code{numeric} \tab 06-08 \tab Irradiation dose rate (Gy/s)\cr
+#' [,50] \tab IRR_DOSERATEERR \tab \code{numeric} \tab 06-08 \tab Irradiation dose rate error (Gy/s)\cr
+#' [,51] \tab TIMESINCEIRR \tab \code{integer} \tab 06-08 \tab Time since irradiation (s)\cr
+#' [,52] \tab TIMETICK \tab \code{numeric} \tab 06-08 \tab Time tick for pulsing (s)\cr
+#' [,53] \tab ONTIME \tab \code{integer} \tab 06-08 \tab On-time for pulsing (in time ticks)\cr
+#' [,54] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
+#' [,55] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+#' [,56] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+#' [,57] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
+#' [,58] \tab GATE_STOP \tab \code{integer} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
+#' [,59] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
+#' [,60] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
+#' [,61] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
+#' [,62] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
+#' [,63] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
+#' [,64] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
+#' [,65] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
+#' [,66] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
+#' [,67] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name\cr
+#' [,68] \tab DETECTOR_ID \tab \code{raw} \tab 07-08 \tab Detector ID\cr
+#' [,69] \tab LOWERFILTER_ID \tab \code{integer} \tab 07-08 \tab Lower filter ID in reader\cr
+#' [,70] \tab UPPERFILTER_ID \tab \code{integer} \tab 07-08 \tab Upper filter ID in reader\cr
+#' [,71] \tab ENOISEFACTOR \tab \code{numeric} \tab 07-08 \tab Excess noise filter, usage unknown \cr
+#' [,72] \tab MARKPOS_X1 \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr
+#' [,73] \tab MARKPOS_Y1 \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr
+#' [,74] \tab MARKPOS_X2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
+#' [,75] \tab MARKPOS_Y2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
+#' [,76] \tab MARKPOS_X3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
+#' [,77] \tab MARKPOS_Y3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
+#' [,78] \tab MARKPOS_X4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
+#' [,79] \tab MARKPOS_Y4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
+#' [,80] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
+#' [,81] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown
+#' } V = BIN-file version (RLum means that it does not depend on a specific BIN
+#' version)\cr
+#'
+#' Note that the \code{Risoe.BINfileData} object combines all values from
+#' different versions of the BIN-file; reserved bits are skipped, however,
+#' the function \code{\link{write_R2BIN}} resets arbitrary reserved bits. Invalid
+#' values for a specific version are set to \code{NA}. Furthermore, the
+#' internal R data types do not necessarily match the required data types for
+#' the BIN-file data import! Data types are converted during data import.\cr
+#'
+#' \bold{LTYPE} values
+#'
+#' \tabular{rll}{ [,0] \tab TL \tab: Thermoluminescence \cr [,1] \tab OSL \tab:
+#' Optically stimulated luminescence \cr [,2] \tab IRSL \tab: Infrared
+#' stimulated luminescence \cr [,3] \tab M-IR \tab: Infrared monochromator
+#' scan\cr [,4] \tab M-VIS \tab: Visible monochromator scan\cr [,5] \tab TOL
+#' \tab: Thermo-optical luminescence \cr [,6] \tab TRPOSL \tab: Time Resolved
+#' Pulsed OSL\cr [,7] \tab RIR \tab: Ramped IRSL\cr [,8] \tab RBR \tab: Ramped
+#' (Blue) LEDs\cr [,9] \tab USER \tab: User defined\cr [,10] \tab POSL \tab:
+#' Pulsed OSL \cr [,11] \tab SGOSL \tab: Single Grain OSL\cr [,12] \tab RL
+#' \tab: Radio Luminescence \cr [,13] \tab XRF \tab: X-ray Fluorescence }
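+#'
+#' These type labels allow record-wise subsetting directly on the slots (a
+#' minimal sketch; \code{bin} denotes an arbitrary \code{Risoe.BINfileData}
+#' object):
+#'
+#' \preformatted{
+#' osl_rows <- slot(bin, "METADATA")[["LTYPE"]] == "OSL"  ## all OSL records
+#' osl_data <- slot(bin, "DATA")[osl_rows]                ## matching count data
+#' }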
+#'
+#' \bold{DTYPE} values \tabular{rll}{ [,0] \tab 0 \tab Natural \cr [,1] \tab 1
+#' \tab N+dose \cr [,2] \tab 2 \tab Bleach \cr [,3] \tab 3 \tab Bleach+dose \cr
+#' [,4] \tab 4 \tab Natural (Bleach) \cr [,5] \tab 5 \tab N+dose (Bleach) \cr
+#' [,6] \tab 6 \tab Dose \cr [,7] \tab 7 \tab Background }
+#'
+#' \bold{LIGHTSOURCE} values \tabular{rll}{ [,0] \tab 0 \tab None \cr [,1] \tab
+#' 1 \tab Lamp \cr [,2] \tab 2 \tab IR diodes/IR Laser \cr [,3] \tab 3 \tab
+#' Calibration LED \cr [,4] \tab 4 \tab Blue Diodes \cr [,5] \tab 5 \tab White
+#' light \cr [,6] \tab 6 \tab Green laser (single grain) \cr [,7] \tab 7 \tab IR
+#' laser (single grain) }
+#'
+#' (information on the BIN/BINX file format was kindly provided by Risoe, DTU
+#' Nutech)
+#'
+#' @section Objects from the Class: Objects can be created by calls of the form
+#' \code{new("Risoe.BINfileData", ...)}.
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso
+#' \code{\link{plot_Risoe.BINfileData}}, \code{\link{read_BIN2R}},
+#' \code{\link{write_R2BIN}},\code{\link{merge_Risoe.BINfileData}},
+#' \code{\link{Risoe.BINfileData2RLum.Analysis}},
+#'
+#' @references Risoe DTU, 2013. The Sequence Editor User Manual - February 2013 and
+#' Risoe DTU, 2016. The Sequence Editor User Manual - February 2016
+#'
+#' \code{http://www.nutech.dtu.dk/}
+#'
+#' @keywords classes
+#'
+#' @examples
+#'
+#' showClass("Risoe.BINfileData")
+#'
+#' @export
+setClass("Risoe.BINfileData",
+         slots = list(
+           METADATA = "data.frame",
+           DATA = "list",
+           .RESERVED = "list"
+           )
+         )
+
+##set S4 'show' method for the object
+#' @describeIn Risoe.BINfileData
+#' Show the structure of a Risoe.BINfileData class object
+#' @export
+setMethod("show", signature(object = "Risoe.BINfileData"),
+          function(object){
+
+            version <- paste(unique(object@METADATA[,"VERSION"]), collapse = ", ")
+            systemID <- paste(unique(object@METADATA[,"SYSTEMID"]), collapse = ", ")
+            filename <- as.character(object@METADATA[1,"FNAME"])
+            records.overall <- length(object@DATA)
+            records.type <- table(object@METADATA[,"LTYPE"])
+            user <- paste(unique(as.character(object@METADATA[,"USER"])), collapse = ", ")
+            date <- paste(unique(as.character(object@METADATA[,"DATE"])), collapse = ", ")
+            run.range <- range(object@METADATA[,"RUN"])
+            set.range <- range(object@METADATA[,"SET"])
+            grain.range <- range(object@METADATA[,"GRAIN"])
+            pos.range <- range(object@METADATA[,"POSITION"])
+
+            records.type.count <- sapply(1:length(records.type),
+              function(x){paste(
+              names(records.type)[x],"\t(n = ",records.type[x],")",sep="")
+              })
+
+            records.type.count <- paste(records.type.count,
+                                        collapse="\n\t                      ")
+
+            ##print
+            cat("\n[Risoe.BINfileData object]")
+            cat("\n\n\tBIN/BINX version     ", version)
+            if(version>=6){
+              cat("\n\tFile name:           ", filename)
+            }
+            cat("\n\tObject date:         ", date)
+            cat("\n\tUser:                ", user)
+            cat("\n\tSystem ID:           ", ifelse(systemID == 0,"0 (unknown)", systemID))
+            cat("\n\tOverall records:     ", records.overall)
+            cat("\n\tRecords type:        ", records.type.count)
+            cat("\n\tPosition range:      ",pos.range[1],":",pos.range[2])
+            cat("\n\tGrain range:         ",grain.range[1],":",grain.range[2])
+            cat("\n\tRun range:           ",run.range[1],":",run.range[2])
+            cat("\n\tSet range:           ",set.range[1],":",set.range[2])
+          }#end function
+          )#end setMethod
+
+
+# constructor (set) method for object class -----------------------------------
+
+#' @describeIn Risoe.BINfileData
+#' The Risoe.BINfileData object is normally produced as output of the function \code{\link{read_BIN2R}}.
+#' This construction method is intended for internal usage only.
+#'
+#' @param METADATA Object of class "data.frame" containing the meta information
+#' for each curve.
+#'
+#' @param DATA Object of class "list" containing numeric vector with count data.
+#'
+#' @param .RESERVED Object of class "list" containing list of undocumented raw
+#' values for internal use only.
+#' @export
+setMethod("set_Risoe.BINfileData",
+          signature = c(
+            METADATA = "data.frame", DATA = "list", .RESERVED = "ANY"
+          ),
+
+          function(METADATA, DATA, .RESERVED) {
+            if (missing(.RESERVED)) {
+              .RESERVED <- list()
+            }
+
+            new(
+              "Risoe.BINfileData",
+              METADATA = METADATA,
+              DATA = DATA,
+              .RESERVED = .RESERVED
+            )
+
+          })
+
+
+# accessor (get) method for object class -----------------------------------
+
+#' @describeIn Risoe.BINfileData
+#' Formal get-method for Risoe.BINfileData object. It does not allow accessing
+#' the object directly; it only shows a terminal message.
+#'
+#' @param object an object of class \code{\linkS4class{Risoe.BINfileData}}
+#'
+#' @param ... other arguments that might be passed
+#'
+#' @export
+setMethod("get_Risoe.BINfileData",
+          signature= "Risoe.BINfileData",
+          definition = function(object, ...) {
+
+            cat("[get_Risoe.BINfileData()] No direct access is provided for this object type. Use the function 'Risoe.BINfileData2RLum.Analysis' for object coercing.")
+
+          })##end setMethod
+
+##-------------------------------------------------------------------------------------------------##
+##=================================================================================================##
diff --git a/R/Second2Gray.R b/R/Second2Gray.R
new file mode 100644
index 0000000..4db8b44
--- /dev/null
+++ b/R/Second2Gray.R
@@ -0,0 +1,218 @@
+#' Converting equivalent dose values from seconds (s) to gray (Gy)
+#'
+#' Conversion of absorbed radiation dose in seconds (s) to the SI unit gray
+#' (Gy) including error propagation. Normally used for equivalent dose data.
+#'
+#' Calculation of De values from seconds (s) to gray (Gy): \deqn{De [Gy] = De
+#' [s] * Dose Rate [Gy/s]} \cr
+#'
+#' Provided error propagation methods (with 'se' as the standard error
+#' and 'DR' the dose rate of the beta-source):\cr
+#'
+#'
+#' \bold{(1) \code{omit}} (default)\cr
+#'
+#' \deqn{se(De) [Gy] = se(De) [s] * DR [Gy/s]}
+#'
+#' In this case the standard error of the dose rate of the beta-source is treated as systematic
+#' (i.e. non-random), so its error propagation is omitted. However, the error must be considered during
+#' calculation of the final age (cf. Aitken, 1985, pp. 242). This approach can be seen as
+#' method (2) (gaussian) for the case that the (random) standard error of the beta-source calibration is
+#' 0. Which method is appropriate depends on the situation and cannot be prescribed.
+#'
+#' \bold{(2) \code{gaussian}} error propagation \cr
+#'
+#' \deqn{se(De) [Gy] = \sqrt((DR [Gy/s] * se(De) [s])^2 + (De [s] * se(DR) [Gy/s])^2)}
+#'
+#' Applicable under the assumption that the errors of De and DR are uncorrelated.
+#'
+#' \bold{(3) \code{absolute}} error propagation \cr
+#'
+#' \deqn{se(De) [Gy]= abs(DR [Gy/s] * se(De) [s]) + abs(De [s] * se(DR) [Gy/s])}
+#'
+#' Applicable under the assumption that the errors of De and DR are correlated.
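+#'
+#' As a minimal sketch in R (the names \code{De}, \code{se_De}, \code{DR} and
+#' \code{se_DR} are illustrative only, not function arguments):
+#'
+#' \preformatted{
+#' se_omit     <- DR * se_De                              # (1) omit
+#' se_gaussian <- sqrt((DR * se_De)^2 + (De * se_DR)^2)   # (2) gaussian
+#' se_absolute <- abs(DR * se_De) + abs(De * se_DR)       # (3) absolute
+#' }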
+#'
+#' @param data \code{\link{data.frame}} (\bold{required}): input values,
+#' structure: data (\code{values[,1]}) and data error (\code{values[,2]}) are
+#' required
+#'
+#' @param dose.rate \code{\linkS4class{RLum.Results}}, \code{\link{data.frame}} or \code{\link{numeric}}
+#' (\bold{required}): an \code{RLum.Results} object needs to originate from the
+#' function \code{\link{calc_SourceDoseRate}}; for a \code{numeric} vector: dose rate
+#' in Gy/s and dose rate error in Gy/s
+#'
+#' @param error.propagation \code{\link{character}} (with default): error propagation method used for error
+#' calculation (\code{omit}, \code{gaussian} or \code{absolute}), see details for further
+#' information
+#'
+#' @return Returns a \link{data.frame} with converted values.
+#'
+#' @note If no or a wrong error propagation method is given, the execution of the function is
+#' stopped. Furthermore, if a \code{data.frame} is provided for the dose rate values it has to
+#' be of the same length as the data frame provided with the argument \code{data}
+#'
+#' @section Function version: 0.6.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France),\cr Michael Dietze, GFZ Potsdam (Germany),\cr Margret C. Fuchs, HZDR,
+#' Helmholtz-Institute Freiberg for Resource Technology
+#' (Germany)
+#'
+#' @seealso \code{\link{calc_SourceDoseRate}}
+#'
+#' @references
+#'
+#' Aitken, M.J., 1985. Thermoluminescence dating. Academic Press.
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#'
+#' ##(A) for known source dose rate at date of measurement
+#' ## - load De data from the example data help file
+#' data(ExampleData.DeValues, envir = environment())
+#' ## - convert De(s) to De(Gy)
+#' Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+#'
+#'
+#' ##(B) for source dose rate calibration data
+#' ## - calculate source dose rate first
+#' dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+#'                                   calib.date = "2014-12-19",
+#'                                   calib.dose.rate = 0.0438,
+#'                                   calib.error = 0.0019)
+#' # read example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' # apply dose.rate to convert De(s) to De(Gy)
+#' Second2Gray(ExampleData.DeValues$BT998, dose.rate)
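+#'
+#' ##(C) for a per-aliquot dose rate data.frame
+#' ## (purely illustrative values; the data.frame must have the same number
+#' ##  of rows as 'data')
+#' df <- data.frame(rate = rep(0.0438, nrow(ExampleData.DeValues$BT998)),
+#'                  error = rep(0.0019, nrow(ExampleData.DeValues$BT998)))
+#' Second2Gray(ExampleData.DeValues$BT998, df)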
+#'
+#' @export
+Second2Gray <- function(
+  data,
+  dose.rate,
+  error.propagation = "omit"
+){
+
+  # Integrity tests -----------------------------------------------------------------------------
+
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(!is(data, "data.frame")){
+
+    stop("[Second2Gray()] 'data' object has to be of type 'data.frame'!")
+
+  }
+
+  ##(2) numeric, data.frame or RLum.Data.Curve object?
+  if(!is(dose.rate, "numeric")  &  !is(dose.rate, "RLum.Results") & !is(dose.rate, "data.frame")){
+
+    stop("[Second2Gray()] 'dose.rate' object has to be of type 'numeric', 'data.frame' or 'RLum.Results'!")
+
+  }
+
+
+  ##(3) last check to avoid problems
+  if(is(dose.rate, "data.frame")){
+
+    if(nrow(dose.rate)!=nrow(data)){
+
+      stop("[Second2Gray()] the data frames in 'data' and 'dose.rate' need to be of similar length!")
+
+    }
+
+  }
+
+
+  ##(4) check for the right originator
+  if(is(dose.rate, "RLum.Results")){
+
+    if(dose.rate@originator != "calc_SourceDoseRate"){
+
+      stop("[Second2Gray()]  Wrong originator for dose.rate 'RLum.Results' object.")
+
+    }else{
+
+      ##check what is what
+      if(!is(get_RLum(dose.rate, data.object = "dose.rate"), "data.frame")){
+
+        ##use '=' here; '<-' inside data.frame() would cause side-effect assignments
+        dose.rate <- data.frame(
+          dose.rate = as.numeric(get_RLum(dose.rate, data.object = "dose.rate")[1]),
+          dose.rate.error = as.numeric(get_RLum(dose.rate, data.object = "dose.rate")[2])
+          )
+
+      }else{
+
+        dose.rate <- get_RLum(dose.rate, data.object = "dose.rate")
+
+      }
+
+    }
+
+  }
+
+
+  # Calculation ---------------------------------------------------------------------------------
+
+
+  De.seconds <- data[,1]
+  De.error.seconds <- data[,2]
+
+  De.gray <- NA
+  De.error.gray <- NA
+
+  if(is(dose.rate,"data.frame")){
+    De.gray <- round(De.seconds*dose.rate[,1], digits=2)
+
+  }else{
+    De.gray <- round(De.seconds*dose.rate[1], digits=2)
+
+  }
+
+  if(error.propagation == "omit"){
+
+    if(is(dose.rate,"data.frame")){
+      De.error.gray <- round(dose.rate[,1]*De.error.seconds, digits=3)
+
+    }else{
+      De.error.gray <- round(dose.rate[1]*De.error.seconds, digits=3)
+
+    }
+
+  }else if(error.propagation == "gaussian"){
+
+    if(is(dose.rate,"data.frame")){
+       De.error.gray <- round(sqrt((De.seconds*dose.rate[,2])^2+(dose.rate[,1]*De.error.seconds)^2), digits=3)
+
+    }else{
+      De.error.gray <- round(sqrt((De.seconds*dose.rate[2])^2+(dose.rate[1]*De.error.seconds)^2), digits=3)
+
+    }
+
+  }else if (error.propagation == "absolute"){
+
+    if(is(dose.rate,"data.frame")){
+      De.error.gray <- round(abs(dose.rate[,1] * De.error.seconds) + abs(De.seconds * dose.rate[,2]), digits=3)
+
+    }else{
+      De.error.gray <- round(abs(dose.rate[1] * De.error.seconds) + abs(De.seconds * dose.rate[2]), digits=3)
+
+    }
+
+  }else{
+
+    stop("[Second2Gray()] unsupported error propagation method!" )
+
+  }
+
+  # Return --------------------------------------------------------------------------------------
+
+  data <- data.frame(De=De.gray, De.error=De.error.gray)
+
+
+  return(data)
+
+}
diff --git a/R/analyse_IRSAR.RF.R b/R/analyse_IRSAR.RF.R
new file mode 100644
index 0000000..7b88c09
--- /dev/null
+++ b/R/analyse_IRSAR.RF.R
@@ -0,0 +1,1914 @@
+#' Analyse IRSAR RF measurements
+#'
+#' Function to analyse IRSAR RF measurements on K-feldspar samples, performed
+#' using the protocol according to Erfurt et al. (2003) and beyond.
+#'
+#' The function performs an IRSAR analysis described for K-feldspar samples by
+#' Erfurt et al. (2003) assuming a negligible sensitivity change of the RF
+#' signal.\cr
+#'
+#' \bold{General Sequence Structure} (according to Erfurt et al.
+#' (2003)) \enumerate{
+#'
+#' \item Measuring IR-RF intensity of the natural dose for a few seconds (\eqn{RF_{nat}})
+#' \item Bleach the samples under solar conditions for at least 30 min without changing the geometry
+#' \item Waiting for at least one hour
+#' \item Regeneration of the IR-RF signal to at least the natural level (measuring \eqn{RF_{reg}})
+#' \item Fitting data with a stretched exponential function
+#' \item Calculate the palaeodose \eqn{D_{e}} using the parameters from the
+#' fitting}
+#'
+#' Two methods are supported to obtain the \eqn{D_{e}}: \code{method = "FIT"} and
+#' \code{method = "SLIDE"}:
+#'
+#' \bold{\code{method = "FIT"}}\cr
+#'
+#' The principle is described above and follows the original suggestions by
+#' Erfurt et al., 2003. For the fitting the mean count value of the RF_nat curve is used.
+#'
+#' Function used for the fitting (according to Erfurt et al. (2003)): \cr
+#'
+#' \deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta}
+#' with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the initial
+#' IR-RF flux, \eqn{\Delta\phi} the dose dependent change of the IR-RF flux,
+#' \eqn{\lambda} the exponential parameter, \eqn{D} the dose and \eqn{\beta}
+#' the dispersive factor.\cr\cr To obtain the palaeodose \eqn{D_{e}} the
+#' function is rearranged to:\cr \deqn{D_{e} = ln(-((\phi(D) -
+#' \phi_{0})/(-\Delta\phi))^{1/\beta}+1)/-\lambda}\cr The fitting is done
+#' using the \code{port} algorithm of the \code{\link{nls}} function.\cr
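+#'
+#' In R terms this reads (a sketch only; \code{phi.0}, \code{delta.phi},
+#' \code{lambda} and \code{beta} as obtained from the fit, \code{phi} the mean
+#' count value of the RF_nat curve):
+#'
+#' \preformatted{
+#' De <- log(-((phi - phi.0) / -delta.phi)^(1 / beta) + 1) / -lambda
+#' }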
+#'
+#'
+#' \bold{\code{method = "SLIDE"}}\cr
+#'
+#' For this method the natural curve is slid along the x-axis until
+#' congruence with the regenerated curve is reached. Instead of fitting, this
+#' allows working with the original data without the need for any physical
+#' model. This approach was introduced for RF curves by Buylaert et al., 2012
+#' and Lapp et al., 2012.
+#'
+#' Here the sliding is done by searching for the minimum of the squared residuals.\cr
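+#'
+#' A minimal sketch of this idea in plain R (illustrative only; internally a
+#' compiled Rcpp routine with Monte Carlo support is used):
+#'
+#' \preformatted{
+#' ## nat, reg: count vectors of the natural and regenerated curves
+#' srs <- vapply(0:(length(reg) - length(nat)), function(shift) {
+#'   sum((nat - reg[(shift + 1):(shift + length(nat))])^2)
+#' }, numeric(1))
+#' shift_min <- which.min(srs) - 1  ## channel shift with minimum residuals
+#' }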
+#'
+#' \bold{\code{method.control}}\cr
+#'
+#' To keep the generic argument list as clear as possible, arguments to control the methods
+#' for De estimation are all preset with meaningful default parameters and can be
+#' handled using the argument \code{method.control} only, e.g.,
+#' \code{method.control = list(trace = TRUE)}. Supported arguments are:\cr
+#'
+#' \tabular{lll}{
+#' ARGUMENT       \tab METHOD               \tab DESCRIPTION\cr
+#' \code{trace}   \tab \code{FIT}, \code{SLIDE} \tab as in \code{\link{nls}}; shows sum of squared residuals\cr
+#' \code{maxiter} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
+#' \code{warnOnly} \tab \code{FIT}           \tab as in \code{\link{nls}}\cr
+#' \code{minFactor} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
+#' \code{correct_onset} \tab \code{SLIDE}      \tab This \code{\link{logical}} argument
+#' shifts the curves along the x-axis by the first channel, as light is expected in the first channel.
+#' The default value is \code{TRUE}.\cr
+#' \code{show_density} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
+#' enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr
+#' \code{show_fit} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
+#' enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr
+#' \code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (with default):
+#' This controls the number of MC runs within the sliding (assessing the possible minimum values).
+#' The default is \code{n.MC = 1000}. Note: This parameter is not the same as the one controlled by the
+#' function argument \code{n.MC}. \cr
+#' }
+#'
+#'
+#' \bold{Error estimation}\cr
+#'
+#' For \bold{\code{method = "FIT"}} the asymmetric error range is obtained by using the 2.5 \% (lower) and
+#' the 97.5 \% (upper) quantiles of the \eqn{RF_{nat}} curve for calculating the \eqn{D_{e}} error range.\cr
+#'
+#' For \bold{\code{method = "SLIDE"}} the error is obtained by bootstrapping the residuals of the slided
+#' curve to construct new natural curves for a Monte Carlo simulation. The error is returned in two
+#' ways: (a) the standard deviation of the herewith obtained \eqn{D_{e}} from the MC runs and (b) the confidence
+#' interval using the  2.5 \% (lower) and the 97.5 \% (upper) quantiles. The results of the MC runs
+#' are returned with the function output. \cr
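+#'
+#' A sketch of one MC replicate (illustrative names; \code{reg_segment} denotes
+#' the part of the regenerated curve matched by the sliding):
+#'
+#' \preformatted{
+#' ## residuals: natural curve minus matched regenerated segment
+#' nat_mc <- reg_segment + sample(residuals, length(reg_segment), replace = TRUE)
+#' ## nat_mc is slid again; each replicate yields one MC De value
+#' }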
+#'
+#' \bold{Test parameters}\cr
+#'
+#' The argument \code{test_parameters} allows passing thresholds for several test parameters,
+#' which will be evaluated during the function run. If a threshold is set and exceeded, the
+#' test parameter status will be set to "FAILED". Intentionally these parameters are not termed
+#' 'rejection criteria', as not all test parameters are evaluated for both methods and some parameters
+#' are calculated but not evaluated by default. Common to all parameters are the allowed argument options
+#' \code{NA} and \code{NULL}. If a parameter is set to \code{NA} the value is calculated but the
+#' result will not be evaluated, meaning it has no effect on the status ("OK" or "FAILED") of the parameter.
+#' Setting the parameter to \code{NULL} disables the parameter entirely and the parameter will
+#' also be removed from the function output. This might be useful in cases where a particular parameter
+#' demands long computation times. Currently supported parameters are:
+#'
+#' \code{curves_ratio} \code{\link{numeric}} (default: \code{1.001}):\cr
+#'
+#' The ratio of \eqn{RF_{nat}} over \eqn{RF_{reg}} in the range of \eqn{RF_{nat}} is calculated
+#' and should not exceed the threshold value. \cr
+#'
+#' \code{intersection_ratio} \code{\link{numeric}} (default: \code{NA}):\cr
+#'
+#' Calculated as the absolute difference from 1 of the ratio of the integrals of the normalised RF-curves.
+#' This value indicates intersection of the RF-curves and should be close to 0 if the curves
+#' have a similar shape. For this calculation first the corresponding time-count pair value on the RF_reg
+#' curve is obtained using the maximum count value of the RF_nat curve, and only this segment (fitting to
+#' the RF_nat curve) on the RF_reg curve is taken for further calculating this ratio. If nothing is
+#' found at all, \code{Inf} is returned. \cr
+#'
+#' \code{residuals_slope} \code{\link{numeric}} (default: \code{NA}; only for \code{method = "SLIDE"}): \cr
+#'
+#' A linear function is fitted on the residuals after sliding.
+#' The corresponding slope can be used to discard values as a high (positive, negative) slope
+#' may indicate that both curves are fundamentally different and the method cannot be applied at all.
+#' Per default the value of this parameter is calculated but not evaluated. \cr
+#'
+#'\code{curves_bounds} \code{\link{numeric}} (default: \eqn{max(RF_{reg_x})}):\cr
+#'
+#'This measure uses the maximum time (x) value of the regenerated curve.
+#'The maximum time (x) value of the natural curve cannot be larger than this value. However, although
+#'this is not recommended, the value can be changed or disabled.\cr
+#'
+#'\code{dynamic_ratio} \code{\link{numeric}} (default: \code{NA}):\cr
+#'
+#'The dynamic ratio of the regenerated curve is calculated as the ratio of the maximum and minimum count values.
+#'
+#'\code{lambda}, \code{beta} and \code{delta.phi}
+#'\code{\link{numeric}} (default: \code{NA}; \code{method = "SLIDE"}): \cr
+#'
+#'The stretched exponential function suggested by Erfurt et al. (2003) describing the decay of
+#'the RF signal comprises several parameters that might be useful to evaluate the shape of the curves.
+#'For \code{method = "FIT"} these parameters are obtained during the fitting, for \code{method = "SLIDE"} a
+#'rather rough estimation is made using the function \code{\link[minpack.lm]{nlsLM}} and the equation
+#'given above. Note: As this procedure requires more computation time, setting one of these three parameters
+#'to \code{NULL} also prevents the calculation of the remaining two.
+#'
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} or a \code{\link{list}} of \code{RLum.Analysis} objects (\bold{required}): input
+#' object containing data for protocol analysis. The function expects to find at least two curves in the
+#' \code{\linkS4class{RLum.Analysis}} object: (1) RF_nat, (2) RF_reg. If a \code{list} is provided as
+#' input all other parameters can be provided as \code{list} as well to gain full control.
+#'
+#' @param sequence_structure \code{\link{vector}} \link{character} (with
+#' default): specifies the general sequence structure. Allowed steps are
+#' \code{NATURAL}, \code{REGENERATED}. In addition any other character is
+#' allowed in the sequence structure; such curves will be ignored during the analysis.
+#'
+#' @param RF_nat.lim \code{\link{vector}} (with default): set minimum and maximum
+#' channel range for natural signal fitting and sliding. If only one value is provided this
+#' will be treated as minimum value and the maximum limit will be added automatically.
+#'
+#' @param RF_reg.lim \code{\link{vector}} (with default): set minimum and maximum
+#' channel range for regenerated signal fitting and sliding. If only one value is provided this
+#' will be treated as minimum value and the maximum limit will be added automatically.
+#'
+#' @param method \code{\link{character}} (with default): setting method applied
+#' for the data analysis. Possible options are \code{"FIT"} or \code{"SLIDE"}.
+#'
+#' @param method.control \code{\link{list}} (optional): parameters to control the method, that can
+#' be passed to the chosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
+#' 'minFactor' and for (2) \code{method = "SLIDE"}: 'correct_onset', 'show_density', 'show_fit', 'trace'.
+#' See details.
+#'
+#' @param test_parameters \code{\link{list} (with default)}: set test parameters.
+#' Supported parameters are: \code{curves_ratio}, \code{residuals_slope} (only for
+#' \code{method = "SLIDE"}), \code{curves_bounds}, \code{dynamic_ratio},
+#' \code{lambda}, \code{beta} and \code{delta.phi}. All input: \code{\link{numeric}}
+#' values, \code{NA} and \code{NULL} (see Details for further information)
+#'
+#' @param n.MC \code{\link{numeric}} (with default): set number of Monte
+#' Carlo runs for start parameter estimation (\code{method = "FIT"}) or
+#' error estimation (\code{method = "SLIDE"}). Note: Large values will
+#' significantly increase the computation time
+#'
+#' @param txtProgressBar \code{\link{logical}} (with default): enables \code{TRUE} or
+#' disables \code{FALSE} the progression bar during MC runs
+#'
+#' @param plot \code{\link{logical}} (with default): plot output (\code{TRUE}
+#' or \code{FALSE})
+#'
+#' @param plot_reduced \code{\link{logical}} (optional): provides a reduced plot output if enabled
+#' to allow common R plot combinations, e.g., \code{par(mfrow(...))}. If \code{TRUE} no residual plot
+#' is returned; it has no effect if \code{plot = FALSE}
+#'
+#' @param \dots further arguments that will be passed to the plot output.
+#' Currently supported arguments are \code{main}, \code{xlab}, \code{ylab},
+#' \code{xlim}, \code{ylim}, \code{log}, \code{legend} (\code{TRUE/FALSE}),
+#' \code{legend.pos}, \code{legend.text} (passes argument to x,y in
+#' \code{\link[graphics]{legend}}), \code{xaxt}
+#'
+#'
+#' @return A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+#' returned:\cr
+#'
+#' \bold{@data}\cr
+#' $ data: \code{\link{data.frame}} table with De and corresponding values\cr
+#' ..$ DE : \code{numeric}: the obtained equivalent dose\cr
+#' ..$ DE.ERROR : \code{numeric}: (only method = "SLIDE") standard deviation obtained from MC runs \cr
+#' ..$ DE.LOWER : \code{numeric}: 2.5\% quantile for De values obtained by MC runs \cr
+#' ..$ DE.UPPER : \code{numeric}: 97.5\% quantile for De values obtained by MC runs  \cr
+#' ..$ DE.STATUS  : \code{character}: test parameter status\cr
+#' ..$ RF_NAT.LIM  : \code{character}: used RF_nat curve limits \cr
+#' ..$ RF_REG.LIM : \code{character}: used RF_reg curve limits\cr
+#' ..$ POSITION : \code{integer}: (optional) position of the curves\cr
+#' ..$ DATE : \code{character}: (optional) measurement date\cr
+#' ..$ SEQUENCE_NAME : \code{character}: (optional) sequence name\cr
+#' ..$ UID : \code{character}: unique data set ID \cr
+#' $ test_parameters : \code{\link{data.frame}} table test parameters \cr
+#' $ fit : \code{\link{nls}} \code{nlsModel} object \cr
+#' $ slide : \code{\link{list}} data from the sliding process, including the sliding matrix\cr
+#'
+#' \bold{@info}\cr
+#' $ call : \code{\link[methods]{language-class}}: the original function call \cr
+#'
+#' The output (\code{data}) should be accessed using the
+#' function \code{\link{get_RLum}}
+#'
+#' @note \bold{[THIS FUNCTION HAS BETA-STATUS]}\cr
+#'
+#' This function assumes that there is no sensitivity change during the
+#' measurements (natural vs. regenerated signal), which is in contrast to the
+#' findings of Buylaert et al. (2012). Furthermore, in the course of ongoing research this function has
+#' been almost fully re-written, but further thorough tests are still pending!
+#' However, as a lot of new package functionality was introduced with the changes made
+#' for this function, and to allow a part of such tests, the renewed code was made part
+#' of the current package.\cr
+#'
+#' @section Function version: 0.6.11
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
+#' \code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}
+#'
+#'
+#' @references Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T.,
+#' 2012. IR-RF dating of sand-sized K-feldspar extracts: A test of accuracy.
+#' Radiation Measurements 44 (5-6), 560-565. doi: 10.1016/j.radmeas.2012.06.021
+#'
+#' Erfurt, G., Krbetschek, M.R., 2003. IRSAR - A single-aliquot
+#' regenerative-dose dating protocol applied to the infrared radiofluorescence
+#' (IR-RF) of coarse- grain K-feldspar. Ancient TL 21, 35-42.
+#'
+#' Erfurt, G., 2003. Infrared luminescence of Pb+ centres in potassium-rich
+#' feldspars. physica status solidi (a) 200, 429-438.
+#'
+#' Erfurt, G., Krbetschek, M.R., 2003. Studies on the physics of the infrared
+#' radioluminescence of potassium feldspar and on the methodology of its
+#' application to sediment dating. Radiation Measurements 37, 505-510.
+#'
+#' Erfurt, G., Krbetschek, M.R., Bortolot, V.J., Preusser, F., 2003. A fully
+#' automated multi-spectral radioluminescence reading system for geochronometry
+#' and dosimetry. Nuclear Instruments and Methods in Physics Research Section
+#' B: Beam Interactions with Materials and Atoms 207, 487-499.
+#'
+#' Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New
+#' luminescence measurement facilities in retrospective dosimetry. Radiation
+#' Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006
+#'
+#' Trautmann, T., 2000. A study of radioluminescence kinetics of natural
+#' feldspar dosimeters: experiments and simulations. Journal of Physics D:
+#' Applied Physics 33, 2304-2310.
+#'
+#' Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1998.
+#' Investigations of feldspar radioluminescence: potential for a new dating
+#' technique. Radiation Measurements 29, 421-425.
+#'
+#' Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1999. Feldspar
+#' radioluminescence: a new dating method and its physical background. Journal
+#' of Luminescence 85, 45-58.
+#'
+#' Trautmann, T., Krbetschek, M.R., Stolz, W., 2000. A systematic study of the
+#' radioluminescence properties of single feldspar grains. Radiation
+#' Measurements 32, 685-690.
+#'
+#'
+#' @keywords datagen
+#'
+#'
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.RLum.Analysis, envir = environment())
+#'
+#' ##(1) perform analysis using the method 'FIT'
+#' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data)
+#'
+#' ##show De results and test parameter results
+#' get_RLum(results, data.object = "data")
+#' get_RLum(results, data.object = "test_parameters")
+#'
+#' ##(2) perform analysis using the method 'SLIDE'
+#' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data, method = "SLIDE", n.MC = 1)
+#'
+#' \dontrun{
+#' ##(3) perform analysis using the method 'SLIDE' and method control option
+#' ## 'trace'
+#' results <- analyse_IRSAR.RF(
+#'  object = IRSAR.RF.Data,
+#'  method = "SLIDE",
+#'  method.control = list(trace = TRUE))
+#'
+#' }
+#'
+#' @export
+analyse_IRSAR.RF<- function(
+  object,
+  sequence_structure = c("NATURAL", "REGENERATED"),
+  RF_nat.lim = NULL,
+  RF_reg.lim = NULL,
+  method = "FIT",
+  method.control = NULL,
+  test_parameters = NULL,
+  n.MC = 10,
+  txtProgressBar = TRUE,
+  plot = TRUE,
+  plot_reduced = FALSE,
+  ...
+){
+
+  ##TODO
+  ## - if a file path is given, the function should try to find out whether an XSYG-file or
+  ##   a BIN-file is provided
+
+  # SELF CALL -----------------------------------------------------------------------------------
+  if(is.list(object)){
+
+    ##extend the list of arguments if set
+
+    ##sequence_structure
+    sequence_structure <- rep(list(sequence_structure), length = length(object))
+
+    ##RF_nat.lim
+    RF_nat.lim <- rep(list(RF_nat.lim), length = length(object))
+
+    ##RF_reg.lim
+    RF_reg.lim <- rep(list(RF_reg.lim), length = length(object))
+
+    ##method
+    method <- rep(list(method), length = length(object))
+
+    ##method.control
+    method.control <- rep(list(method.control), length = length(object))
+
+    ##test_parameters
+    if(is(test_parameters[[1]], "list")){
+      test_parameters <- rep(test_parameters, length = length(object))
+
+    }else{
+     test_parameters <- rep(list(test_parameters), length = length(object))
+
+    }
+
+
+    ##n.MC
+    n.MC <- rep(list(n.MC), length = length(object))
+
+    ##main
+    if("main"%in% names(list(...))){
+
+      if(is(list(...)$main, "list")){
+        temp_main <- rep(list(...)$main, length = length(object))
+
+      }else{
+        temp_main <- rep(list(list(...)$main), length = length(object))
+
+      }
+
+    }else{
+      temp_main <- as.list(paste0("ALQ #",1:length(object)))
+
+    }
+
+
+    ##run analysis
+    temp <- lapply(1:length(object), function(x){
+
+      analyse_IRSAR.RF(
+        object = object[[x]],
+        sequence_structure = sequence_structure[[x]],
+        RF_nat.lim = RF_nat.lim[[x]],
+        RF_reg.lim = RF_reg.lim[[x]],
+        method = method[[x]],
+        method.control = method.control[[x]],
+        test_parameters = test_parameters[[x]],
+        n.MC = n.MC[[x]],
+        txtProgressBar = txtProgressBar,
+        plot = plot,
+        plot_reduced = plot_reduced,
+        main = temp_main[[x]],
+        ...)
+    })
+
+    ##combine everything into one RLum.Results object as this is what was intended ... only
+    ##one object
+
+    ##merge results and check if the output became NULL
+    results <- merge_RLum(temp)
+
+    ##DO NOT use invisible() here, otherwise the merged results would not be returned
+    if(length(results) == 0){
+      return(NULL)
+
+    }else{
+      return(results)
+
+    }
+
+  }
+
+
+  ##===============================================================================================#
+  ## INTEGRITY TESTS AND SEQUENCE STRUCTURE TESTS
+  ##===============================================================================================#
+
+  ##MISSING INPUT
+  if(missing("object")){
+    stop("[analyse_IRSAR.RF()] No input 'object' set!")
+  }
+
+  ##INPUT OBJECTS
+  if(!is(object, "RLum.Analysis")){
+    stop("[analyse_IRSAR.RF()] Input object is not of type 'RLum.Analysis'!")
+  }
+
+  ##CHECK OTHER ARGUMENTS
+  if(!is(sequence_structure, "character")){
+    stop("[analyse_IRSAR.RF()] argument 'sequence_structure' needs to be of type character.")
+  }
+
+    ##n.MC
+    if(!is(n.MC, "numeric") || n.MC <= 0){
+      stop("[analyse_IRSAR.RF()] argument 'n.MC' has to be of type integer and >= 0")
+    }
+
+
+
+  ##SELECT ONLY MEASURED CURVES
+  ## (this is not really necessary but rather user friendly)
+  if(!length(suppressWarnings(get_RLum(object, curveType= "measured"))) == 0){
+    object <- get_RLum(object, curveType= "measured", drop = FALSE)
+
+  }
+
+  ##INVESTIGATE SEQUENCE OBJECT STRUCTURE
+
+  ##grab the object structure
+  temp.sequence_structure <- structure_RLum(object)
+
+  ##grab the name of the sequence and the position; this will be useful later on
+  ##name
+  if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "name")))) {
+    aliquot.sequence_name <-
+      get_RLum(get_RLum(object, record.id = 1), info.object = "name")
+
+  }else{
+    aliquot.sequence_name <- NA
+
+  }
+
+
+  ##position
+  if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "position")))){
+    aliquot.position <-
+      get_RLum(get_RLum(object, record.id = 1), info.object = "position")
+
+  }else{
+    aliquot.position <- NA
+
+  }
+
+  ##date
+  if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "startDate")))){
+    aliquot.date <-
+      get_RLum(get_RLum(object, record.id = 1), info.object = "startDate")
+
+    ##transform as far as the format can be identified
+    if (nchar(aliquot.date) == 14) {
+      aliquot.date <-
+        paste(c(
+          substr(aliquot.date, 1,4),substr(aliquot.date, 5,6), substr(aliquot.date, 7,8)
+        ), collapse = "-")
+
+    }
+
+  }else{
+    aliquot.date <- NA
+
+  }
+
+
+
+
+  ##set structure values
+  temp.sequence_structure$protocol.step <-
+    rep(sequence_structure, length_RLum(object))[1:length_RLum(object)]
+
+  ##check that the natural curve is not longer than the regenerated curve
+  if (temp.sequence_structure[1,"n.channels"] > temp.sequence_structure[2,"n.channels"]) {
+    stop(
+      "[analyse_IRSAR.RF()] Number of data channels in RF_nat > RF_reg. This is not supported!"
+    )
+
+  }
+
+  ##===============================================================================================#
+  ## SET CURVE LIMITS
+  ##===============================================================================================#
+  ##the setting here will be valid for all subsequent operations
+
+  ##01
+  ##first get allowed curve limits, this makes the subsequent checks easier and the code
+  ##easier to read
+  RF_nat.lim.default <- c(1,max(
+    subset(
+      temp.sequence_structure,
+      temp.sequence_structure$protocol.step == "NATURAL"
+    )$n.channels
+  ))
+
+  RF_reg.lim.default <- c(1,max(
+    subset(
+      temp.sequence_structure,
+      temp.sequence_structure$protocol.step == "REGENERATED"
+    )$n.channels
+  ))
+
+
+  ##02 - check boundaries
+  ##RF_nat.lim
+  if (is.null(RF_nat.lim) || any(is.na(RF_nat.lim))) {
+    RF_nat.lim <- RF_nat.lim.default
+
+  }else {
+    ##this allows providing only one boundary; the 2nd will be added automatically
+    if (length(RF_nat.lim) == 1) {
+      RF_nat.lim <- c(RF_nat.lim, RF_nat.lim.default[2])
+
+    }
+
+    if (min(RF_nat.lim) < RF_nat.lim.default[1] |
+        max(RF_nat.lim) > RF_nat.lim.default[2]) {
+      RF_nat.lim <- RF_nat.lim.default
+
+      warning(paste0(
+        "RF_nat.lim out of bounds, reset to: RF_nat.lim = c(",
+        paste(range(RF_nat.lim), collapse = ":")
+      ),")")
+    }
+
+  }
+
+  ##RF_reg.lim
+  ##
+  if (is.null(RF_reg.lim)) {
+    RF_reg.lim <- RF_reg.lim.default
+
+  }else {
+    ##this allows providing only one boundary; the 2nd will be added automatically
+    if (length(RF_reg.lim) == 1) {
+      RF_reg.lim <- c(RF_reg.lim, RF_reg.lim.default[2])
+
+    }
+
+    if (min(RF_reg.lim) < RF_reg.lim.default[1] |
+        max(RF_reg.lim) > RF_reg.lim.default[2]) {
+      RF_reg.lim <- RF_reg.lim.default
+
+      warning(paste0(
+        "RF_reg.lim out of bounds, reset to: RF_reg.lim = c(",
+        paste(range(RF_reg.lim), collapse = ":")
+      ),")")
+
+    }
+  }
+
+  ##check if intervals make sense at all
+  if(length(RF_reg.lim[1]:RF_reg.lim[2]) < RF_nat.lim[2]){
+    RF_reg.lim[2] <- RF_reg.lim[2] + abs(length(RF_reg.lim[1]:RF_reg.lim[2]) - RF_nat.lim[2]) + 1
+
+    warning(paste0("Length intervall RF_reg.lim < length RF_nat. Reset to RF_reg.lim = c(",
+                   paste(range(RF_reg.lim), collapse=":")),")")
+
+  }
+
+
+  ##===============================================================================================#
+  ## SET METHOD CONTROL PARAMETER - FOR BOTH METHODS
+  ##===============================================================================================#
+  ##
+  ##set supported values with default
+  method.control.settings <- list(
+    trace = FALSE,
+    maxiter = 500,
+    warnOnly = FALSE,
+    minFactor = 1 / 4096,
+    correct_onset = TRUE,
+    show_density = TRUE,
+    show_fit = FALSE,
+    n.MC = 1000
+  )
+
+  ##modify list if necessary
+  if(!is.null(method.control)){
+
+    if(!is(method.control, "list")){
+      stop("[analyse_IRSAR.RF()] 'method.control' has to be of type 'list'!")
+
+    }
+
+    ##check whether this arguments are supported at all
+    if (length(which(
+      names(method.control) %in% names(method.control.settings) == FALSE
+    ) != 0)) {
+      temp.text <- paste0(
+        "[analyse_IRSAR.RF()] Argument(s) '",
+        paste(names(method.control)[which(names(method.control) %in% names(method.control.settings) == FALSE)], collapse = " and "),
+        "' are not supported for 'method.control'. Supported arguments are: ",
+        paste(names(method.control.settings), collapse = ", ")
+      )
+
+      warning(temp.text)
+      rm(temp.text)
+
+    }
+
+    ##modify list
+    method.control.settings <- modifyList(x = method.control.settings, val = method.control)
+
+  }
+
+
+  ##===============================================================================================#
+  ## SET PLOT PARAMETERS
+  ##===============================================================================================#
+
+  ##get channel resolution (should be equal for all curves, but if not the mean is taken)
+  resolution.RF <- round(mean((temp.sequence_structure$x.max/temp.sequence_structure$n.channels)),digits=1)
+
+
+  plot.settings <- list(
+    main = "IR-RF",
+    xlab = "Time [s]",
+    ylab = paste0("IR-RF [cts/", resolution.RF," s]"),
+    log = "",
+    cex = 1,
+    legend = TRUE,
+    legend.text = c("RF_nat","RF_reg"),
+    legend.pos = "top",
+    xaxt = "s"
+    ##xlim and ylim see below as they have to be modified differently
+  )
+
+  ##modify list if something was set
+  plot.settings <- modifyList(plot.settings, list(...))
+
+  ##=============================================================================#
+  ## ANALYSIS
+  ##=============================================================================#
+
+  ##grep first regenerated curve
+  RF_reg <- as.data.frame(object@records[[
+    temp.sequence_structure[temp.sequence_structure$protocol.step=="REGENERATED","id"]]]@data)
+
+    ##correct the onset of detection by using the first time value
+    if (method == "SLIDE" &
+        method.control.settings$correct_onset == TRUE) {
+      RF_reg[,1] <- RF_reg[,1] - RF_reg[1,1]
+
+    }
+
+
+  RF_reg.x <- RF_reg[RF_reg.lim[1]:RF_reg.lim[2],1]
+  RF_reg.y <- RF_reg[RF_reg.lim[1]:RF_reg.lim[2],2]
+
+
+  ##grep values from natural signal
+  RF_nat <- as.data.frame(object@records[[
+    temp.sequence_structure[temp.sequence_structure$protocol.step=="NATURAL","id"]]]@data)
+
+    ##correct the onset of detection by using the first time value
+  if (method == "SLIDE" &
+      method.control.settings$correct_onset == TRUE) {
+    RF_nat[,1] <- RF_nat[,1] - RF_nat[1,1]
+  }
+
+
+  ##limit values to fit range (at least to the minimum)
+  RF_nat.limited<- RF_nat[min(RF_nat.lim):max(RF_nat.lim),]
+
+  ##calculate some useful parameters
+  RF_nat.mean <- mean(RF_nat.limited[,2])
+  RF_nat.sd <- sd(RF_nat.limited[,2])
+
+  RF_nat.error.lower <- quantile(RF_nat.limited[,2], 0.975)
+  RF_nat.error.upper <- quantile(RF_nat.limited[,2], 0.025)
+
+
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+  ##METHOD FIT
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+    ## REGENERATED SIGNAL
+    # set function for fitting ------------------------------------------------
+
+    fit.function <-
+      as.formula(y ~ phi.0 - (delta.phi * ((1 - exp(
+        -lambda * x
+      )) ^ beta)))
+
+    ##stretched exponential function according to Erfurt et al. (2003)
+    ## + phi.0 >> initial IR-RF flux
+    ## + delta.phi >> dose dependent change of the IR-RF flux
+    ## + lambda >> exponential parameter
+    ## + beta >> dispersive factor
+
+    # set start parameter estimation ------------------------------------------
+
+    fit.parameters.start <- c(
+      phi.0 = max(RF_reg.y),
+      lambda = 0.0001,
+      beta = 1,
+      delta.phi = 1.5 * (max(RF_reg.y) - min(RF_reg.y))
+    )
+
+  if(method == "FIT"){
+
+    # start nls fitting -------------------------------------------------------
+
+    ##Monte Carlo approach for fitting
+    fit.parameters.results.MC.results <- data.frame()
+
+    ##produce set of start parameters
+    phi.0.MC <- rep(fit.parameters.start["phi.0"], n.MC)
+    lambda.MC <- seq(0.0001, 0.001, by=(0.001-0.0001)/n.MC) ##TODO
+    beta.MC <- rep(fit.parameters.start["beta"], n.MC)
+    delta.phi.MC <- rep(fit.parameters.start["delta.phi"], n.MC)
+
+    ##start fitting loop for MC runs
+    for(i in 1:n.MC){
+
+      fit.MC <- try(nls(
+        fit.function,
+        trace = FALSE,
+        data = list(x = RF_reg.x, y = RF_reg.y),
+        algorithm = "port",
+        start = list(
+          phi.0 = phi.0.MC[i],
+          delta.phi = delta.phi.MC[i],
+          lambda = lambda.MC[i],
+          beta = beta.MC[i]
+        ),
+        nls.control(
+          maxiter = 100,
+          warnOnly = FALSE,
+          minFactor = 1 / 1024
+        ),
+        lower = c(
+          phi.0 = .Machine$double.xmin,
+          delta.phi = .Machine$double.xmin,
+          lambda = .Machine$double.xmin,
+          beta = .Machine$double.xmin
+        ),
+        upper = c(
+          phi.0 = max(RF_reg.y),
+          delta.phi = max(RF_reg.y),
+          lambda = 1,
+          beta = 100
+        )
+      ),
+      silent = TRUE
+      )
+
+      if(inherits(fit.MC,"try-error") == FALSE) {
+        temp.fit.parameters.results.MC.results <- coef(fit.MC)
+
+        fit.parameters.results.MC.results[i,"phi.0"] <-
+          temp.fit.parameters.results.MC.results["phi.0"]
+        fit.parameters.results.MC.results[i,"lambda"] <-
+          temp.fit.parameters.results.MC.results["lambda"]
+        fit.parameters.results.MC.results[i,"delta.phi"] <-
+          temp.fit.parameters.results.MC.results["delta.phi"]
+        fit.parameters.results.MC.results[i,"beta"] <-
+          temp.fit.parameters.results.MC.results["beta"]
+
+      }
+    }
+
+    ##FINAL fitting after successful MC
+    if(length(na.omit(fit.parameters.results.MC.results)) != 0){
+
+      ##choose median as final fit version
+      fit.parameters.results.MC.results <- sapply(na.omit(fit.parameters.results.MC.results), median)
+
+      ##try final fitting
+      fit <- try(nls(
+        fit.function,
+        trace = method.control.settings$trace,
+        data = data.frame(x = RF_reg.x, y = RF_reg.y),
+        algorithm = "port",
+        start = list(
+          phi.0 = fit.parameters.results.MC.results["phi.0"],
+          delta.phi = fit.parameters.results.MC.results["delta.phi"],
+          lambda = fit.parameters.results.MC.results["lambda"],
+          beta = fit.parameters.results.MC.results["beta"]
+        ),
+        nls.control(
+          maxiter = method.control.settings$maxiter,
+          warnOnly = method.control.settings$warnOnly,
+          minFactor = method.control.settings$minFactor
+        ),
+        lower = c(
+          phi.0 = .Machine$double.xmin,
+          delta.phi = .Machine$double.xmin,
+          lambda = .Machine$double.xmin,
+          beta = .Machine$double.xmin
+        ),
+        upper = c(
+          phi.0 = max(RF_reg.y),
+          delta.phi = max(RF_reg.y),
+          lambda = 1, beta = 100
+        )
+      ),
+      silent = FALSE
+      )
+    }else{
+
+      fit <- NA
+      class(fit) <- "try-error"
+
+    }
+
+    # get parameters ----------------------------------------------------------
+    # and with that the final De
+
+    if (!inherits(fit,"try-error")) {
+      fit.parameters.results <- coef(fit)
+
+    }else{
+      fit.parameters.results <- NA
+
+    }
+
+    ##calculate De value
+    if (!is.na(fit.parameters.results[1])) {
+      De <- suppressWarnings(round(log(
+        -((RF_nat.mean - fit.parameters.results["phi.0"]) /
+            -fit.parameters.results["delta.phi"]
+        ) ^ (1 / fit.parameters.results["beta"]) + 1
+      ) /
+        -fit.parameters.results["lambda"], digits =
+        2))
+
+      ##This could be solved with a MC simulation, but for this the code has to be adjusted
+      ##The question is: where do the parameters come from?
+      ##TODO
+      De.error <- NA
+
+      De.lower <- suppressWarnings(round(log(
+        -((RF_nat.error.lower - fit.parameters.results["phi.0"]) /
+            -fit.parameters.results["delta.phi"]
+        ) ^ (1 / fit.parameters.results["beta"]) + 1
+      ) /
+        -fit.parameters.results["lambda"],digits = 2))
+
+      De.upper <- suppressWarnings(round(log(
+        -((RF_nat.error.upper - fit.parameters.results["phi.0"]) /
+            -fit.parameters.results["delta.phi"]
+        ) ^ (1 / fit.parameters.results["beta"]) + 1
+      ) /
+        -fit.parameters.results["lambda"],digits = 2))
+
+    }else{
+      De <- NA
+      De.error <- NA
+      De.lower <- NA
+      De.upper <- NA
+
+    }
+  }
+
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+  ##METHOD SLIDE - ANALYSIS
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+  else if(method == "SLIDE"){
+
+    ##convert to matrix (in fact above the matrix data were first transferred to data.frames ...
+    ##here we correct this ... again)  ##TODO
+    RF_nat.limited <- as.matrix(RF_nat.limited)
+    RF_reg.limited <- matrix(c(RF_reg.x, RF_reg.y), ncol = 2)
+    RF_nat <- as.matrix(RF_nat)
+
+    ##DEFINE FUNCTION FOR SLIDING
+    ##FIND MINIMUM - this is done in a function so that it can be further used for MC simulations
+    sliding <- function(RF_nat,
+                        RF_nat.limited,
+                        RF_reg.limited,
+                        n.MC = method.control.settings$n.MC,
+                        numerical.only = FALSE){
+
+
+      ##(0) set objects ... nomenclature as used in Frouin et al.; please note that here the index
+      ##is used instead of the real time values
+      t_max.id <- nrow(RF_reg.limited)
+      t_max_nat.id <- nrow(RF_nat.limited)
+      t_min.id <- 1
+      t_min <- RF_nat.limited[1,1]
+
+      ##(1) calculate sum of residual squares using internal Rcpp function
+
+      #pre-allocate object
+      temp.sum.residuals <- vector("numeric", length = t_max.id - t_max_nat.id)
+
+      ##calculate sum of squared residuals ... for the entire set
+      temp.sum.residuals <-
+        .analyse_IRSARRF_SRS(
+          values_regenerated_limited =  RF_reg.limited[,2],
+          values_natural_limited = RF_nat.limited[,2],
+          n_MC =  n.MC
+        )
+
+
+      #(2) get minimum value (index and time value)
+      t_n.id <- which.min(temp.sum.residuals$sliding_vector)
+
+      temp.sliding.step <- RF_reg.limited[t_n.id] - t_min
+
+      ##(3) slide curve graphically ... full data set we need this for the plotting later
+      RF_nat.slided <- matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2]), ncol = 2)
+      t_n <- RF_nat.slided[1,1]
+
+      ##the same for the MC runs of the minimum values
+      t_n.MC <-
+        vapply(X = 1:length(temp.sum.residuals$sliding_vector_min_MC), FUN = function(x) {
+          t_n.id.MC <-
+            which(temp.sum.residuals$sliding_vector == temp.sum.residuals$sliding_vector_min_MC[x])
+          temp.sliding.step.MC <- RF_reg.limited[t_n.id.MC] - t_min
+          t_n.MC <- (RF_nat[,1] + temp.sliding.step.MC)[1]
+          return(t_n.MC)
+
+        }, FUN.VALUE = vector(mode = "numeric", length = 1))
+
+
+      ##(4) get residuals (needed to be plotted later)
+      ## they cannot be longer than the RF_reg.limited curve
+      if((t_n.id+length(RF_nat.limited[,2])-1) >= nrow(RF_reg.limited)){
+        ##keep the '-' at the end of the line, otherwise the subtraction is lost
+        residuals <- RF_nat.limited[1:length(t_n.id:nrow(RF_reg.limited)),2] -
+          RF_reg.limited[t_n.id:nrow(RF_reg.limited), 2]
+
+      }else{
+        residuals <- RF_nat.limited[,2] - RF_reg.limited[t_n.id:(t_n.id+length(RF_nat.limited[,2])-1), 2]
+
+      }
+
+      ##(4.1) calculate De from the first channel ... which is t_n here
+      De <- round(t_n, digits = 2)
+      De.MC <- round(t_n.MC, digits = 2)
+      temp.trend.fit <- NA
+
+      ##(5) calculate trend fit
+      if(length(RF_nat.limited[,1]) > length(residuals)){
+        temp.trend.fit <- coef(lm(y~x,
+                                  data.frame(x = RF_nat.limited[1:length(residuals),1], y = residuals)))
+
+      }else{
+        temp.trend.fit <- coef(lm(y~x, data.frame(x = RF_nat.limited[,1], y = residuals)))
+
+      }
+
+
+
+      ##return values, limited if the full data set is not needed
+      if (numerical.only == FALSE) {
+        return(
+          list(
+            De = De,
+            De.MC = De.MC,
+            residuals = residuals,
+            trend.fit = temp.trend.fit,
+            RF_nat.slided = RF_nat.slided,
+            t_n.id = t_n.id,
+            squared_residuals = temp.sum.residuals$sliding_vector
+          )
+        )
+      }else{
+        return(list(De = De, De.MC = De.MC))
+      }
+
+    }##end of function sliding()
+
+
+    ##PERFORM sliding and overwrite values
+    slide <-  sliding(
+      RF_nat = RF_nat,
+      RF_nat.limited = RF_nat.limited,
+      RF_reg.limited = RF_reg.limited
+    )
+
+
+    ##write results in variables
+    De <- slide$De
+    residuals <- slide$residuals
+    RF_nat.slided <-  slide$RF_nat.slided
+
+
+    # ERROR ESTIMATION
+    # MC runs for error calculation ---------------------------------------------------------------
+
+    ##set residual matrix for MC runs, i.e. set up list of pseudo RF_nat curves as function
+    ##(i.e., bootstrap from the natural curve distribution)
+
+    slide.MC.list <- lapply(1:n.MC,function(x) {
+
+      ##also here we have to account for the case that users do not understand
+      ##what they are doing ...
+      if(slide$t_n.id + nrow(RF_nat.limited)-1 > nrow(RF_reg.limited)){
+        cbind(
+          RF_nat.limited[1:length(slide$t_n.id:nrow(RF_reg.limited)),1],
+          (RF_reg.limited[slide$t_n.id:nrow(RF_reg.limited) ,2]
+           + sample(residuals,
+                    size = length(slide$t_n.id:nrow(RF_reg.limited)),
+                    replace = TRUE)
+          )
+        )
+
+      }else{
+        cbind(
+          RF_nat.limited[,1],
+          (RF_reg.limited[slide$t_n.id:(slide$t_n.id + nrow(RF_nat.limited)-1) ,2]
+           + sample(residuals, size = nrow(RF_nat.limited), replace = TRUE)
+          )
+        )
+      }
+
+    })
+
+
+    if(txtProgressBar){
+      ##terminal output for MC
+      cat("\n\t Run Monte Carlo loops for error estimation\n")
+
+      ##progress bar
+      pb<-txtProgressBar(min=0, max=n.MC, initial=0, char="=", style=3)
+    }
+
+
+    De.MC <- c(vapply(X = 1:n.MC,
+                    FUN.VALUE = vector("numeric", length = method.control.settings$n.MC),
+                    FUN = function(i){
+
+      temp.slide.MC <- sliding(
+        RF_nat = RF_nat,
+        RF_reg.limited = RF_reg.limited,
+        RF_nat.limited = slide.MC.list[[i]],
+        numerical.only = TRUE
+      )
+
+      ##update progress bar
+      if (txtProgressBar) {
+        setTxtProgressBar(pb, i)
+      }
+
+       ##do nothing else, just report all possible values
+       return(temp.slide.MC[[2]])
+
+    }))
+
+    ##close
+    if(txtProgressBar){close(pb)}
+
+    ##calculate absolute deviation between De and the here newly calculated De.MC
+    ##this is, e.g. ^t_n.1* - ^t_n in Frouin et al.
+    De.diff <- diff(x = c(De, De.MC))
+    De.error <- round(sd(De.MC), digits = 2)
+    De.lower <- De - quantile(De.diff, 0.975)
+    De.upper <- De - quantile(De.diff, 0.025)
+
+  }else{
+
+    warning("Analysis skipped: Unknown method or threshold of test parameter exceeded.")
+
+  }
+
+  ##===============================================================================================#
+  ## TEST PARAMETER
+  ##===============================================================================================#
+  ## Test parameter are evaluated after all the calculations have been done as
+  ## it should be up to the user to decide whether a value should be taken into account or not.
+
+  ##(0)
+  ##set default values and overwrite them if there was something new
+  ##set defaults
+  TP <- list(
+    curves_ratio = 1.001,
+    intersection_ratio = NA,
+    residuals_slope = NA,
+    curves_bounds = ceiling(max(RF_reg.x)),
+    dynamic_ratio = NA,
+    lambda = NA,
+    beta = NA,
+    delta.phi = NA
+  )
+
+    ##modify default values by given input
+    if(!is.null(test_parameters)){TP <- modifyList(TP, test_parameters)}
+
+    ##remove NULL elements from list
+    TP <- TP[!sapply(TP, is.null)]
+
+    ##set list with values we want to evaluate
+    TP <- lapply(TP, function(x){
+      data.frame(THRESHOLD = as.numeric(x), VALUE = NA, STATUS = "OK", stringsAsFactors = TRUE)
+
+    })
+
+
+  ##(1) check if RF_nat > RF_reg, considering the fit range
+  ##TP$curves_ratio
+    if ("curves_ratio" %in% names(TP)) {
+      TP$curves_ratio$VALUE <-
+        sum(RF_nat.limited[,2]) / sum(RF_reg[RF_nat.lim[1]:RF_nat.lim[2], 2])
+
+      if (!is.na(TP$curves_ratio$THRESHOLD)) {
+        TP$curves_ratio$STATUS <-
+          ifelse(TP$curves_ratio$VALUE > TP$curves_ratio$THRESHOLD, "FAILED", "OK")
+      }
+    }
+
+   ##(1.1) check the intersection of RF_nat and RF_reg, considering the fit range
+   ##TP$intersection_ratio
+    if ("intersection_ratio" %in% names(TP)) {
+
+      ##It is, as always, a little bit more complicated ...
+      ##We cannot just normalise both curves and compare ratios: with increasing De the
+      ##shape of the RF_nat curve cannot be the same as that of the RF_reg curve at t = 0.
+      ##Therefore we have to find the segment in the RF_reg curve that fits the RF_nat curve
+      ##
+      ##(1) get maximum count value for RF_nat
+      IR_RF_nat.max <- max(RF_nat.limited[,2])
+
+      ##(2) find the corresponding time value for RF_reg (here not limited)
+      IR_RF_reg.corresponding_id <- which.min(abs(RF_reg[,2] - IR_RF_nat.max))
+
+      ##(3) calculate the ratio, but only starting from the point where both curves correspond
+      ##in terms of intensity, otherwise the ratio cannot be correct
+
+      ##the boundary check is necessary to avoid errors
+      if((IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2])) > length(RF_reg[,2])){
+        TP$intersection_ratio$VALUE <- Inf
+
+      }else{
+
+      TP$intersection_ratio$VALUE <-
+        abs(1 - sum((RF_nat.limited[, 2] / max(RF_nat.limited[, 2]))) /
+              sum(RF_reg[IR_RF_reg.corresponding_id:(IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2]) - 1), 2] /
+                    max(RF_reg[IR_RF_reg.corresponding_id:(IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2]) - 1), 2])))
+
+      if (!is.na(TP$intersection_ratio$THRESHOLD)) {
+        TP$intersection_ratio$STATUS <-
+          ifelse(TP$intersection_ratio$VALUE > TP$intersection_ratio$THRESHOLD, "FAILED", "OK")
+      }
+
+      rm(IR_RF_nat.max, IR_RF_reg.corresponding_id)
+
+      }
+    }
+
+  ##(2) check the slope of the residuals using a linear fit
+  ##TP$residuals_slope
+    if ("residuals_slope" %in% names(TP)) {
+      if (exists("slide")) {
+        TP$residuals_slope$VALUE <- abs(slide$trend.fit[2])
+
+        if (!is.na(TP$residuals_slope$THRESHOLD)) {
+          TP$residuals_slope$STATUS <- ifelse(
+            TP$residuals_slope$VALUE > TP$residuals_slope$THRESHOLD, "FAILED", "OK")
+
+        }
+      }
+    }
+
+  ##(3) calculate the dynamic range of the regenerated curve
+  ##TP$dynamic_ratio
+  if ("dynamic_ratio"%in%names(TP)){
+    TP.dynamic_ratio <- subset(temp.sequence_structure,
+                               temp.sequence_structure$protocol.step == "REGENERATED")
+    TP$dynamic_ratio$VALUE <- TP.dynamic_ratio$y.max/TP.dynamic_ratio$y.min
+
+    if (!is.na(TP$dynamic_ratio$THRESHOLD)){
+      TP$dynamic_ratio$STATUS  <- ifelse(
+        TP$dynamic_ratio$VALUE < TP$dynamic_ratio$THRESHOLD , "FAILED", "OK")
+    }
+  }
+
+
+  ##(4) decay parameter
+  ##TP$lambda
+  if ("lambda"%in%names(TP) & "beta"%in%names(TP) & "delta.phi"%in%names(TP)){
+
+    fit.lambda <- try(minpack.lm::nlsLM(
+        fit.function,
+        data = data.frame(x = RF_reg.x, y = RF_reg.y),
+        algorithm = "LM",
+        start = list(
+          phi.0 = fit.parameters.start["phi.0"],
+          delta.phi = fit.parameters.start["delta.phi"],
+          lambda = fit.parameters.start["lambda"],
+          beta = fit.parameters.start["beta"]
+        ),
+        lower = c(
+          phi.0 = .Machine$double.xmin,
+          delta.phi = .Machine$double.xmin,
+          lambda = .Machine$double.xmin,
+          beta = .Machine$double.xmin
+        ),
+        upper = c(
+          phi.0 = max(RF_reg.y),
+          delta.phi = max(RF_reg.y),
+          lambda = 1, beta = 100
+        )
+      ),
+    silent = TRUE
+    )
+
+    if(!inherits(fit.lambda, "try-error")){
+       temp.coef <- coef(fit.lambda)
+
+       TP$lambda$VALUE <- temp.coef["lambda.lambda"]
+       TP$beta$VALUE <- temp.coef["beta.beta"]
+       TP$delta.phi$VALUE <- temp.coef["delta.phi.delta.phi"]
+
+       if (!is.na( TP$lambda$THRESHOLD)){
+        TP$lambda$STATUS <- ifelse(TP$lambda$VALUE <= TP$lambda$THRESHOLD, "FAILED", "OK")
+       }
+
+       if (!is.na( TP$beta$THRESHOLD)){
+         TP$beta$STATUS <- ifelse(TP$beta$VALUE <= TP$beta$THRESHOLD, "FAILED", "OK")
+       }
+
+       if (!is.na( TP$delta.phi$THRESHOLD)){
+         TP$delta.phi$STATUS <- ifelse(TP$delta.phi$VALUE <= TP$delta.phi$THRESHOLD, "FAILED", "OK")
+       }
+
+    }
+  }
+
+  ##(99) check whether, after sliding, the natural curve still lies within
+  ##the bounds of the regenerated curve
+  ##TP$curves_bounds
+  if (!is.null(TP$curves_bounds)) {
+    if(exists("slide")){
+      ## add one channel on top to make sure the boundary check works
+      TP$curves_bounds$VALUE <- max(RF_nat.slided[RF_nat.lim,1]) + (RF_nat[2,1] - RF_nat[1,1])
+
+       if (!is.na(TP$curves_bounds$THRESHOLD)){
+        TP$curves_bounds$STATUS <- ifelse(TP$curves_bounds$VALUE >= floor(max(RF_reg.x)), "FAILED", "OK")
+       }
+
+
+    }else if(exists("fit")){
+      TP$curves_bounds$VALUE <- De.upper
+
+      if (!is.na(TP$curves_bounds$THRESHOLD)){
+        TP$curves_bounds$STATUS <- ifelse(TP$curves_bounds$VALUE  >= max(RF_reg.x), "FAILED", "OK")
+      }
+    }
+  }
+
+
+  ##Combine everything in a data.frame
+    if(length(TP) != 0) {
+      TP.data.frame <- as.data.frame(
+        cbind(
+          POSITION =  as.integer(aliquot.position),
+          PARAMETER = c(names(TP)),
+          do.call(data.table::rbindlist, args = list(l = TP)),
+          SEQUENCE_NAME = aliquot.sequence_name,
+          UID = NA
+        )
+      )
+
+      ##set De.status to indicate whether there is any problem with the De according to the test parameter
+      if ("FAILED" %in% TP.data.frame$STATUS) {
+        De.status <- "FAILED"
+      }else{
+        De.status <- "OK"
+      }
+
+    }else{
+      De.status <- "OK"
+      TP.data.frame <- NULL
+
+    }
+
+  ##===============================================================================================#
+  ## PLOTTING
+  ##===============================================================================================#
+  if(plot){
+
+
+    ##get internal colour definition
+    col <- get("col", pos = .LuminescenceEnv)
+
+    if (!plot_reduced) {
+
+      ##grep par default and define reset
+      def.par <- par(no.readonly = TRUE)
+      on.exit(par(def.par))
+
+      ##set plot frame, if a method was chosen
+      if (method == "SLIDE" | method == "FIT") {
+        layout(matrix(c(1, 2), 2, 1, byrow = TRUE), c(2), c(1.3, 0.4), TRUE)
+        par(
+          oma = c(1, 1, 1, 1),
+          mar = c(0, 4, 3, 0),
+          cex = plot.settings$cex
+        )
+
+      }
+    }else{
+
+      if(plot.settings[["cex"]] != 1){
+
+        def.par <- par()[["cex"]]
+        on.exit(par(def.par))
+
+        par(cex = plot.settings[["cex"]])
+
+      }
+
+    }
+
+    ##here control xlim and ylim behaviour
+    ##xlim
+    xlim  <- if ("xlim" %in% names(list(...))) {
+      list(...)$xlim
+    } else
+    {
+      if (plot.settings$log == "x" | plot.settings$log == "xy") {
+        c(min(temp.sequence_structure$x.min),max(temp.sequence_structure$x.max))
+
+      }else{
+        c(0,max(temp.sequence_structure$x.max))
+
+      }
+
+    }
+
+    ##ylim
+    ylim  <- if("ylim" %in% names(list(...))) {list(...)$ylim} else
+    {c(min(temp.sequence_structure$y.min), max(temp.sequence_structure$y.max))}
+
+
+    ##open plot area
+    plot(
+      NA,NA,
+      xlim = xlim,
+      ylim = ylim,
+      xlab = ifelse((method != "SLIDE" & method != "FIT") | plot_reduced, plot.settings$xlab," "),
+      xaxt = ifelse((method != "SLIDE" & method != "FIT") | plot_reduced, plot.settings$xaxt,"n"),
+      yaxt = "n",
+      ylab = plot.settings$ylab,
+      main = plot.settings$main,
+      log = plot.settings$log
+    )
+
+    if(De.status == "FAILED"){
+
+      ##build list of failed TP
+      mtext.message <- paste0(
+        "Threshold exceeded for:  ",
+        paste(subset(TP.data.frame, TP.data.frame$STATUS == "FAILED")$PARAMETER, collapse = ", "),". For details see manual.")
+
+      ##print mtext
+      mtext(text = mtext.message,
+            side = 3, outer = TRUE, col = "red",
+            cex = 0.8 * par()[["cex"]])
+      warning(mtext.message)
+
+    }
+
+    ##use scientific format for y-axis
+    labels <- axis(2, labels = FALSE)
+    axis(side = 2, at = labels, labels = format(labels, scientific = TRUE))
+
+    ##(1) plot points that have not been selected
+    points(RF_reg[-(min(RF_reg.lim):max(RF_reg.lim)),1:2], pch=3, col=col[19])
+
+    ##(2) plot points that have been used for the fitting
+    points(RF_reg.x,RF_reg.y, pch=3, col=col[10])
+
+    ##show natural points if no analysis was done
+    if(method != "SLIDE" & method != "FIT"){
+
+      ##add points
+      points(RF_nat, pch = 20, col = "grey")
+      points(RF_nat.limited, pch = 20, col = "red")
+
+      ##legend
+      if (plot.settings$legend) {
+        legend(
+          plot.settings$legend.pos,
+          legend = plot.settings$legend.text,
+          pch = c(19, 3),
+          col = c("red", col[10]),
+          horiz = TRUE,
+          bty = "n",
+          cex = .9 * par()[["cex"]]
+        )
+      }
+
+
+    }
+
+
+    ##Add fitted curve, if possible. This is a graphical control that might be considered
+    ##as useful before further analysis is applied
+    if (method.control.settings$show_fit) {
+
+      if(!is(fit.lambda, "try-error")){
+        fit.lambda_coef <- coef(fit.lambda)
+
+        curve(fit.lambda_coef[[1]]-
+                (fit.lambda_coef[[2]]*
+                   ((1-exp(-fit.lambda_coef[[3]]*x))^fit.lambda_coef[[4]])),
+              add=TRUE,
+              lty = 2,
+              col="red")
+
+        rm(fit.lambda_coef)
+      }else{
+        warning("[analyse_IRSAR.RF()] No fit possible, no fit shown.")
+
+
+      }
+
+    }
+
+    ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+    ## PLOT - METHOD FIT
+    ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+    if(method == "FIT"){
+
+      ##dummy to cheat R CMD check
+      x<-NULL; rm(x)
+
+      ##plot fitted curve
+      curve(fit.parameters.results["phi.0"]-
+              (fit.parameters.results["delta.phi"]*
+                 ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])),
+            add=TRUE,
+            from = RF_reg[min(RF_reg.lim), 1],
+            to = RF_reg[max(RF_reg.lim), 1],
+            col="red")
+
+      ##plotting to show the limitations if RF_reg.lim was chosen
+      ##show fitted curve GREY (previous red curve)
+      curve(fit.parameters.results["phi.0"]-
+              (fit.parameters.results["delta.phi"]*
+                 ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])),
+            add=TRUE,
+            from = min(RF_reg[, 1]),
+            to = RF_reg[min(RF_reg.lim), 1],
+            col="grey")
+
+      ##show fitted curve GREY (after red curve)
+      curve(fit.parameters.results["phi.0"]-
+              (fit.parameters.results["delta.phi"]*
+                 ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])),
+            add=TRUE,
+            from = RF_reg[max(RF_reg.lim), 1],
+            to = max(RF_reg[, 1]),
+            col="grey")
+
+      ##add points
+      points(RF_nat, pch = 20, col = col[19])
+      points(RF_nat.limited, pch = 20, col = col[2])
+
+      ##legend
+      if (plot.settings$legend) {
+        legend(
+          plot.settings$legend.pos,
+          legend = plot.settings$legend.text,
+          pch = c(19, 3),
+          col = c("red", col[10]),
+          horiz = TRUE,
+          bty = "n",
+          cex = .9 * par()[["cex"]]
+        )
+      }
+
+      ##plot range chosen for fitting
+      abline(v=RF_reg[min(RF_reg.lim), 1], lty=2)
+      abline(v=RF_reg[max(RF_reg.lim), 1], lty=2)
+
+      ##plot De if De was calculated
+      if(is.na(De) == FALSE & is.nan(De) == FALSE){
+
+        lines(c(0,De.lower), c(RF_nat.error.lower,RF_nat.error.lower), lty=2, col="grey")
+        lines(c(0,De), c(RF_nat.mean,RF_nat.mean), lty=2, col="red")
+        lines(c(0,De.upper), c(RF_nat.error.upper,RF_nat.error.upper), lty=2, col="grey")
+
+        lines(c(De.lower, De.lower),
+              c(0,RF_nat.error.lower), lty=2, col="grey")
+        lines(c(De,De), c(0, RF_nat.mean), lty=2, col="red")
+        lines(c(De.upper, De.upper),
+              c(0,RF_nat.error.upper), lty=2, col="grey")
+
+      }
+
+      ##Insert fit and result
+      if(is.na(De) != TRUE & (is.nan(De) == TRUE |
+                              De > max(RF_reg.x) |
+                              De.upper > max(RF_reg.x))){
+
+        try(mtext(side=3, substitute(D[e] == De,
+                                     list(De=paste(
+                                       De," (",De.lower," ", De.upper,")", sep=""))),
+                  line=0, cex=0.8 * par()[["cex"]], col="red"), silent=TRUE)
+
+        De.status <- "VALUE OUT OF BOUNDS"
+
+      } else{
+
+        if ("mtext" %in% names(list(...))) {
+          mtext(side = 3, list(...)$mtext)
+        }else{
+          try(mtext(
+            side = 3,
+            substitute(D[e] == De,
+                       list(
+                         De = paste(De," [",De.lower," ; ", De.upper,"]", sep =
+                                      "")
+                       )),
+            line = 0,
+            cex = 0.7 * par()[["cex"]]
+          ),
+          silent = TRUE)
+        }
+
+        De.status <- "OK"
+      }
+
+
+      if (!plot_reduced) {
+
+        ##==lower plot==##
+        par(mar = c(4.2, 4, 0, 0))
+
+        ##plot residuals
+        if (is.na(fit.parameters.results[1]) == FALSE) {
+          plot(
+            RF_reg.x,
+            residuals(fit),
+            xlim = c(0, max(temp.sequence_structure$x.max)),
+            xlab = plot.settings$xlab,
+            yaxt = "n",
+            xaxt = plot.settings$xaxt,
+            type = "p",
+            pch = 20,
+            col = "grey",
+            ylab = "E",
+            log = ""
+          )
+
+          ##add 0 line
+          abline(h = 0)
+        } else{
+          plot(
+            NA,
+            NA,
+            xlim = c(0, max(temp.sequence_structure$x.max)),
+            ylab = "E",
+            xlab = plot.settings$xlab,
+            xaxt = plot.settings$xaxt,
+            ylim = c(-1, 1)
+          )
+          text(x = max(temp.sequence_structure$x.max) / 2,
+               y = 0,
+               "Fitting Error!")
+        }
+
+      }
+    }
+
+    ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+    ## PLOT - METHOD SLIDE
+    ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
+    else if(method == "SLIDE"){
+
+      ##(0) density plot
+      if (method.control.settings$show_density) {
+        ##showing the density only makes sense when we see at least 15 unique values
+        if (length(unique(De.MC)) >= 15) {
+          ##calculate density De.MC
+          density.De.MC <- density(De.MC)
+
+          ##calculate transformation function
+          x.1 <- max(density.De.MC$y)
+          x.2 <- min(density.De.MC$y)
+
+          ##we have to limit the scaling a little bit
+          if (RF_nat.limited[1,2] >
+            max(RF_reg.limited[,2]) - (max(RF_reg.limited[,2]) - min(RF_reg.limited[,2]))*.5) {
+
+            y.1 <- max(RF_reg.limited[,2]) - (max(RF_reg.limited[,2]) - min(RF_reg.limited[,2]))*.5
+
+          }else{
+            y.1 <- RF_nat.limited[1,2]
+
+          }
+
+          y.2 <- par("usr")[3]
+
+          m <- (y.1 - y.2) / (x.1 + x.2)
+          n <- y.1 - m * x.1
+
+          density.De.MC$y <- m * density.De.MC$y + n
+          rm(x.1,x.2,y.1,y.2,m,n)
+
+          polygon(density.De.MC$x,
+                  density.De.MC$y,
+                  col = rgb(0,0.4,0.8,0.5))
+
+        }else{
+          warning("Narrow density distribution, no density distribution plotted!")
+
+        }
+
+      }
+
+      ##(1) plot unused points in grey ... unused points are points outside of the set limit
+      points(
+        matrix(RF_nat.slided[-(min(RF_nat.lim):max(RF_nat.lim)),1:2], ncol = 2),
+        pch = 21, col = col[19]
+      )
+
+      ##(2) add used points
+      points(RF_nat.slided[min(RF_nat.lim):max(RF_nat.lim),], pch = 21, col = col[2],
+             bg = col[2])
+
+      ##(3) add line to show the connection between the first point and the De
+      lines(x = c(RF_nat.slided[1,1], RF_nat.slided[1,1]),
+            y = c(.Machine$double.xmin,RF_nat.slided[1,2]),
+            lty = 2,
+            col = col[2]
+      )
+
+      ##(4) add arrow at the lowest y-coordinate possible to show the sliding
+      if (plot.settings$log != "y" & plot.settings$log != "xy") {
+        shape::Arrows(
+          x0 = 0,
+          y0 = ylim[1],
+          y1 = ylim[1],
+          x1 = RF_nat.slided[1,1],
+          arr.type = "triangle",
+          arr.length = 0.3 * par()[["cex"]],
+          code = 2,
+          col = col[2],
+          arr.adj = 1,
+          arr.lwd = 1
+        )
+      }
+
+      ##TODO
+      ##uncomment here to see all the RF_nat curves produced by the MC runs
+      ##could become a polygon in future versions
+      #lapply(1:n.MC, function(x){lines(slide.MC.list[[x]], col = rgb(0,0,0, alpha = 0.2))})
+
+      ##plot range chosen for fitting
+      abline(v=RF_reg[min(RF_reg.lim), 1], lty=2)
+      abline(v=RF_reg[max(RF_reg.lim), 1], lty=2)
+
+      if (plot.settings$legend) {
+        legend(
+          plot.settings$legend.pos,
+          legend = plot.settings$legend.text,
+          pch = c(19, 3),
+          col = c("red", col[10]),
+          horiz = TRUE,
+          bty = "n",
+          cex = .9 * par()[["cex"]]
+        )
+
+      }
+
+
+
+      ##write information on the De in the plot
+      if("mtext" %in% names(list(...))) {
+
+        mtext(side = 3, list(...)$mtext)
+
+      }else{
+
+        try(mtext(side=3,
+                  substitute(D[e] == De, list(De=paste0(De," [", De.lower, " ; ", De.upper, "]"))),
+                  line=0,
+                  cex=0.7 * par()[["cex"]]),
+            silent=TRUE)
+
+      }
+
+      if (!plot_reduced) {
+        ##==lower plot==##
+        ##RESIDUAL PLOT
+        par(mar = c(4, 4, 0, 0))
+
+        plot(
+          NA,
+          NA,
+          ylim = range(residuals),
+          xlim = xlim,
+          xlab = plot.settings$xlab,
+          type = "p",
+          pch = 1,
+          col = "grey",
+          xaxt = plot.settings$xaxt,
+          ylab = "E",
+          yaxt = "n",
+          log = ifelse(
+            plot.settings$log == "y" |
+              plot.settings$log == "xy",
+            "",
+            plot.settings$log
+          )
+        )
+
+        ##add an axis label at 0 ... this ensures the 0 line is labelled even if it is not visible
+        axis(side = 4,
+             at = 0,
+             labels = 0)
+
+        ##add residual indicator (residuals should scatter around 0)
+        col.ramp <- colorRampPalette(c(col[19], "white", col[19]))
+        col.polygon <- col.ramp(100)
+
+        if (plot.settings$log != "x") {
+          shape::filledrectangle(
+            mid = c((xlim[2]) + (par("usr")[2] - xlim[2]) / 2,
+                    max(residuals) - diff(range(residuals)) / 2),
+            wx = par("usr")[2] - xlim[2],
+            wy = diff(range(residuals)),
+            col = col.polygon
+          )
+
+        }
+        ##add 0 line
+        abline(h = 0, lty = 3)
+
+        ##0-line indicator and arrows if the 0 line is not visible
+        ##red colouring is used only if the 0 point is not visible, to avoid too much colouring
+        if (max(residuals) < 0 &
+            min(residuals) < 0) {
+          shape::Arrowhead(
+            x0 =   xlim[2] + (par("usr")[2] - xlim[2]) / 2,
+            y0 = max(residuals),
+            angle = 270,
+            lcol = col[2],
+            arr.length = 0.4,
+            arr.type = "triangle",
+            arr.col = col[2]
+          )
+
+        } else if (max(residuals) > 0 & min(residuals) > 0) {
+          shape::Arrowhead(
+            x0 =   xlim[2] + (par("usr")[2] - xlim[2]) / 2,
+            y0 = min(residuals),
+            angle = 90,
+            lcol = col[2],
+            arr.length = 0.4,
+            arr.type = "triangle",
+            arr.col = col[2]
+          )
+
+
+        } else{
+          points(xlim[2], 0, pch = 3)
+
+        }
+
+
+        ##add residual points
+        if (length(RF_nat.slided[c(min(RF_nat.lim):max(RF_nat.lim)), 1]) > length(residuals)) {
+          temp.points.diff <-
+            length(RF_nat.slided[c(min(RF_nat.lim):max(RF_nat.lim)), 1]) -
+            length(residuals)
+
+          points(RF_nat.slided[c(min(RF_nat.lim):(max(RF_nat.lim) - temp.points.diff)), 1],
+                 residuals,
+                 pch = 20,
+                 col = rgb(0, 0, 0, 0.4))
+
+        } else{
+          points(RF_nat.slided[c(min(RF_nat.lim):max(RF_nat.lim)), 1],
+                 residuals,
+                 pch = 20,
+                 col = rgb(0, 0, 0, 0.4))
+
+        }
+
+        ##add vertical line to mark De (t_n)
+        abline(v = De, lty = 2, col = col[2])
+
+        ##add numeric value of De ... t_n
+        axis(
+          side = 1,
+          at = De,
+          labels = De,
+          cex.axis = 0.8 * plot.settings$cex,
+          col = "blue",
+          padj = -1.55
+        )
+
+
+        ##TODO- CONTROL PLOT! ... can be implemented in appropriate form in a later version
+        if (method.control.settings$trace) {
+          par(new = TRUE)
+          plot(
+            RF_reg.limited[1:length(slide$squared_residuals),1],
+            slide$squared_residuals,
+            ylab = "",
+            type = "l",
+            xlab = "",
+            xaxt = plot.settings$xaxt,
+            axes = FALSE,
+            xlim = xlim,
+            log = "y"
+          )
+
+
+        }
+
+      }
+
+    }
+
+  }#endif::plot
+  ##=============================================================================#
+  ## RETURN
+  ##=============================================================================#
+
+  ##catch worst-case scenarios ... i.e. something went wrong
+  if(!exists("De")){De  <- NA}
+  if(!exists("De.error")){De.error  <- NA}
+  if(!exists("De.MC")){De.MC  <- NA}
+  if(!exists("De.lower")){De.lower  <- NA}
+  if(!exists("De.upper")){De.upper  <- NA}
+  if(!exists("De.status")){De.status  <- NA}
+  if (!exists("fit")) {
+  if (exists("fit.lambda")) {
+      fit <- fit.lambda
+
+    }else{
+      fit  <- list()
+
+    }
+  }
+  if(!exists("slide")){slide <- list()}
+
+  ##combine values for De into a data frame
+  De.values <- data.frame(
+      DE = De,
+      DE.ERROR = De.error,
+      DE.LOWER = De.lower,
+      DE.UPPER = De.upper,
+      DE.STATUS = De.status,
+      RF_NAT.LIM = paste(RF_nat.lim, collapse = ":"),
+      RF_REG.LIM = paste(RF_reg.lim, collapse = ":"),
+      POSITION =  as.integer(aliquot.position),
+      DATE = aliquot.date,
+      SEQUENCE_NAME = aliquot.sequence_name,
+      UID = NA,
+    row.names = NULL,
+    stringsAsFactors = FALSE
+  )
+
+  ##generate unique identifier
+  UID <- .create_UID()
+
+    ##update data.frames accordingly
+    De.values$UID <- UID
+
+    if(!is.null(TP.data.frame)){
+      TP.data.frame$UID <- UID
+
+    }
+
+
+  ##produce results object
+    newRLumResults.analyse_IRSAR.RF <- set_RLum(
+      class = "RLum.Results",
+      data = list(
+        data = De.values,
+        De.MC = De.MC,
+        test_parameters = TP.data.frame,
+        fit = fit,
+        slide = slide
+      ),
+      info = list(call = sys.call())
+    )
+
+  invisible(newRLumResults.analyse_IRSAR.RF)
+
+}
+
diff --git a/R/analyse_SAR.CWOSL.R b/R/analyse_SAR.CWOSL.R
new file mode 100644
index 0000000..b008c8c
--- /dev/null
+++ b/R/analyse_SAR.CWOSL.R
@@ -0,0 +1,1693 @@
+#' Analyse SAR CW-OSL measurements
+#'
+#' The function performs a SAR CW-OSL analysis on an
+#' \code{\linkS4class{RLum.Analysis}} object including growth curve fitting.
+#'
+#' The function performs an analysis of standard SAR protocol measurements
+#' introduced by Murray and Wintle (2000) with CW-OSL curves. For the
+#' calculation of the Lx/Tx value the function \link{calc_OSLLxTxRatio} is
+#' used. For \bold{changing the way the Lx/Tx error is calculated} use the arguments
+#' \code{background.count.distribution} and \code{sigmab}, which will be passed to the function
+#' \link{calc_OSLLxTxRatio}.\cr\cr
+#'
+#' \bold{Argument \code{object} is of type \code{list}}\cr\cr
+#'
+#' If the argument \code{object} is of type \code{\link{list}} containing \bold{only}
+#' \code{\linkS4class{RLum.Analysis}} objects, the function re-calls itself as often as there
+#' are elements in the list. This is useful if an entire measurement is to be analysed without
+#' writing separate for-loops. To gain full control of the parameters (e.g., \code{dose.points}) for
+#' every aliquot (corresponding to one \code{\linkS4class{RLum.Analysis}} object in the list),
+#' the arguments can be provided as a \code{\link{list}} as well. This \code{list} should
+#' be of the same length as the \code{list} provided with the argument \code{object}, otherwise the
+#' function will create its own list of the required length. The function output will be one single
+#' \code{\linkS4class{RLum.Results}} object.
+#'
+#' Please be careful when using this option. It may allow a fast and efficient data analysis, but
+#' the function may also break with an unclear error message due to wrong input data.\cr\cr
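+#'
+#' A minimal sketch of such a list-based call (the object names \code{aliquot1} and
+#' \code{aliquot2} are placeholders for \code{\linkS4class{RLum.Analysis}} objects):
+#'
+#' \preformatted{
+#' results <- analyse_SAR.CWOSL(
+#'  object = list(aliquot1, aliquot2),
+#'  signal.integral.min = list(1, 1),
+#'  signal.integral.max = list(2, 2),
+#'  background.integral.min = list(900, 900),
+#'  background.integral.max = list(1000, 1000))
+#' }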
+#'
+#' \bold{Working with IRSL data}\cr\cr
+#'
+#' The function was originally designed to work just for 'OSL' curves,
+#' following the principles of the SAR protocol. An IRSL measurement protocol
+#' may follow this procedure, e.g., post-IR IRSL protocol (Thomsen et al.,
+#' 2008). Therefore this function has been enhanced to work with IRSL data;
+#' however, the function is only capable of analysing curves that follow the
+#' SAR protocol structure, i.e., to analyse a post-IR IRSL protocol, curve data
+#' have to be pre-selected by the user to fit the standards of the SAR
+#' protocol, i.e., Lx,Tx,Lx,Tx and so on. \cr
+#'
+#' Example: Imagine the measurement contains pIRIR50 and pIRIR225 IRSL curves.
+#' Only one curve type can be analysed at the same time: The pIRIR50 curves or
+#' the pIRIR225 curves.\cr\cr
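+#'
+#' A minimal sketch of such a pre-selection using functions from this package
+#' (the record type label \code{"IRSL"} is an assumption and depends on the
+#' measurement data):
+#'
+#' \preformatted{
+#' object <- set_RLum(
+#'  class = "RLum.Analysis",
+#'  records = get_RLum(object, recordType = "IRSL"),
+#'  protocol = "pIRIR")
+#' }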
+#'
+#' \bold{Supported rejection criteria}\cr\cr \sQuote{recycling.ratio}:
+#' calculated for every repeated regeneration dose point.\cr
+#'
+#' \sQuote{recuperation.rate}: recuperation rate calculated by comparing the
+#' Lx/Tx values of the zero regeneration point with the Ln/Tn value (the Lx/Tx
+#' ratio of the natural signal). For methodological background see Aitken and
+#' Smith (1988).\cr
+#'
+#' \sQuote{testdose.error}: set the allowed error for the testdose, which per
+#' default should not exceed 10\%. The testdose error is calculated as Tx_net.error/Tx_net.
+#'
+#' \sQuote{palaeodose.error}: set the allowed error for the De value, which per
+#' default should not exceed 10\%.
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}): input
+#' object containing data for analysis, alternatively a \code{\link{list}} of
+#' \code{\linkS4class{RLum.Analysis}} objects can be provided.
+#'
+#' @param signal.integral.min \code{\link{integer}} (\bold{required}): lower
+#' bound of the signal integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+#' of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+#' as the minimum signal integral for the Tx curve.
+#'
+#' @param signal.integral.max \code{\link{integer}} (\bold{required}): upper
+#' bound of the signal integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+#' of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+#' as the maximum signal integral for the Tx curve.
+#'
+#' @param background.integral.min \code{\link{integer}} (\bold{required}):
+#' lower bound of the background integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+#' of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+#' as the minimum background integral for the Tx curve.
+#'
+#' @param background.integral.max \code{\link{integer}} (\bold{required}):
+#' upper bound of the background integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+#' of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+#' as the maximum background integral for the Tx curve.
+#'
+#' @param rejection.criteria \code{\link{list}} (with default): provide a named list
+#' and set rejection criteria in percentage for further calculation. Can be a \code{\link{list}} in
+#' a \code{\link{list}}, if \code{object} is of type \code{\link{list}}
+#'
+#' Allowed arguments are \code{recycling.ratio}, \code{recuperation.rate},
+#' \code{palaeodose.error}, \code{testdose.error} and \code{exceed.max.regpoint = TRUE/FALSE}.
+#' Example: \code{rejection.criteria = list(recycling.ratio = 10)}.
+#' Per default all numerical values are set to 10, \code{exceed.max.regpoint = TRUE}.
+#' Every criterion can be set to \code{NA}. In this case values are calculated, but not considered, i.e.
+#' the RC.Status will always be \code{'OK'}
+#'
+#' @param dose.points \code{\link{numeric}} (optional): a numeric vector
+#' containing the dose point values. Using this argument overwrites dose point
+#' values in the signal curves. Can be a \code{\link{list}} of \code{\link{numeric}} vectors,
+#' if \code{object} is of type \code{\link{list}}
+#'
+#' @param mtext.outer \code{\link{character}} (optional): option to provide an
+#' outer margin mtext. Can be a \code{\link{list}} of \code{\link{character}s},
+#' if \code{object} is of type \code{\link{list}}
+#'
+#' @param plot \code{\link{logical}} (with default): enables or disables plot
+#' output.
+#'
+#' @param plot.single \code{\link{logical}} (with default) or
+#' \code{\link{numeric}} (optional): single plot output (\code{TRUE/FALSE}) to
+#' allow for plotting the results in single plot windows. If a numeric vector
+#' is provided the plots can be selected individually, i.e. \code{plot.single =
+#' c(1,2,3,4)} will plot the TL and Lx, Tx curves but not the legend (5) or the
+#' growth curve (6), (7) and (8) belong to rejection criteria plots. Requires
+#' \code{plot = TRUE}.
+#'
+#' @param \dots further arguments that will be passed to the function
+#' \code{\link{plot_GrowthCurve}} or \code{\link{calc_OSLLxTxRatio}}
+#' (supported: \code{background.count.distribution}, \code{sigmab}, \code{sig0}). \bold{Please note} that
+#' if you consider using the early light subtraction method you should provide your own \code{sigmab}
+#' value!
+#'
+#'
+#' @return A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+#' returned containing the following elements:
+#' \item{De.values}{\link{data.frame} containing De-values, De-error and
+#' further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all
+#' calculated Lx/Tx values including signal, background counts and the dose
+#' points} \item{rejection.criteria}{\link{data.frame} with values that might
+#' by used as rejection criteria. NA is produced if no R0 dose point exists.}
+#' \item{Formula}{\link{formula} formula that has been used for the growth
+#' curve fitting }\cr The output should be accessed using the function
+#' \code{\link{get_RLum}}.
+#'
+#'
+#' @note This function must not be mixed up with the function
+#' \code{\link{Analyse_SAR.OSLdata}}, which works with
+#' \link{Risoe.BINfileData-class} objects.\cr
+#'
+#' \bold{The function currently only supports 'OSL' or 'IRSL' data!}
+#'
+#' @section Function version: 0.7.5
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#'
+#' @seealso \code{\link{calc_OSLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+#' \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+#' \code{\link{get_RLum}}
+#'
+#'
+#' @references Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+#' after bleaching. Quaternary Science Reviews 7, 387-393.
+#'
+#' Duller, G., 2003. Distinguishing quartz and feldspar in single grain
+#' luminescence measurements. Radiation Measurements, 37 (2), 161-165.
+#'
+#' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+#' improved single-aliquot regenerative-dose protocol. Radiation Measurements
+#' 32, 57-73.
+#'
+#' Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory
+#' fading rates of various luminescence signals from feldspar-rich sediment
+#' extracts. Radiation Measurements 43, 1474-1486.
+#' doi:10.1016/j.radmeas.2008.06.002
+#'
+#' @keywords datagen plot
+#'
+#' @examples
+#'
+#' ##load data
+#' ##ExampleData.BINfileData contains two BINfileData objects
+#' ##CWOSL.SAR.Data and TL.SAR.Data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##transform the values from the first position in a RLum.Analysis object
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+#'
+#' ##perform SAR analysis and set rejection criteria
+#' results <- analyse_SAR.CWOSL(
+#' object = object,
+#' signal.integral.min = 1,
+#' signal.integral.max = 2,
+#' background.integral.min = 900,
+#' background.integral.max = 1000,
+#' log = "x",
+#' fit.method = "EXP",
+#' rejection.criteria = list(
+#'   recycling.ratio = 10,
+#'   recuperation.rate = 10,
+#'   testdose.error = 10,
+#'   palaeodose.error = 10,
+#'   exceed.max.regpoint = TRUE)
+#')
+#'
+#' ##show De results
+#' get_RLum(results)
+#'
+#' ##show LnTnLxTx table
+#' get_RLum(results, data.object = "LnLxTnTx.table")
+#'
+#' @export
+analyse_SAR.CWOSL<- function(
+  object,
+  signal.integral.min,
+  signal.integral.max,
+  background.integral.min,
+  background.integral.max,
+  rejection.criteria = NULL,
+  dose.points = NULL,
+  mtext.outer,
+  plot = TRUE,
+  plot.single = FALSE,
+  ...
+) {
+
+# SELF CALL -----------------------------------------------------------------------------------
+if(is.list(object)){
+
+  ##make life easy
+  if(missing("signal.integral.min")){
+    signal.integral.min <- 1
+    warning("[analyse_SAR.CWOSL()] 'signal.integral.min' missing, set to 1", call. = FALSE)
+  }
+
+  if(missing("signal.integral.max")){
+    signal.integral.max <- 2
+    warning("[analyse_SAR.CWOSL()] 'signal.integral.max' missing, set to 2", call. = FALSE)
+  }
+
+  ##now we have to extend everything to allow lists of arguments ... this is only consistent
+  signal.integral.min <- rep(list(signal.integral.min), length = length(object))
+  signal.integral.max <- rep(list(signal.integral.max), length = length(object))
+  background.integral.min <- rep(list(background.integral.min), length = length(object))
+  background.integral.max <- rep(list(background.integral.max), length = length(object))
+
+
+  ##it is a little bit more complex, as we have a list in a list
+  if(is(rejection.criteria[[1]], "list")){
+    rejection.criteria <- rep(rejection.criteria, length = length(object))
+
+  }else{
+    rejection.criteria <- rep(list(rejection.criteria), length = length(object))
+
+  }
+
+
+  if(!is.null(dose.points)){
+
+    if(is(dose.points, "list")){
+      dose.points <- rep(dose.points, length = length(object))
+
+    }else{
+      dose.points <- rep(list(dose.points), length = length(object))
+
+    }
+
+  }else{
+    dose.points <- rep(list(NULL), length(object))
+
+  }
+
+  if(!missing(mtext.outer)){
+    mtext.outer <- rep(as.list(mtext.outer), length = length(object))
+
+  }else{
+    mtext.outer <- rep(list(""), length = length(object))
+
+  }
+
+   ##run analysis
+   temp <- lapply(1:length(object), function(x){
+
+    analyse_SAR.CWOSL(object[[x]],
+                      signal.integral.min = signal.integral.min[[x]],
+                      signal.integral.max = signal.integral.max[[x]],
+                      background.integral.min = background.integral.min[[x]],
+                      background.integral.max = background.integral.max[[x]] ,
+                      dose.points = dose.points[[x]],
+                      mtext.outer = mtext.outer[[x]],
+                      plot = plot,
+                      rejection.criteria = rejection.criteria[[x]],
+                      plot.single = plot.single,
+                      main = ifelse("main"%in% names(list(...)), list(...)$main, paste0("ALQ #",x)),
+                      ...)
+
+  })
+
+  ##combine everything into one RLum.Results object, as this is what was promised above ...
+  ##only one object
+
+  ##merge results and check if the output became NULL
+  results <- merge_RLum(temp)
+
+  ##DO NOT use invisible() here, otherwise the results would not be returned visibly
+  if(length(results) == 0){
+    return(NULL)
+
+  }else{
+    return(results)
+
+  }
+
+}
+
+# CONFIG  -----------------------------------------------------------------
+
+  ##set error list, this allows to set error messages without breaking the function
+  error.list <- list()
+
+# General Integrity Checks ---------------------------------------------------
+
+  ##GENERAL
+
+    ##MISSING INPUT
+    if(missing("object")){
+      stop("[analyse_SAR.CWOSL()] No value set for 'object'!")
+    }
+
+    ##INPUT OBJECTS
+    if(!is(object, "RLum.Analysis")){
+      stop("[analyse_SAR.CWOSL()] Input object is not of type 'RLum.Analyis'!")
+    }
+
+
+    if(missing("signal.integral.min") & !is.list(object)){
+      signal.integral.min <- 1
+      warning("[analyse_SAR.CWOSL()] 'signal.integral.min' missing, set to 1", call. = FALSE)
+    }
+
+    if(missing("signal.integral.max") & !is.list(object)){
+      signal.integral.max <- 2
+      warning("[analyse_SAR.CWOSL()] 'signal.integral.max' missing, set to 2", call. = FALSE)
+    }
+
+    if(missing("background.integral.min")){
+     stop("[analyse_SAR.CWOSL()] No value set for 'background.integral.min'!")
+    }
+
+    if(missing("background.integral.max")){
+      stop("[analyse_SAR.CWOSL()] No value set for 'background.integral.max'!")
+    }
+
+
+      ##build signal and background integrals
+      signal.integral <- c(signal.integral.min[1]:signal.integral.max[1])
+      background.integral <- c(background.integral.min[1]:background.integral.max[1])
+
+        ##account for the case that Lx and Tx integral differ
+        if (length(signal.integral.min) == 2 &
+            length(signal.integral.max) == 2) {
+          signal.integral.Tx <-
+            c(signal.integral.min[2]:signal.integral.max[2])
+
+        }else{
+          signal.integral.Tx <- NULL
+
+        }
+
+        if (length(background.integral.min) == 2 &
+            length(background.integral.max) == 2) {
+          background.integral.Tx <-
+            c(background.integral.min[2]:background.integral.max[2])
+
+        }else{
+          background.integral.Tx <- NULL
+
+        }
+
+        ##Account for the case that the user did not provide everything ...
+        if(is.null(signal.integral.Tx) & !is.null(background.integral.Tx)){
+          signal.integral.Tx <- signal.integral
+
+          warning("[analyse_SAR.CWOSL()] background integral for Tx curves set, but not for the signal integral; signal integral for Tx automatically set.")
+        }
+
+      if(!is.null(signal.integral.Tx) & is.null(background.integral.Tx)){
+        background.integral.Tx <- background.integral
+
+        warning("[analyse_SAR.CWOSL()] signal integral for Tx curves set, but not for the background integral; background integral for Tx automatically set.")
+      }
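+
+      ##e.g., signal.integral.min = c(1, 1) and signal.integral.max = c(2, 4)
+      ##would use channels 1:2 for the Lx and 1:4 for the Tx curves
+      ##(channel values purely illustrative)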
+
+
+    ##INTEGRAL LIMITS
+    if(!is(signal.integral, "integer") | !is(background.integral, "integer")){
+      stop("[analyse_SAR.CWOSL()] 'signal.integral' or 'background.integral' is not
+           of type integer!")
+    }
+
+
+
+      ##CHECK IF DATA SET CONTAINS ANY OSL curve
+      if (!any(grepl("OSL", structure_RLum(object)$recordType)) &&
+          !any(grepl("IRSL", structure_RLum(object)$recordType))) {
+        warning(
+          "[analyse_SAR.CWOSL()] No record of type 'OSL' or 'IRSL' are detected in the sequence object! NULL returned.",
+          call. = FALSE
+        )
+
+        return(NULL)
+
+      }
+
+    ##Check if any OSL curve was measured; if not, set the curve type to IRSL
+    ##to allow further processing
+    CWcurve.type  <- ifelse(!TRUE%in%grepl("OSL", structure_RLum(object)$recordType),
+                            "IRSL","OSL")
+
+
+# Rejection criteria ------------------------------------------------------
+
+    ##set list
+    rejection.criteria.default <- list(
+      recycling.ratio = 10,
+      recuperation.rate = 10,
+      palaeodose.error = 10,
+      testdose.error = 10,
+      exceed.max.regpoint = TRUE
+
+    )
+
+    ##modify the list on request
+    if(!is.null(rejection.criteria)){
+      rejection.criteria <- modifyList(rejection.criteria.default, rejection.criteria)
+
+    }else{
+      rejection.criteria <- rejection.criteria.default
+
+    }
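+
+    ##e.g., rejection.criteria = list(recycling.ratio = 5) keeps all other
+    ##defaults and only tightens the recycling ratio threshold to 5 %
+    ##(the value 5 is purely illustrative)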
+
+
+# Deal with extra arguments ----------------------------------------------------
+
+  ##deal with addition arguments
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {""}
+
+  log <- if("log" %in% names(extraArgs)) {extraArgs$log} else
+  {""}
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else
+  {1}
+
+  background.count.distribution <-
+    if ("background.count.distribution" %in% names(extraArgs)) {
+      extraArgs$background.count.distribution
+    } else
+    {
+      "non-poisson"
+    }
+
+  sigmab <- if("sigmab" %in% names(extraArgs)) {extraArgs$sigmab} else
+  {NULL}
+
+  sig0 <- if("sig0" %in% names(extraArgs)) {extraArgs$sig0} else
+  {0}
+
+
+# Protocol Integrity Checks --------------------------------------------------
+
+  ##check the overall structure of the object
+  ##every SAR protocol has to have an equal number of curves
+
+
+  ##grep curve types from analysis value and remove unwanted information
+  temp.ltype <- sapply(1:length(object@records), function(x) {
+
+                ##export as global variable
+                object@records[[x]]@recordType <<- gsub(" .*", "",
+                                                        object@records[[x]]@recordType)
+
+                object@records[[x]]@recordType
+
+  })
+
+
+  ##problem: FI lexsyg devices provide irradiation information in a separate curve
+  if("irradiation"%in%temp.ltype){
+
+    ##grep irradiation times
+    temp.irradiation <- structure_RLum(object)
+    temp.irradiation <- temp.irradiation[temp.irradiation$recordType == "irradiation",
+                                         "x.max"]
+
+    ##add "0" dose for the natural signal (every 2nd entry, i.e. the test dose, is skipped later via indexing)
+    temp.Dose <- c(0,temp.irradiation)
+
+    ##remove irradiation entries from file
+    object <- set_RLum(
+               class = "RLum.Analysis",
+               records = get_RLum(object, recordType = c(CWcurve.type, "TL")),
+               protocol = "SAR")
+
+  }
+
+  ##check if the wanted curves are a multiple of two
+  ##gsub removes unwanted information from the curves
+  if(table(temp.ltype)[CWcurve.type]%%2!=0){
+    error.list[[1]] <- "[analyse_SAR.CWOSL()] Input OSL/IRSL curves are not a multiple of two."
+  }
+
+  ##check if the curve lengths differ
+  temp.matrix.length <- unlist(sapply(1:length(object@records), function(x) {
+                          if(object@records[[x]]@recordType==CWcurve.type){
+                              length(object@records[[x]]@data[,1])
+                          }
+  }))
+
+  if(length(unique(temp.matrix.length))!=1){
+    error.list[[2]] <- "[analyse_SAR.CWOSL()] Input curve lengths differ."
+
+  }
+
+  ##just proceed if error list is empty
+  if (length(error.list) == 0) {
+
+    ##check the signal integral
+    if (max(signal.integral) == min(signal.integral)) {
+      signal.integral <-
+        c(min(signal.integral) : (max(signal.integral) + 1))
+
+      warning("[analyse_SAR.CWOSL()] integral signal limits cannot be equal, reset automatically!")
+
+    }
+
+
+    ##the background integral should not be longer than the curve channel length
+    if (max(background.integral) == min(background.integral)) {
+      background.integral <-
+        c((min(background.integral) - 1) : max(background.integral))
+
+    }
+
+    if (max(background.integral) > temp.matrix.length[1]) {
+      background.integral <-
+          c((temp.matrix.length[1] - length(background.integral)):temp.matrix.length[1])
+
+      ##prevent the background integral from becoming negative
+      if(min(background.integral) < max(signal.integral)){
+        background.integral <- c((max(signal.integral) + 1):max(background.integral))
+
+      }
+
+      warning(
+        "[analyse_SAR.CWOSL()] Background integral out of bounds. Set to: c(",
+        min(background.integral),":", max(background.integral),")"
+      )
+
+    }
+
+    ##Do the same for the Tx integrals, if set
+    if (!is.null(background.integral.Tx)) {
+
+      if (max(background.integral.Tx) == min(background.integral.Tx)) {
+        background.integral.Tx <-
+          c((min(background.integral.Tx) - 1) : max(background.integral.Tx))
+
+      }
+
+      if (max(background.integral.Tx) > temp.matrix.length[2]) {
+        background.integral.Tx <-
+          c((temp.matrix.length[2] - length(background.integral.Tx)):temp.matrix.length[2])
+
+
+        ##prevent the background integral from becoming negative
+        if (min(background.integral.Tx) < max(signal.integral.Tx)) {
+          background.integral.Tx <-
+            c((max(signal.integral.Tx) + 1):max(background.integral.Tx))
+
+
+        }
+
+        warning(
+          "Background integral for Tx out of bounds. Set to: c(",
+          min(background.integral.Tx),
+          ":",
+          max(background.integral.Tx),
+          ")"
+        )
+
+      }
+    }
+
+
+    # Grep Curves -------------------------------------------------------------
+
+    ##grep relevant curves from RLum.Analyis object
+    OSL.Curves.ID <-
+      get_RLum(object, recordType = CWcurve.type, get.index = TRUE)
+
+    ##separate curves by Lx and Tx (it makes it much easier)
+    OSL.Curves.ID.Lx <-
+      OSL.Curves.ID[seq(1,length(OSL.Curves.ID),by = 2)]
+    OSL.Curves.ID.Tx <-
+      OSL.Curves.ID[seq(2,length(OSL.Curves.ID),by = 2)]
+
+    ##get index of TL curves
+    TL.Curves.ID <-
+      suppressWarnings(get_RLum(object, recordType = "TL$", get.index = TRUE))
+
+    ##separate TL curves
+    TL.Curves.ID.Lx <-
+      sapply(1:length(OSL.Curves.ID.Lx), function(x) {
+        TL.Curves.ID[which(TL.Curves.ID == (OSL.Curves.ID.Lx[x] - 1))]
+      })
+
+    TL.Curves.ID.Tx <-
+      sapply(1:length(OSL.Curves.ID.Tx), function(x) {
+        TL.Curves.ID[which(TL.Curves.ID == (OSL.Curves.ID.Tx[x] - 1))]
+      })
+
+
+    # COMPONENT FITTING -------------------------------------------------------
+
+
+    # for(x in seq(1,length(OSL.Curves.ID),by=2)){
+    #
+    #
+    #   temp.fit.output <- fit_CWCurve(object@records[[OSL.Curves.ID[x]]],
+    #                 n.components.max=3,
+    #                 output.terminal = FALSE,
+    #                 output.terminalAdvanced = FALSE,
+    #                 plot = FALSE
+    #
+    #               )
+    #   if(exists("fit.output") == FALSE){
+    #
+    #     fit.output <- get_RLum(temp.fit.output)
+    #
+    #   }else{
+    #
+    #     fit.output <- rbind(fit.output, get_RLum(temp.fit.output))
+    #
+    #   }
+    #
+    # }
+
+    ##TODO
+
+    # Calculate LnLxTnTx values  --------------------------------------------------
+
+    ##calculate LxTx values using external function
+    LnLxTnTx <- lapply(seq(1,length(OSL.Curves.ID),by = 2), function(x){
+      temp.LnLxTnTx <- get_RLum(
+        calc_OSLLxTxRatio(
+          Lx.data = object@records[[OSL.Curves.ID[x]]]@data,
+          Tx.data = object@records[[OSL.Curves.ID[x + 1]]]@data,
+          signal.integral = signal.integral,
+          signal.integral.Tx = signal.integral.Tx,
+          background.integral = background.integral,
+          background.integral.Tx = background.integral.Tx,
+          background.count.distribution = background.count.distribution,
+          sigmab = sigmab,
+          sig0 = sig0
+        )
+      )
+
+        ##grep dose
+        if (exists("temp.irradiation") == FALSE) {
+          temp.Dose <- object@records[[OSL.Curves.ID[x]]]@info$IRR_TIME
+
+          ##for the case that no information on the dose can be found
+          if (is.null(temp.Dose)) {
+            temp.Dose <- NA
+          }
+
+          temp.LnLxTnTx <-
+            cbind(Dose = temp.Dose, temp.LnLxTnTx)
+
+        }else{
+          temp.LnLxTnTx <- cbind(Dose = temp.Dose[x], temp.LnLxTnTx)
+
+        }
+      })
+
+    ##combine
+    LnLxTnTx <- data.table::rbindlist(LnLxTnTx)
+
+    # Set regeneration points -------------------------------------------------
+
+    ##overwrite dose points manually
+    if (!is.null(dose.points)) {
+      if (length(dose.points) != length(LnLxTnTx$Dose)) {
+        stop("[analyse_SAR.CWOSL()] length 'dose.points' differs from number of curves.")
+
+      }
+
+      LnLxTnTx$Dose <- dose.points
+
+    }
+
+    ##check whether we have dose points at all
+    if (is.null(dose.points) & anyNA(LnLxTnTx$Dose)) {
+      stop("[analyse_SAR.CWOSL()] 'dose.points' contains NA values or have not been set!")
+
+    }
+
+    ##check whether the first OSL/IRSL curve (i.e., the Natural) has 0 dose. If
+    ##not, it is probably a dose recovery test with the given dose treated as the
+    ##unknown dose. We overwrite this value and warn the user.
+    if (LnLxTnTx$Dose[1] != 0) {
+      warning("[analyse_SAR.CWOSL()] The natural signal has a dose of ", LnLxTnTx$Dose[1],
+              " s, which is indicative of a dose recovery test. The natural dose was set to 0.", call. = FALSE)
+      LnLxTnTx$Dose[1] <- 0
+    }
+
+    #generate unique dose ids - these are also the numbers for the regeneration points
+    temp.DoseID <- c(0:(length(LnLxTnTx$Dose) - 1))
+    temp.DoseName <- paste("R",temp.DoseID,sep = "")
+    temp.DoseName <-
+      cbind(Name = temp.DoseName,Dose = LnLxTnTx$Dose)
+
+
+    ##set natural
+    temp.DoseName[temp.DoseName[,"Name"] == "R0","Name"] <-
+      "Natural"
+
+    ##set R0
+    temp.DoseName[temp.DoseName[,"Name"] != "Natural" &
+                    temp.DoseName[,"Dose"] == 0,"Name"] <- "R0"
+
+    ##correct numeration of the other dose points
+
+    ##how many dose points with a dose > 0 do we have?
+    non.temp.zero.dose.number <- nrow(temp.DoseName[temp.DoseName[, "Dose"] != 0,])
+
+    temp.DoseName[temp.DoseName[,"Name"] != "Natural" &
+                    temp.DoseName[,"Name"] != "R0","Name"] <- paste("R",c(1:non.temp.zero.dose.number),sep =
+                                                                      "")
+
+    ##find duplicated doses (including 0 dose - which means the Natural)
+    temp.DoseDuplicated <- duplicated(temp.DoseName[,"Dose"])
+
+    ##combine temp.DoseName
+    temp.DoseName <-
+      cbind(temp.DoseName,Repeated = temp.DoseDuplicated)
+
+    ##correct value for R0 (it is not really repeated)
+    temp.DoseName[temp.DoseName[,"Dose"] == 0,"Repeated"] <- FALSE
+
+    ##combine in the data frame
+    temp.LnLxTnTx <- data.frame(Name = temp.DoseName[,"Name"],
+                                Repeated = as.logical(temp.DoseName[,"Repeated"]))
+
+    LnLxTnTx <- cbind(temp.LnLxTnTx,LnLxTnTx)
+    LnLxTnTx[,"Name"] <- as.character(LnLxTnTx[,"Name"])
+
+    # Calculate Recycling Ratio -----------------------------------------------
+
+    ##Calculate Recycling Ratio
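+    ##e.g., for dose points c(0, 5, 10, 5) the last point repeats the first
+    ##5 s dose and the ratio LxTx(repeated)/LxTx(previous) should scatter
+    ##around 1 (dose values purely illustrative)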
+    if (length(LnLxTnTx[LnLxTnTx[,"Repeated"] == TRUE,"Repeated"]) > 0) {
+      ##identify repeated doses
+      temp.Repeated <-
+        LnLxTnTx[LnLxTnTx[,"Repeated"] == TRUE,c("Name","Dose","LxTx")]
+
+      ##find the corresponding previous dose for each repeated dose
+      temp.Previous <-
+        t(sapply(1:length(temp.Repeated[,1]),function(x) {
+          LnLxTnTx[LnLxTnTx[,"Dose"] == temp.Repeated[x,"Dose"] &
+                     LnLxTnTx[,"Repeated"] == FALSE,c("Name","Dose","LxTx")]
+        }))
+
+
+      ##convert to data.frame
+      temp.Previous <- as.data.frame(temp.Previous)
+
+      ##set column names
+      temp.ColNames <-
+        unlist(lapply(1:length(temp.Repeated[,1]),function(x) {
+          temp <- paste("Recycling ratio (", temp.Repeated[x,"Name"],"/",
+                temp.Previous[temp.Previous[,"Dose"] == temp.Repeated[x,"Dose"],"Name"],
+                ")",
+                sep = "")
+          return(temp[1])
+        }))
+
+
+      ##Calculate Recycling Ratio
+      RecyclingRatio <-
+        round(as.numeric(temp.Repeated[,"LxTx"]) / as.numeric(temp.Previous[,"LxTx"]),
+              digits = 4)
+
+      ##Just transform the matrix and add column names
+      RecyclingRatio <- t(RecyclingRatio)
+      colnames(RecyclingRatio) <- temp.ColNames
+
+    }else{
+      RecyclingRatio <- NA
+    }
+
+
+
+    # Calculate Recuperation Rate ---------------------------------------------
+
+
+    ##Recuperation Rate (capable of handling multiple recuperation values)
+    if (length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]) > 0) {
+      Recuperation <-
+        sapply(1:length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]),
+               function(x) {
+                 round(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","LxTx"][x] /
+                         LnLxTnTx[LnLxTnTx[,"Name"] == "Natural","LxTx"],
+                       digits = 4)
+               })
+      ##Just transform the matrix and add column names
+      Recuperation  <-  t(Recuperation)
+      colnames(Recuperation)  <-
+        unlist(strsplit(paste(
+          "Recuperation rate",
+          1:length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]), collapse = ";"
+        ), ";"))
+
+    }else{
+      Recuperation <- NA
+    }
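+
+    ##the recuperation rate compares the zero-dose response with the natural
+    ##signal, i.e. LxTx(R0)/LxTx(Natural); values clearly above 0 indicate
+    ##signal recuperation (cf. Aitken and Smith, 1988)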
+
+
+    # Evaluate and Combine Rejection Criteria ---------------------------------
+
+    temp.criteria <- c(
+      if(!is.null(colnames(RecyclingRatio))){
+       colnames(RecyclingRatio)}else{NA},
+      if(!is.null(colnames(Recuperation))){
+        colnames(Recuperation)}else{NA})
+
+
+    temp.value <- c(RecyclingRatio,Recuperation)
+
+    temp.threshold <-
+      c(rep(
+        rejection.criteria$recycling.ratio / 100, length(RecyclingRatio)
+      ),
+      rep(
+        rejection.criteria$recuperation.rate / 100,
+        length(Recuperation)
+      ))
+
+    ##RecyclingRatio
+    if (!is.na(RecyclingRatio)[1] & !is.na(rejection.criteria$recycling.ratio)) {
+      temp.status.RecyclingRatio <-
+        sapply(1:length(RecyclingRatio), function(x) {
+          if (abs(1 - RecyclingRatio[x]) > (rejection.criteria$recycling.ratio / 100)) {
+            "FAILED"
+          }else{
+            "OK"
+          }
+        })
+
+    }else{
+      temp.status.RecyclingRatio <- "OK"
+
+    }
+
+    ##Recuperation
+    if (!is.na(Recuperation)[1] & !is.na(rejection.criteria$recuperation.rate)) {
+      temp.status.Recuperation  <-
+        sapply(1:length(Recuperation), function(x) {
+          if(Recuperation[x] > rejection.criteria$recuperation.rate){
+            "FAILED"
+
+          }else{
+            "OK"
+
+          }
+
+        })
+
+    } else{
+      temp.status.Recuperation <- "OK"
+
+    }
+
+    # Provide Rejection Criteria for Testdose error --------------------------
+    testdose.error.calculated <- (LnLxTnTx$Net_TnTx.Error/LnLxTnTx$Net_TnTx)[1]
+
+    testdose.error.threshold <-
+      rejection.criteria$testdose.error / 100
+
+    if (is.na(testdose.error.calculated)) {
+      testdose.error.status <- "FAILED"
+
+    }else{
+      if(!is.na(testdose.error.threshold)){
+        testdose.error.status <- ifelse(
+          testdose.error.calculated <= testdose.error.threshold,
+          "OK", "FAILED"
+        )
+
+      }else{
+        testdose.error.status <- "OK"
+
+      }
+
+    }
+
+    testdose.error.data.frame <- data.frame(
+      Criteria = "Testdose error",
+      Value = testdose.error.calculated,
+      Threshold = testdose.error.threshold,
+      Status =  testdose.error.status,
+      stringsAsFactors = FALSE
+    )
+
+
+    RejectionCriteria <- data.frame(
+      Criteria = temp.criteria,
+      Value = temp.value,
+      Threshold = temp.threshold,
+      Status = c(temp.status.RecyclingRatio,temp.status.Recuperation),
+      stringsAsFactors = FALSE
+    )
+
+    RejectionCriteria <- rbind(RejectionCriteria, testdose.error.data.frame)
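+
+    ## the combined table now holds one row per criterion, e.g. (illustrative values only):
+    ##              Criteria  Value Threshold Status
+    ##                 R4/R1 1.0200       0.1     OK
+    ##   Recuperation rate 1 0.0200       0.1     OK
+    ##        Testdose error 0.0150       0.1     OK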
+
+    ##============================================================================##
+    ##PLOTTING
+    ##============================================================================##
+
+    if (plot == TRUE) {
+      # Plotting - Config -------------------------------------------------------
+
+      ##colours and double for plotting
+      col <- get("col", pos = .LuminescenceEnv)
+
+      if (plot.single[1] == FALSE) {
+        ## read par settings
+        par.default <- par(no.readonly = TRUE)
+
+        layout(matrix(
+          c(1,1,3,3,
+            1,1,3,3,
+            2,2,4,4,
+            2,2,4,4,
+            5,5,5,5),5,4,byrow = TRUE
+        ))
+
+        par(
+          oma = c(0,0,0,0), mar = c(4,4,3,3), cex = cex * 0.6
+        )
+
+        ## 1 -> TL previous LnLx
+        ## 2 -> LnLx
+        ## 3 -> TL previous TnTx
+        ## 4 -> TnTx
+        ## 5 -> Legend
+
+        ## set selected curves to allow plotting of all curves
+        plot.single.sel <- c(1,2,3,4,5,6,7,8)
+
+      }else{
+        ##check for values in the single output of the function and convert
+        if (!is(plot.single, "logical")) {
+          if (!is(plot.single, "numeric")) {
+            stop("[analyse_SAR.CWOSL()] Invalid data type for 'plot.single'.")
+          }
+
+          plot.single.sel  <- plot.single
+
+        }else{
+          plot.single.sel <- c(1,2,3,4,5,6,7,8)
+
+        }
+
+      }
+
+
+      ##warning if the number of curves exceeds the number of colour values
+      if (length(col) < length(OSL.Curves.ID) / 2) {
+        temp.message  <-
+          paste(
+            "\n[analyse_SAR.CWOSL()] Too many curves! Only the first",
+            length(col),"curves are plotted!"
+          )
+        warning(temp.message)
+      }
+
+      ##legend text
+      legend.text <-
+        paste(LnLxTnTx$Name,"\n(",LnLxTnTx$Dose,")", sep = "")
+
+
+      ##get channel resolution (should be equal for all curves)
+      resolution.OSLCurves <- round(object@records[[OSL.Curves.ID[1]]]@data[2,1] -
+                                      object@records[[OSL.Curves.ID[1]]]@data[1,1],
+                                    digits = 2)
+
+
+      # Plotting TL Curves previous LnLx ----------------------------------------
+
+      ##overall plot option selection for plot.single.sel
+      if (1 %in% plot.single.sel) {
+        ##check if TL curves are available
+        if (length(TL.Curves.ID.Lx[[1]]) > 0) {
+          ##the resolution is just an approximation taken from the data
+          resolution.TLCurves <-  round(mean(diff(
+            round(object@records[[TL.Curves.ID.Lx[1]]]@data[,1], digits = 1)
+          )), digits = 1)
+
+          ylim.range <-
+            sapply(seq(1,length(TL.Curves.ID.Lx),by = 1) ,function(x) {
+              range(object@records[[TL.Curves.ID.Lx[x]]]@data[,2])
+
+            })
+
+          plot(
+            NA,NA,
+            xlab = "T [\u00B0C]",
+            ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep =
+                           ""),
+            xlim = c(object@records[[TL.Curves.ID.Lx[1]]]@data[1,1],
+                     max(object@records[[TL.Curves.ID.Lx[1]]]@data[,1])),
+            ylim = c(1,max(ylim.range)),
+            main = main,
+            log = if (log == "y" | log == "xy") {
+              "y"
+            }else{
+              ""
+            }
+          )
+
+          #provide curve information as mtext, to keep the space for the header
+          mtext(side = 3,
+                expression(paste(
+                  "TL previous ", L[n],",",L[x]," curves",sep = ""
+                )),
+                cex = cex * 0.7)
+
+          ##plot TL curves
+          sapply(1:length(TL.Curves.ID.Lx) ,function(x) {
+            lines(object@records[[TL.Curves.ID.Lx[x]]]@data,col = col[x])
+
+          })
+
+
+
+        }else{
+          plot(
+            NA,NA,xlim = c(0,1), ylim = c(0,1), main = "",
+            axes = FALSE,
+            ylab = "",
+            xlab = ""
+          )
+          text(0.5,0.5, "No TL curve detected")
+
+        }
+      }#plot.single.sel
+
+      # Plotting LnLx Curves ----------------------------------------------------
+
+      ##overall plot option selection for plot.single.sel
+      if (2 %in% plot.single.sel) {
+        ylim.range <- sapply(1:length(OSL.Curves.ID.Lx) ,function(x) {
+          range(object@records[[OSL.Curves.ID.Lx[x]]]@data[,2])
+        })
+
+        if((log == "x" | log == "xy") & object@records[[OSL.Curves.ID.Lx[[1]]]]@data[1,1] == 0){
+          xlim <- c(object@records[[OSL.Curves.ID.Lx[1]]]@data[2,1],
+                    max(object@records[[OSL.Curves.ID.Lx[1]]]@data[,1]) +
+                      object@records[[OSL.Curves.ID.Lx[1]]]@data[2,1])
+
+        }else{
+          xlim  <- c(object@records[[OSL.Curves.ID.Lx[1]]]@data[1,1],
+                     max(object@records[[OSL.Curves.ID.Lx[1]]]@data[,1]))
+
+        }
+        #open plot area LnLx
+        plot(
+          NA,NA,
+          xlab = "Time [s]",
+          ylab = paste(CWcurve.type," [cts/",resolution.OSLCurves," s]",sep =
+                         ""),
+          xlim = xlim,
+          ylim = range(ylim.range),
+          main = main,
+          log = log
+        )
+
+        #provide curve information as mtext, to keep the space for the header
+        mtext(side = 3, expression(paste(L[n],",",L[x]," curves",sep = "")),
+              cex = cex * 0.7)
+
+        ##plot curves
+        sapply(1:length(OSL.Curves.ID.Lx), function(x) {
+
+          if((log == "x" | log == "xy") & object at records[[OSL.Curves.ID.Lx[[x]]]]@data[1,1] == 0){
+            object at records[[OSL.Curves.ID.Lx[[x]]]]@data[1,] <-
+              object at records[[OSL.Curves.ID.Lx[[x]]]]@data[1,] +
+              diff(c(object at records[[OSL.Curves.ID.Lx[[x]]]]@data[1,1],
+                     object at records[[OSL.Curves.ID.Lx[[x]]]]@data[2,1]))
+
+            warnings("[analyse_SAR.CWOSL()] curves shifted by one chanel for log-plot.")
+          }
+
+          lines(object at records[[OSL.Curves.ID.Lx[[x]]]]@data,col = col[x])
+
+        })
+
+
+        ##mark integration limit Lx curves
+        abline(
+          v = (object@records[[OSL.Curves.ID.Lx[1]]]@data[min(signal.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Lx[1]]]@data[max(signal.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Lx[1]]]@data[min(background.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Lx[1]]]@data[max(background.integral),1]), lty =
+            2, col = "gray"
+        )
+
+        ##mtext, implemented here, as a plot window has to be called first
+        if (missing(mtext.outer)) {
+          mtext.outer  <- ""
+        }
+        mtext(
+          mtext.outer, side = 4, outer = TRUE, line = -1.7, cex = cex, col = "blue"
+        )
+
+      }# plot.single.sel
+
+      # Plotting TL Curves previous TnTx ----------------------------------------
+
+      ##overall plot option selection for plot.single.sel
+      if (3 %in% plot.single.sel) {
+        ##check if TL curves are available
+        if (length(TL.Curves.ID.Tx[[1]]) > 0) {
+          ##the resolution is just an approximation taken from the data
+          resolution.TLCurves <-  round(mean(diff(
+            round(object@records[[TL.Curves.ID.Tx[1]]]@data[,1], digits = 1)
+          )), digits = 1)
+
+
+          ylim.range <- sapply(1:length(TL.Curves.ID.Tx) ,function(x) {
+            range(object@records[[TL.Curves.ID.Tx[x]]]@data[,2])
+
+          })
+
+
+
+          plot(
+            NA,NA,
+            xlab = "T [\u00B0C]",
+            ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep = ""),
+            xlim = c(object@records[[TL.Curves.ID.Tx[1]]]@data[1,1],
+                     max(object@records[[TL.Curves.ID.Tx[1]]]@data[,1])),
+            ylim = c(1,max(ylim.range)),
+            main = main,
+            log = if (log == "y" | log == "xy") {
+              "y"
+            }else{
+              ""
+            }
+          )
+
+          #provide curve information as mtext, to keep the space for the header
+          mtext(side = 3,
+                expression(paste(
+                  "TL previous ", T[n],",",T[x]," curves",sep = ""
+                )),
+                cex = cex * 0.7)
+
+          ##plot TL curves
+          sapply(1:length(TL.Curves.ID.Tx) ,function(x) {
+            lines(object@records[[TL.Curves.ID.Tx[x]]]@data,col = col[x])
+
+          })
+
+
+
+        }else{
+          plot(
+            NA,NA,xlim = c(0,1), ylim = c(0,1), main = "",
+            axes = FALSE,
+            ylab = "",
+            xlab = ""
+          )
+          text(0.5,0.5, "No TL curve detected")
+
+        }
+
+      }#plot.single.sel
+
+      # Plotting TnTx Curves ----------------------------------------------------
+
+      ##overall plot option selection for plot.single.sel
+      if (4 %in% plot.single.sel) {
+        ylim.range <- sapply(1:length(OSL.Curves.ID.Tx) ,function(x) {
+          range(object@records[[OSL.Curves.ID.Tx[x]]]@data[,2])
+
+        })
+
+        if((log == "x" | log == "xy") & object@records[[OSL.Curves.ID.Tx[[1]]]]@data[1,1] == 0){
+          xlim <- c(object@records[[OSL.Curves.ID.Tx[1]]]@data[2,1],
+                    max(object@records[[OSL.Curves.ID.Tx[1]]]@data[,1]) +
+                      object@records[[OSL.Curves.ID.Tx[1]]]@data[2,1])
+
+        }else{
+          xlim <- c(object@records[[OSL.Curves.ID.Tx[1]]]@data[1,1],
+                    max(object@records[[OSL.Curves.ID.Tx[1]]]@data[,1]))
+        }
+
+        #open plot area TnTx
+        plot(
+          NA,NA,
+          xlab = "Time [s]",
+          ylab = paste(CWcurve.type ," [cts/",resolution.OSLCurves," s]",sep =
+                         ""),
+          xlim = xlim,
+          ylim = range(ylim.range),
+          main = main,
+          log = log
+        )
+
+        #provide curve information as mtext, to keep the space for the header
+        mtext(side = 3,
+              expression(paste(T[n],",",T[x]," curves",sep = "")),
+              cex = cex * 0.7)
+
+        ##plot curves and get legend values
+        sapply(1:length(OSL.Curves.ID.Tx) ,function(x) {
+
+          ##account for log-scale and 0 values
+          if((log == "x" | log == "xy") & object at records[[OSL.Curves.ID.Tx[[x]]]]@data[1,1] == 0){
+            object at records[[OSL.Curves.ID.Tx[[x]]]]@data[1,] <-
+              object at records[[OSL.Curves.ID.Tx[[x]]]]@data[1,] +
+                 diff(c(object at records[[OSL.Curves.ID.Tx[[x]]]]@data[1,1],
+                      object at records[[OSL.Curves.ID.Tx[[x]]]]@data[2,1]))
+
+            warnings("[analyse_SAR.CWOSL()] curves shifted by one chanel for log-plot.")
+
+          }
+
+          lines(object at records[[OSL.Curves.ID.Tx[[x]]]]@data,col = col[x])
+
+        })
+
+        ##mark integration limit Tx curves
+        abline(
+          v = (object@records[[OSL.Curves.ID.Tx[1]]]@data[min(signal.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Tx[1]]]@data[max(signal.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Tx[1]]]@data[min(background.integral),1]), lty =
+            2, col = "gray"
+        )
+        abline(
+          v = (object@records[[OSL.Curves.ID.Tx[1]]]@data[max(background.integral),1]), lty =
+            2, col = "gray"
+        )
+
+      }# plot.single.sel
+
+      # Plotting Legend ----------------------------------------
+
+      ##overall plot option selection for plot.single.sel
+      if (5 %in% plot.single.sel) {
+        par.margin  <- par()$mar
+        par.mai  <- par()$mai
+        par(mar = c(1,1,1,1), mai = c(0,0,0,0))
+
+        plot(
+          c(1:(length(
+            OSL.Curves.ID
+          ) / 2)),
+          rep(7,length(OSL.Curves.ID) / 2),
+          type = "p",
+          axes = FALSE,
+          xlab = "",
+          ylab = "",
+          pch = 20,
+          col = unique(col[1:length(OSL.Curves.ID)]),
+          cex = 4 * cex,
+          ylim = c(0,10)
+        )
+
+        ##add text
+        text(c(1:(length(
+          OSL.Curves.ID
+        ) / 2)),
+        rep(7,length(OSL.Curves.ID) / 2),
+        legend.text,
+        offset = 1,
+        pos = 1)
+
+
+        ##add line
+        abline(h = 10,lwd = 0.5)
+
+        #reset margin
+        par(mar = par.margin, mai = par.mai)
+
+      }#plot.single.sel
+
+      if (exists("par.default")) {
+        par(par.default)
+
+      }
+
+
+    }##end plot == TRUE
+
+
+    # Plotting  GC  ----------------------------------------
+
+    temp.sample <- data.frame(
+      Dose = LnLxTnTx$Dose,
+      LxTx = LnLxTnTx$LxTx,
+      LxTx.Error = LnLxTnTx$LxTx.Error,
+      TnTx = LnLxTnTx$Net_TnTx
+    )
+
+    ##overall plot option selection for plot.single.sel; use a separate flag
+    ##here to avoid overwriting 'plot', which is still needed for the later plots
+    if (plot == TRUE && 6 %in% plot.single.sel) {
+      plot.GrowthCurve  <-  TRUE
+
+    }else {
+      plot.GrowthCurve  <- FALSE
+
+    }
+
+    ##Fit and plot growth curve
+    temp.GC <- plot_GrowthCurve(temp.sample,
+                                output.plot = plot.GrowthCurve,
+                                ...)
+
+    ##grep information on the fit object
+    temp.GC.fit.Formula  <- get_RLum(temp.GC, "Formula")
+
+    ##grep results
+    temp.GC <- get_RLum(temp.GC)
+
+    # Provide Rejection Criteria for Palaeodose error --------------------------
+    palaeodose.error.calculated <- ifelse(is.na(temp.GC[,1]) == FALSE,
+                                          round(temp.GC[,2] / temp.GC[,1], digits = 5),
+                                          NA)
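+    ## e.g., De = 25.4 Gy with an error of 2.3 Gy gives 2.3/25.4 = 0.09055,
+    ## i.e. a relative palaeodose error of ca. 9 % (illustrative values only)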
+
+    palaeodose.error.threshold <-
+      rejection.criteria$palaeodose.error / 100
+
+    if (is.na(palaeodose.error.calculated)) {
+      palaeodose.error.status <- "FAILED"
+
+    }else{
+      if(!is.na(palaeodose.error.threshold)){
+        palaeodose.error.status <- ifelse(
+          palaeodose.error.calculated <= palaeodose.error.threshold,
+          "OK", "FAILED"
+        )
+
+
+      }else{
+        palaeodose.error.status <- "OK"
+
+
+      }
+
+    }
+
+    palaeodose.error.data.frame <- data.frame(
+      Criteria = "Palaeodose error",
+      Value = palaeodose.error.calculated,
+      Threshold = palaeodose.error.threshold,
+      Status =  palaeodose.error.status,
+      stringsAsFactors = FALSE
+    )
+
+
+    ##add exceed.max.regpoint
+    if (!is.na(temp.GC[,1]) & !is.na(rejection.criteria$exceed.max.regpoint) && rejection.criteria$exceed.max.regpoint) {
+      status.exceed.max.regpoint <-
+        ifelse(max(LnLxTnTx$Dose) < temp.GC[,1], "FAILED", "OK")
+
+    }else{
+      status.exceed.max.regpoint <- "OK"
+
+    }
+
+    exceed.max.regpoint.data.frame <- data.frame(
+      Criteria = "De > max. dose point",
+      Value = as.numeric(temp.GC[,1]),
+      Threshold = if(is.na(rejection.criteria$exceed.max.regpoint)){
+          NA
+        }else if(!rejection.criteria$exceed.max.regpoint){
+          Inf
+        }else{
+          as.numeric(max(LnLxTnTx$Dose))
+        },
+      Status =  status.exceed.max.regpoint
+    )
+
+
+    ##add to RejectionCriteria data.frame
+    RejectionCriteria <- rbind(RejectionCriteria,
+                               palaeodose.error.data.frame,
+                               exceed.max.regpoint.data.frame)
+
+
+    ##add rejection status
+    if (length(grep("FAILED",RejectionCriteria$Status)) > 0) {
+      temp.GC <- data.frame(temp.GC, RC.Status = "FAILED")
+
+
+    }else{
+      temp.GC <- data.frame(temp.GC, RC.Status = "OK")
+
+
+    }
+
+
+    ##add information on the integration limits
+    ##note: if() instead of ifelse() is needed here, as ifelse() would only
+    ##return the first element of the integral vectors
+    temp.GC.extended <-
+      data.frame(
+        signal.range = paste(min(signal.integral),":",
+                             max(signal.integral)),
+        background.range = paste(min(background.integral),":",
+                                 max(background.integral)),
+        signal.range.Tx = paste(min(if(is.null(signal.integral.Tx)){NA}else{signal.integral.Tx}),":",
+                                max(if(is.null(signal.integral.Tx)){NA}else{signal.integral.Tx})),
+        background.range.Tx = paste(min(if(is.null(background.integral.Tx)){NA}else{background.integral.Tx}),":",
+                                    max(if(is.null(background.integral.Tx)){NA}else{background.integral.Tx})),
+        stringsAsFactors = FALSE
+      )
+
+
+
+    # Set return Values -----------------------------------------------------------
+
+    ##generate unique identifier
+    UID <- .create_UID()
+
+    temp.results.final <- set_RLum(
+      class = "RLum.Results",
+      data = list(
+        data = as.data.frame(c(temp.GC, temp.GC.extended, UID = UID), stringsAsFactors = FALSE),
+        LnLxTnTx.table = cbind(LnLxTnTx, UID = UID, stringsAsFactors = FALSE),
+        rejection.criteria = cbind(RejectionCriteria, UID, stringsAsFactors = FALSE),
+        Formula = temp.GC.fit.Formula
+      ),
+      info = list(call = sys.call())
+    )
+
+
+    # Plot graphical interpretation of rejection criteria -----------------------------------------
+
+    if (plot == TRUE && 7 %in% plot.single.sel) {
+      ##set graphical parameter
+      if (!plot.single) {
+        par(mfrow = c(1,2))
+      }else{
+        par(mfrow = c(1,1))
+      }
+
+
+      ##Rejection criteria
+      temp.rejection.criteria <- get_RLum(temp.results.final,
+                                          data.object = "rejection.criteria")
+
+      temp.rc.recycling.ratio <- temp.rejection.criteria[grep("Recycling ratio",temp.rejection.criteria[,"Criteria"]),]
+
+      temp.rc.recuperation.rate <- temp.rejection.criteria[grep("Recuperation rate",temp.rejection.criteria[,"Criteria"]),]
+
+      temp.rc.palaeodose.error <- temp.rejection.criteria[grep("Palaeodose error",temp.rejection.criteria[,"Criteria"]),]
+
+      temp.rc.testdose.error <- temp.rejection.criteria[grep("Testdose error",temp.rejection.criteria[,"Criteria"]),]
+
+      plot(
+        NA,NA,
+        xlim = c(-0.5,0.5),
+        ylim = c(0,40),
+        yaxt = "n", ylab = "",
+        xaxt = "n", xlab = "",
+        bty = "n",
+        main = "Rejection criteria"
+      )
+
+      axis(
+        side = 1, at = c(-0.2,-0.1,0,0.1,0.2), labels = c("- 0.2", "- 0.1","0/1","+ 0.1", "+ 0.2")
+      )
+
+      ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+      ##polygon for recycling ratio
+      text(
+        x = -0.35, y = 35, "Recycling R.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0
+      )
+      polygon(
+        x = c(
+          -as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],-as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],
+          as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],
+          as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1]
+        ),
+        y = c(31,39,39,31),
+        col = "gray",
+        border = NA
+      )
+      polygon(
+        x = c(-0.3, -0.3, 0.3, 0.3) ,
+        y = c(31, 39, 39, 31),
+        border = ifelse(any(
+          grepl(pattern = "FAILED", temp.rc.reycling.ratio$Status)
+        ), "red", "black")
+      )
+
+
+      ##consider possibility of multiple pIRIR signals and multiple recycling ratios
+      if (nrow(temp.rc.recuperation.rate) > 0) {
+        col.id  <- 1
+        for (i in seq(1,nrow(temp.rc.recuperation.rate),
+                      length(unique(temp.rc.recuperation.rate[,"Criteria"])))) {
+          for (j in 0:length(unique(temp.rc.recuperation.rate[,"Criteria"]))) {
+            points(
+              temp.rc.recycling.ratio[i + j, "Value"] - 1,
+              y = 35,
+              pch = col.id,
+              col = col.id,
+              cex = 1.3 * cex
+            )
+
+          }
+          col.id <- col.id + 1
+        }
+        rm(col.id)
+
+        ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+        ##polygon for recuperation rate
+        text(
+          x = -0.35, y = 25, "Recuperation", pos = 3, srt = 90, cex = 0.8*cex, offset = 0
+        )
+        polygon(
+          x = c(
+            0,
+            0,
+            as.numeric(as.character(
+              temp.rc.recuperation.rate$Threshold
+            ))[1],
+            as.numeric(as.character(
+              temp.rc.recuperation.rate$Threshold
+            ))[1]
+          ),
+          y = c(21,29,29,21),
+          col = "gray",
+          border = NA
+        )
+
+        polygon(
+          x = c(-0.3, -0.3, 0.3, 0.3) ,
+          y = c(21, 29, 29, 21),
+          border = ifelse(any(
+            grepl(pattern = "FAILED", temp.rc.recuperation.rate$Status)
+          ), "red", "black")
+        )
+        polygon(
+          x = c(-0.3,-0.3,0,0) , y = c(21,29,29,21), border = NA, density = 10, angle = 45
+        )
+
+        for (i in 1:nrow(temp.rc.recuperation.rate)) {
+          points(
+            temp.rc.recuperation.rate[i, "Value"],
+            y = 25,
+            pch = i,
+            col = i,
+            cex = 1.3 * cex
+          )
+
+        }
+      }
+
+      ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+      ##polygon for testdose error
+      text(
+        x = -0.35, y = 15, "Testdose Err.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0
+      )
+
+      polygon(
+        x = c(
+          0,
+          0,
+          as.numeric(as.character(temp.rc.testdose.error$Threshold))[1],
+          as.numeric(as.character(temp.rc.testdose.error$Threshold))[1]
+        ),
+        y = c(11,19,19,11),
+        col = "gray",
+        border = NA
+      )
+      polygon(
+        x = c(-0.3, -0.3, 0.3, 0.3) ,
+        y = c(11, 19, 19, 11),
+        border = ifelse(any(
+          grepl(pattern = "FAILED", temp.rc.testdose.error$Status)
+        ), "red", "black")
+      )
+      polygon(
+        x = c(-0.3,-0.3,0,0) , y = c(11,19,19,11), border = NA, density = 10, angle = 45
+      )
+
+
+      for (i in 1:nrow(temp.rc.testdose.error)) {
+        points(
+          temp.rc.testdose.error[i, "Value"],
+          y = 15,
+          pch = i,
+          col = i,
+          cex = 1.3 * cex
+        )
+      }
+
+      ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+      ##polygon for palaeodose error
+      text(
+        x = -0.35, y = 5, "Palaeodose Err.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0
+      )
+      polygon(
+        x = c(
+          0,
+          0,
+          as.numeric(as.character(temp.rc.palaeodose.error$Threshold))[1],
+          as.numeric(as.character(temp.rc.palaeodose.error$Threshold))[1]
+        ),
+        y = c(1,9,9,1),
+        col = "gray",
+        border = NA
+      )
+      polygon(
+        x = c(-0.3, -0.3, 0.3, 0.3) ,
+        y = c(1, 9, 9, 1),
+        border = ifelse(any(
+          grepl(pattern = "FAILED", temp.rc.palaedose.error$Status)
+        ), "red", "black")
+      )
+      polygon(
+        x = c(-0.3,-0.3,0,0) , y = c(1,9,9,1), border = NA, density = 10, angle = 45
+      )
+
+
+      for (i in 1:nrow(temp.rc.palaeodose.error)) {
+        points(
+          temp.rc.palaeodose.error[i, "Value"],
+          y = 5,
+          pch = i,
+          col = i,
+          cex = 1.3 * cex
+        )
+      }
+    }
+
+
+    if (plot == TRUE && 8 %in% plot.single.sel) {
+      ##graphical representation of the IR curve
+      temp.IRSL <- suppressWarnings(get_RLum(object, recordType = "IRSL"))
+
+      if(length(temp.IRSL) != 0){
+        plot_RLum.Data.Curve(temp.IRSL, par.local = FALSE)
+
+      }else{
+        plot(1, type="n", axes=F, xlab="", ylab="")
+        text(x = c(1,1), y = c(1, 1), labels = "No IRSL curve detected!")
+
+      }
+
+    }
+
+
+    ##this appears twice in this function, but the par settings need some more careful consideration ...
+    if (exists("par.default")) {
+      par(par.default)
+      rm(par.default)
+    }
+
+
+
+    # Return --------------------------------------------------------------------------------------
+    invisible(temp.results.final)
+
+  }else{
+    warning(paste0(
+      "\n",
+      paste(unlist(error.list), collapse = "\n"),"\n... >> nothing was done here!"
+    ), call. = FALSE)
+    invisible(NULL)
+
+  }
+
+}
diff --git a/R/analyse_SAR.TL.R b/R/analyse_SAR.TL.R
new file mode 100644
index 0000000..c27570b
--- /dev/null
+++ b/R/analyse_SAR.TL.R
@@ -0,0 +1,581 @@
+#' Analyse SAR TL measurements
+#'
+#' The function performs a SAR TL analysis on a
+#' \code{\linkS4class{RLum.Analysis}} object including growth curve fitting.
+#'
+#' This function performs a SAR TL analysis on a set of curves. The SAR
+#' procedure in general is given by Murray and Wintle (2000). For the
+#' calculation of the Lx/Tx value the function \link{calc_TLLxTxRatio} is
+#' used.\cr\cr \bold{Provided rejection criteria}\cr\cr
+#' \sQuote{recycling.ratio}: calculated for every repeated regeneration dose
+#' point.\cr \sQuote{recuperation.rate}: recuperation rate calculated by
+#' comparing the Lx/Tx values of the zero regeneration point with the Ln/Tn
+#' value (the Lx/Tx ratio of the natural signal). For methodological
+#' background see Aitken and Smith (1988).\cr
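+#'
+#' For example, with the default \code{recycling.ratio = 10} a recycling ratio of 1.05
+#' passes the test, since |1 - 1.05| = 0.05 < 0.10, whereas a ratio of 1.15 fails.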
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}}(\bold{required}): input
+#' object containing data for analysis
+#'
+#' @param object.background currently not used
+#'
+#' @param signal.integral.min \link{integer} (\bold{required}): requires the
+#' channel number for the lower signal integral bound (e.g.
+#' \code{signal.integral.min = 100})
+#'
+#' @param signal.integral.max \link{integer} (\bold{required}): requires the
+#' channel number for the upper signal integral bound (e.g.
+#' \code{signal.integral.max = 200})
+#'
+#' @param sequence.structure \link{vector} \link{character} (with default):
+#' specifies the general sequence structure. Three steps are allowed (
+#' \code{"PREHEAT"}, \code{"SIGNAL"}, \code{"BACKGROUND"}), in addition a
+#' parameter \code{"EXCLUDE"}. This allows excluding TL curves which are not
+#' relevant for the protocol analysis. (Note: non-TL curves are removed by default)
+#'
+#' @param rejection.criteria \link{list} (with default): list containing
+#' rejection criteria in percentage for the calculation.
+#'
+#' @param dose.points \code{\link{numeric}} (optional): option to set dose points manually
+#'
+#' @param log \link{character} (with default): a character string which
+#' contains "x" if the x axis is to be logarithmic, "y" if the y axis is to be
+#' logarithmic and "xy" or "yx" if both axes are to be logarithmic. See
+#' \link{plot.default}).
+#'
+#' @param \dots further arguments that will be passed to the function
+#' \code{\link{plot_GrowthCurve}}
+#'
+#' @return A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+#' returned containing the following elements:
+#' \item{De.values}{\link{data.frame} containing De-values and further
+#' parameters} \item{LnLxTnTx.values}{\link{data.frame} of all calculated Lx/Tx
+#' values including signal, background counts and the dose points.}
+#' \item{rejection.criteria}{\link{data.frame} with values that might be used
+#' as rejection criteria. NA is produced if no R0 dose point exists.}\cr\cr
+#' \bold{note:} the output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @note \bold{THIS IS A BETA VERSION}\cr\cr Non-TL curves will be removed
+#' from the input object without further warning.
+#'
+#' @section Function version: 0.1.5
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\link{calc_TLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+#' \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+#' \code{\link{get_RLum}}
+#'
+#' @references Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+#' after bleaching.  Quaternary Science Reviews 7, 387-393.
+#'
+#' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+#' improved single-aliquot regenerative-dose protocol. Radiation Measurements
+#' 32, 57-73.
+#' @keywords datagen plot
+#' @examples
+#'
+#'
+#' ##load data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##transform the values from the first position in a RLum.Analysis object
+#' object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3)
+#'
+#' ##perform analysis
+#' analyse_SAR.TL(object,
+#'                signal.integral.min = 210,
+#'                signal.integral.max = 220,
+#'                log = "y",
+#'                fit.method = "EXP OR LIN",
+#'                sequence.structure = c("SIGNAL", "BACKGROUND"))
+#'
+#' @export
+analyse_SAR.TL <- function(
+  object,
+  object.background,
+  signal.integral.min,
+  signal.integral.max,
+  sequence.structure = c("PREHEAT", "SIGNAL", "BACKGROUND"),
+  rejection.criteria = list(recycling.ratio = 10, recuperation.rate = 10),
+  dose.points,
+  log = "",
+  ...
+){
+
+  # CONFIG  -----------------------------------------------------------------
+
+  ##set allowed curve types
+  type.curves <- c("TL")
+
+  ##=============================================================================#
+  # General Integrity Checks ---------------------------------------------------
+
+  ##GENERAL
+
+  ##MISSING INPUT
+  if(missing("object")==TRUE){
+    stop("[analyse_SAR.TL] No value set for 'object'!")
+  }
+
+  if(missing("signal.integral.min") == TRUE){
+    stop("[analyse_SAR.TL] No value set for 'signal.integral.min'!")
+  }
+
+  if(missing("signal.integral.max") == TRUE){
+    stop("[analyse_SAR.TL] No value set for 'signal.integral.max'!")
+  }
+
+  ##INPUT OBJECTS
+  if(is(object, "RLum.Analysis") == FALSE){
+    stop("[analyse_SAR.TL] Input object is not of type 'RLum.Analyis'!")
+  }
+
+
+  # Protocol Integrity Checks --------------------------------------------------
+
+  ##Remove non TL-curves from object by selecting TL curves
+  object@records <- get_RLum(object, recordType = type.curves)
+
+  ##ANALYSE SEQUENCE OBJECT STRUCTURE
+
+  ##set vector for sequence structure
+  temp.protocol.step <- rep(sequence.structure,length(object@records))[1:length(object@records)]
+
+  ##grep object structure
+  temp.sequence.structure <- structure_RLum(object)
+
+  ##set values for step
+  temp.sequence.structure[,"protocol.step"] <- temp.protocol.step
+
+  ##remove TL curves which are excluded
+  temp.sequence.structure <- temp.sequence.structure[which(
+    temp.sequence.structure[,"protocol.step"]!="EXCLUDE"),]
+
+  ##check integrity; signal and bg range should be equal
+  if(length(
+    unique(
+      temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","x.max"]))>1){
+
+    stop(paste(
+      "[analyse_SAR.TL()] Signal range differs. Check sequence structure.\n",
+      temp.sequence.structure
+    ))
+  }
+
+  ##check if the wanted curves are a multiple of the structure
+  if(length(temp.sequence.structure[,"id"])%%length(sequence.structure)!=0){
+
+    stop("[analyse_SAR.TL()] Input TL curves are not a multiple of the sequence structure.")
+
+  }
+
+
+
+  # Calculate LnLxTnTx values  --------------------------------------------------
+
+  ##grep IDs for signal and background curves
+  TL.preheat.ID <- temp.sequence.structure[
+    temp.sequence.structure[,"protocol.step"] == "PREHEAT","id"]
+
+  TL.signal.ID <- temp.sequence.structure[
+    temp.sequence.structure[,"protocol.step"] == "SIGNAL","id"]
+
+  TL.background.ID <- temp.sequence.structure[
+    temp.sequence.structure[,"protocol.step"] == "BACKGROUND","id"]
+
+
+  ##calculate LxTx values using external function
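+  ##note: the signal curves are assumed to alternate as Lx, Tx, Lx, Tx, ...,
+  ##hence the loop below steps through the curve IDs in pairs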
+
+  for(i in seq(1,length(TL.signal.ID),by=2)){
+
+    temp.LnLxTnTx <- get_RLum(
+      calc_TLLxTxRatio(
+        Lx.data.signal = get_RLum(object, record.id=TL.signal.ID[i]),
+        Lx.data.background = get_RLum(object, record.id=TL.background.ID[i]),
+        Tx.data.signal = get_RLum(object, record.id=TL.signal.ID[i+1]),
+        Tx.data.background = get_RLum(object, record.id = TL.background.ID[i+1]),
+        signal.integral.min,
+        signal.integral.max))
+
+    ##grep dose
+    temp.Dose <- object@records[[TL.signal.ID[i]]]@info$IRR_TIME
+
+
+    temp.LnLxTnTx <- cbind(Dose=temp.Dose, temp.LnLxTnTx)
+
+    if(exists("LnLxTnTx")==FALSE){
+
+      LnLxTnTx <- data.frame(temp.LnLxTnTx)
+
+    }else{
+
+      LnLxTnTx <- rbind(LnLxTnTx,temp.LnLxTnTx)
+
+    }
+  }
+
+  ##set dose.points manual if argument was set
+  if(!missing(dose.points)){
+    temp.Dose <- dose.points
+    LnLxTnTx$Dose <- dose.points
+
+  }
+
+  # Set regeneration points -------------------------------------------------
+
+  #generate unique dose id - these are also the numbers for the regeneration points
+  temp.DoseID <- c(0:(length(temp.Dose)-1))
+  temp.DoseName <- paste("R",temp.DoseID,sep="")
+  temp.DoseName <- cbind(Name=temp.DoseName,Dose=temp.Dose)
+
+  ##set natural
+  temp.DoseName[temp.DoseName[,"Name"]=="R0","Name"]<-"Natural"
+
+  ##set R0
+  temp.DoseName[temp.DoseName[,"Name"]!="Natural" & temp.DoseName[,"Dose"]==0,"Name"]<-"R0"
+
+  ##find duplicated doses (including 0 dose - which means the Natural)
+  temp.DoseDuplicated<-duplicated(temp.DoseName[,"Dose"])
+
+  ##combine temp.DoseName
+  temp.DoseName<-cbind(temp.DoseName,Repeated=temp.DoseDuplicated)
+
+  ##correct value for R0 (it is not really repeated)
+  temp.DoseName[temp.DoseName[,"Dose"]==0,"Repeated"]<-FALSE
+
+  ##combine in the data frame
+  temp.LnLxTnTx<-data.frame(Name=temp.DoseName[,"Name"],
+                            Repeated=as.logical(temp.DoseName[,"Repeated"]))
+
+
+  LnLxTnTx<-cbind(temp.LnLxTnTx,LnLxTnTx)
+  LnLxTnTx[,"Name"]<-as.character(LnLxTnTx[,"Name"])
+
+
+  # Calculate Recycling Ratio -----------------------------------------------
+
+  ##Calculate Recycling Ratio
+
+  if(length(LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,"Repeated"])>0){
+
+    ##identify repeated doses
+    temp.Repeated<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,c("Name","Dose","LxTx")]
+
+    ##find the corresponding previous dose for each repeated dose
+    temp.Previous<-t(sapply(1:length(temp.Repeated[,1]),function(x){
+      LnLxTnTx[LnLxTnTx[,"Dose"]==temp.Repeated[x,"Dose"] &
+                 LnLxTnTx[,"Repeated"]==FALSE,c("Name","Dose","LxTx")]
+    }))
+
+    ##convert to data.frame
+    temp.Previous<-as.data.frame(temp.Previous)
+
+    ##set column names
+    temp.ColNames<-sapply(1:length(temp.Repeated[,1]),function(x){
+      paste(temp.Repeated[x,"Name"],"/",
+            temp.Previous[temp.Previous[,"Dose"]==temp.Repeated[x,"Dose"],"Name"],
+            sep="")
+    })
+
+    ##Calculate Recycling Ratio
+    RecyclingRatio<-as.numeric(temp.Repeated[,"LxTx"])/as.numeric(temp.Previous[,"LxTx"])
+
+    ##Just transform the matrix and add column names
+    RecyclingRatio<-t(RecyclingRatio)
+    colnames(RecyclingRatio)<-temp.ColNames
+
+  }else{RecyclingRatio<-NA}
+
+
+  # Calculate Recuperation Rate ---------------------------------------------
+
+
+  ##Recuperation Rate
+  if("R0" %in% LnLxTnTx[,"Name"]==TRUE){
+    Recuperation<-round(LnLxTnTx[LnLxTnTx[,"Name"]=="R0","LxTx"]/
+                          LnLxTnTx[LnLxTnTx[,"Name"]=="Natural","LxTx"],digits=4)
+  }else{Recuperation<-NA}
+
+
+  # Combine and Evaluate Rejection Criteria ---------------------------------
+
+  RejectionCriteria <- data.frame(
+    criteria = c(colnames(RecyclingRatio), "recuperation rate"),
+    value = c(RecyclingRatio,Recuperation),
+    threshold = c(
+      rep(paste("+/-", rejection.criteria$recycling.ratio/100)
+          ,length(RecyclingRatio)),
+      paste("", rejection.criteria$recuperation.rate/100)
+    ),
+    status = c(
+
+      if(!is.na(RecyclingRatio[1])){
+
+        sapply(1:length(RecyclingRatio), function(x){
+          if(abs(1-RecyclingRatio[x])>(rejection.criteria$recycling.ratio/100)){
+            "FAILED"
+          }else{"OK"}})}else{NA},
+
+      if(!is.na(Recuperation) &&
+           Recuperation>rejection.criteria$recuperation.rate/100){"FAILED"}else{"OK"}
+
+    ))
+
+  ##============================================================================##
+  ##PLOTTING
+  ##============================================================================##
+
+  # Plotting - Config -------------------------------------------------------
+
+  ##grep plot parameter
+  par.default <- par(no.readonly = TRUE)
+
+  ##colours and double for plotting
+  col <- get("col", pos = .LuminescenceEnv)
+
+  col.doubled <- rep(col, each=2)
+
+  layout(matrix(c(1,1,2,2,
+                  1,1,2,2,
+                  3,3,4,4,
+                  3,3,4,4,
+                  5,5,5,5),5,4,byrow=TRUE))
+
+  par(oma=c(0,0,0,0), mar=c(4,4,3,3))
+
+  ## 1 -> TL Lx
+  ## 2 -> TL Tx
+  ## 3 -> TL Lx Plateau
+  ## 4 -> TL Tx Plateau
+  ## 5 -> Legend
+
+  ##recalculate signal.integral from channels to temperature
+  signal.integral.temperature <- c(object@records[[TL.signal.ID[1]]]@data[signal.integral.min,1] :
+                                     object@records[[TL.signal.ID[1]]]@data[signal.integral.max,1])
+
+
+  ##warning if the number of curves exceeds the number of colour values
+  if(length(col)<length(TL.signal.ID)/2){
+    cat("\n[analyse_SAR.TL.R] Warning: Too many curves! Only the first",
+        length(col),"curves are plotted!")
+  }
+
+
+  # Plotting TL Lx Curves ----------------------------------------------------
+
+  #open plot area LnLx
+  plot(NA,NA,
+       xlab="Temp. [\u00B0C]",
+       ylab=paste("TL [a.u.]",sep=""),
+       xlim=c(0.1,
+              max(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","x.max"])),
+       ylim=c(
+         min(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","y.min"]),
+         max(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","y.max"])),
+
+       main=expression(paste(L[n],",",L[x]," curves",sep="")),
+       log=log)
+
+
+  ##plot curves
+  sapply(seq(1,length(TL.signal.ID),by=2), function(x){
+
+
+    lines(object@records[[TL.signal.ID[x]]]@data,col=col.doubled[x])
+
+  })
+
+  ##mark integration limits
+  abline(v=min(signal.integral.temperature), lty=2, col="gray")
+  abline(v=max(signal.integral.temperature), lty=2, col="gray")
+
+
+  # Plotting TnTx Curves ----------------------------------------------------
+
+  #open plot area TnTx
+  plot(NA,NA,
+       xlab="Temp. [\u00B0C]",
+       ylab=paste("TL [a.u.]",sep=""),
+       xlim=c(0.1,
+              max(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","x.max"])),
+       ylim=c(
+         min(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","y.min"]),
+         max(temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","y.max"])),
+
+       main=expression(paste(T[n],",",T[x]," curves",sep="")),
+       log=log)
+
+
+  ##plot curves
+  sapply(seq(2,length(TL.signal.ID),by=2), function(x){
+
+
+    lines(object@records[[TL.signal.ID[x]]]@data,col=col.doubled[x])
+
+  })
+
+  ##mark integration limits
+  abline(v=min(signal.integral.temperature), lty=2, col="gray")
+  abline(v=max(signal.integral.temperature), lty=2, col="gray")
+
+
+  # Plotting Plateau Test LnLx -------------------------------------------------
+
+  NTL.net.LnLx <- data.frame(object@records[[TL.signal.ID[1]]]@data[,1],
+                             object@records[[TL.signal.ID[1]]]@data[,2]-
+                               object@records[[TL.background.ID[1]]]@data[,2])
+
+  Reg1.net.LnLx <- data.frame(object@records[[TL.signal.ID[3]]]@data[,1],
+                              object@records[[TL.signal.ID[3]]]@data[,2]-
+                                object@records[[TL.background.ID[3]]]@data[,2])
+
+
+  TL.Plateau.LnLx <- data.frame(NTL.net.LnLx[,1], Reg1.net.LnLx[,2]/NTL.net.LnLx[,2])
+
+  ##Plot Plateau Test
+  plot(NA, NA,
+       xlab = "Temp. [\u00B0C]",
+       ylab = "TL [a.u.]",
+       xlim = c(min(signal.integral.temperature)*0.9, max(signal.integral.temperature)*1.1),
+       ylim = c(0, max(NTL.net.LnLx[,2])),
+       main = expression(paste("Plateau test ",L[n],",",L[x]," curves",sep=""))
+  )
+
+
+  ##plot single curves
+  lines(NTL.net.LnLx, col=col[1])
+  lines(Reg1.net.LnLx, col=col[2])
+
+
+  ##plot
+  par(new=TRUE)
+  plot(TL.Plateau.LnLx,
+       axes=FALSE,
+       xlab="",
+       ylab="",
+       ylim=c(0,
+              quantile(TL.Plateau.LnLx[c(signal.integral.min:signal.integral.max),2],
+                       probs = c(0.90), na.rm = TRUE)+3),
+       col="darkgreen")
+  axis(4)
+
+
+  # Plotting Plateau Test TnTx -------------------------------------------------
+
+  ##get NTL signal
+  NTL.net.TnTx <- data.frame(object@records[[TL.signal.ID[2]]]@data[,1],
+                             object@records[[TL.signal.ID[2]]]@data[,2]-
+                               object@records[[TL.background.ID[2]]]@data[,2])
+
+  ##get signal from the first regeneration point
+  Reg1.net.TnTx <- data.frame(object@records[[TL.signal.ID[4]]]@data[,1],
+                              object@records[[TL.signal.ID[4]]]@data[,2]-
+                                object@records[[TL.background.ID[4]]]@data[,2])
+
+
+  ##combine values
+  TL.Plateau.TnTx <- data.frame(NTL.net.TnTx[,1], Reg1.net.TnTx[,2]/NTL.net.TnTx[,2])
+
+  ##Plot Plateau Test
+  plot(NA, NA,
+       xlab = "Temp. [\u00B0C]",
+       ylab = "TL [a.u.]",
+       xlim = c(min(signal.integral.temperature)*0.9, max(signal.integral.temperature)*1.1),
+       ylim = c(0, max(NTL.net.TnTx[,2])),
+       main = expression(paste("plateau Test ",T[n],",",T[x]," curves",sep=""))
+  )
+
+
+  ##plot single curves
+  lines(NTL.net.TnTx, col=col[1])
+  lines(Reg1.net.TnTx, col=col[2])
+
+
+  ##plot
+  par(new=TRUE)
+  plot(TL.Plateau.TnTx,
+       axes=FALSE,
+       xlab="",
+       ylab="",
+       ylim=c(0,
+              quantile(TL.Plateau.TnTx[c(signal.integral.min:signal.integral.max),2],
+                       probs = c(0.90), na.rm = TRUE)+3),
+       col="darkgreen")
+  axis(4)
+
+
+
+
+  # Plotting Legend ----------------------------------------
+
+
+  plot(c(1:(length(TL.signal.ID)/2)),
+       rep(8,length(TL.signal.ID)/2),
+       type = "p",
+       axes=FALSE,
+       xlab="",
+       ylab="",
+       pch=15,
+       col=col[1:length(TL.signal.ID)],
+       cex=2,
+       ylim=c(0,10)
+  )
+
+  ##add text
+  text(c(1:(length(TL.signal.ID)/2)),
+       rep(4,length(TL.signal.ID)/2),
+       paste(LnLxTnTx$Name,"\n(",LnLxTnTx$Dose,")", sep="")
+
+  )
+
+  ##add line
+  abline(h=10,lwd=0.5)
+
+  ##set failed text and mark De as failed
+  if(length(grep("FAILED",RejectionCriteria$status))>0){
+
+    mtext("[FAILED]", col="red")
+
+
+  }
+
+  ##reset par
+  par(par.default)
+  rm(par.default)
+
+  # Plotting  GC  ----------------------------------------
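+  ##note: as no error is propagated up to this point, the Lx/Tx error is
+  ##assumed to be a fixed 10 % of the Lx/Tx value (see LxTx.Error below)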
+  temp.sample <- data.frame(Dose=LnLxTnTx$Dose,
+                            LxTx=LnLxTnTx$LxTx,
+                            LxTx.Error=LnLxTnTx$LxTx*0.1,
+                            TnTx=LnLxTnTx$TnTx
+  )
+
+  temp.GC <- get_RLum(plot_GrowthCurve(temp.sample,
+                                               ...))[,c("De","De.Error")]
+
+  ##add rejection status
+  if(length(grep("FAILED",RejectionCriteria$status))>0){
+
+    temp.GC <- data.frame(temp.GC, RC.Status="FAILED")
+
+  }else{
+
+    temp.GC <- data.frame(temp.GC, RC.Status="OK")
+
+  }
+
+  # Return Values -----------------------------------------------------------
+
+  newRLumResults.analyse_SAR.TL <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      data = temp.GC,
+      LnLxTnTx.table = LnLxTnTx,
+      rejection.criteria = RejectionCriteria
+    ),
+    info = list(info = sys.call())
+  )
+
+  return(newRLumResults.analyse_SAR.TL)
+
+}
diff --git a/R/analyse_baSAR.R b/R/analyse_baSAR.R
new file mode 100644
index 0000000..fb08d99
--- /dev/null
+++ b/R/analyse_baSAR.R
@@ -0,0 +1,2463 @@
+#' Bayesian models (baSAR) applied on luminescence data
+#'
+#' This function allows the application of Bayesian models on luminescence data, measured
+#' with the single-aliquot regenerative-dose (SAR, Murray and Wintle, 2000) protocol. In particular,
+#' it follows the idea proposed by Combes et al., 2015 of using a hierarchical model for estimating
+#' a central equivalent dose from a set of luminescence measurements. This function is (I) the adaptation
+#' of this approach for the R environment and (II) an extension and a technical refinement of the
+#' published code.\cr
+#'
+#' Internally the function consists of two parts: (I) the Bayesian core for the Bayesian calculations
+#' and applying the hierarchical model and (II) a data pre-processing part. The Bayesian core can be run
+#' independently, if the input data are sufficient (see below). The data pre-processing part was
+#' implemented to simplify the analysis for the user, as all needed data pre-processing is done
+#' by the function, i.e. in theory it is enough to provide a BIN/BINX-file with the SAR measurement
+#' data. For the Bayesian analysis the following information is needed from the SAR analysis of each aliquot:
+#' LxTx, the LxTx error and the dose values for all regeneration points.
+#'
+#' \bold{How is the systematic error contribution calculated?}\cr
+#'
+#' Standard errors (so far) provided with the source dose rate are considered as systematic uncertainties
+#' and added to the final central dose by:
+#'
+#' \deqn{systematic.error = 1/n \sum SE(source.doserate)}
+#'
+#' \deqn{SE(central.dose.final) = \sqrt{SE(central.dose)^2 + systematic.error^2}}
+#'
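+#' For example, given source dose rate errors of 0.003 and 0.005 from two readers, the
+#' systematic error term becomes (0.003 + 0.005)/2 = 0.004, and thus
+#' SE(central.dose.final) = sqrt(SE(central.dose)^2 + 0.004^2) (illustrative values only).\cr
+#'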
+#' Please note that this approach is rather rough and can only be valid if the source dose rate
+#' errors, in case different readers were used, are similar. In cases where more than
+#' one source dose rate is provided a warning is given.\cr
+#'
+#' \bold{Input / output scenarios}\cr
+#'
+#' Various inputs are allowed for this function. Unfortunately this makes the function handling rather
+#' complex, but at the same time very powerful. Available scenarios:\cr
+#'
+#' \bold{(1) - \code{object} is BIN-file or link to a BIN-file}
+#'
+#' It does not matter how the information of the BIN/BINX file is provided. The function
+#' supports (a) either a path to a file or directory or a \code{list} of file names or paths or (b)
+#' a \code{\linkS4class{Risoe.BINfileData}} object or a list of these objects. The latter can
+#' be produced by using the function \code{\link{read_BIN2R}}, but this function is called automatically
+#' if only a filename and/or a path is provided. In both cases these data become the input
+#' for the analysis.
+#'
+#' \code{[XLS_file = NULL]}\cr
+#'
+#' If no XLS file (or data frame with the same format) is provided the function runs an automatic process that
+#' consists of the following steps:
+#'
+#' \itemize{
+#'  \item Select all valid aliquots using the function \code{\link{verify_SingleGrainData}}
+#'  \item Calculate Lx/Tx values using the function \code{\link{calc_OSLLxTxRatio}}
+#'  \item Calculate De values using the function \code{\link{plot_GrowthCurve}}
+#' }
+#'
+#' These processed data are subsequently used for the Bayesian analysis.
+#'
+#' \code{[XLS_file != NULL]}\cr
+#'
+#' If an XLS-file is provided, or a \code{data.frame} providing similar information, the pre-processing
+#' consists of the following steps:
+#'
+#' \itemize{
+#'  \item Calculate Lx/Tx values using the function \code{\link{calc_OSLLxTxRatio}}
+#'  \item Calculate De values using the function \code{\link{plot_GrowthCurve}}
+#' }
+#'
+#' This means the XLS file should contain a selection of the BIN-file names and the aliquots selected
+#' for the further analysis. This allows a manual selection of input data, as the automatic selection
+#' by \code{\link{verify_SingleGrainData}} might not be sufficient.\cr
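+#'
+#' For orientation, a \code{data.frame} replacing the XLS-file might look like this
+#' (three columns: file name, disc position, grain position; the column names below
+#' are only illustrative):\cr
+#'
+#' \tabular{lll}{
+#' \bold{FILE} \tab \bold{DISC} \tab \bold{GRAIN}\cr
+#' \code{sample.binx} \tab \code{1} \tab \code{0}\cr
+#' \code{sample.binx} \tab \code{2} \tab \code{0}
+#' }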
+#'
+#'
+#' \bold{(2) - \code{object} \code{RLum.Results object}}
+#'
+#' If an \code{\linkS4class{RLum.Results}} object is provided as input and(!) this object was
+#' previously created by the function \code{analyse_baSAR()} itself, the pre-processing part
+#' is skipped and the function directly starts the Bayesian analysis. This option is very powerful
+#' as it allows changing parameters for the Bayesian analysis without the need to repeat
+#' the data pre-processing. If, furthermore, the argument \code{aliquot_range} is set, aliquots
+#' can be manually excluded based on previous runs.\cr
+#'
+#' \bold{\code{method_control}}\cr
+#'
+#' These are arguments that can be passed directly to the Bayesian calculation core, supported arguments
+#' are:
+#'
+#' \tabular{lll}{
+#' \bold{Parameter} \tab \bold{Type} \tab \bold{Description}\cr
+#' \code{lower_centralD} \tab \code{\link{numeric}} \tab sets the lower bound for the expected De range. Change it only if you know what you are doing!\cr
+#' \code{upper_centralD} \tab \code{\link{numeric}} \tab sets the upper bound for the expected De range. Change it only if you know what you are doing!\cr
+#' \code{n.chains} \tab \code{\link{integer}} \tab sets number of parallel chains for the model (default = 3)
+#' (cf. \code{\link[rjags]{jags.model}})\cr
+#' \code{inits} \tab \code{\link{list}} \tab option to set initialisation values (cf. \code{\link[rjags]{jags.model}}) \cr
+#' \code{thin} \tab \code{\link{numeric}} \tab thinning interval for monitoring the Bayesian process (cf. \code{\link[rjags]{jags.model}})\cr
+#' \code{variables.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
+#' \code{'central_D'}, \code{'sigma_D'}, \code{'D'}, \code{'Q'}, \code{'a'}, \code{'b'}, \code{'c'}, \code{'g'}.
+#' Note: only variables present in the model can be monitored.
+#' }
+#'
+#' \bold{User defined models}\cr
+#'
+#' The function provides the option to modify and to define own models that can be used for
+#' the Bayesian calculation. In case the user wants to modify a model, a new model
+#' can be piped into the function via the argument \code{baSAR_model} as \code{character}.
+#' The model has to be provided in the JAGS dialect of the BUGS language (cf. \code{\link[rjags]{jags.model}})
+#' and the pre-defined parameter names have to be respected, otherwise the function
+#' will break.\cr
+#'
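+#' A minimal calling sketch (the model body is deliberately abbreviated and only
+#' illustrative):\cr
+#'
+#' \code{my_model <- "model { ... }"} (JAGS dialect, respecting the pre-defined parameter names)\cr
+#' \code{analyse_baSAR(object, baSAR_model = my_model, ...)}\cr
+#'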
+#' \bold{FAQ}\cr
+#'
+#' Q: How can I set the seed for the random number generator (RNG)?\cr
+#' A: Use the argument \code{method_control}, e.g., for three MCMC chains
+#' (as it is the default):\cr
+#' \code{method_control = list(
+#' inits = list(
+#'  list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1),
+#'  list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2),
+#'  list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3)
+#' ))}\cr
+#' This sets a reproducible set for every chain separately.\cr
+#'
+#' Q: How can I modify the output plots?\cr
+#' A: You can't, but you can use the function output to create own, modified plots.\cr
+#'
+#' Q: Can I change the boundaries for the central_D?\cr
+#' A: Yes, we made it possible, but we DO NOT recommend it, unless you know what you are doing!
+#' Example: \code{method_control = list(lower_centralD = 10)}\cr
+#'
+#' \bold{Additional arguments supported via the \code{...} argument}\cr
+#'
+#' This list summarizes the additional arguments that can be passed to the internally used
+#' functions.
+#'
+#' \tabular{llll}{
+#' \bold{Supported argument} \tab \bold{Corresponding function} \tab \bold{Default} \tab \bold{Short description }\cr
+#' \code{threshold} \tab \code{\link{verify_SingleGrainData}} \tab \code{30} \tab change rejection threshold for curve selection \cr
+#' \code{sheet} \tab \code{\link[readxl]{read_excel}} \tab \code{1} \tab select XLS-sheet for import\cr
+#' \code{col_names} \tab \code{\link[readxl]{read_excel}} \tab \code{TRUE} \tab first row in XLS-file is header\cr
+#' \code{col_types} \tab \code{\link[readxl]{read_excel}} \tab \code{NULL} \tab limit import to specific columns\cr
+#' \code{skip} \tab \code{\link[readxl]{read_excel}} \tab \code{0} \tab number of rows to be skipped during import\cr
+#' \code{n.records} \tab \code{\link{read_BIN2R}} \tab \code{NULL} \tab limit records during BIN-file import\cr
+#' \code{duplicated.rm} \tab \code{\link{read_BIN2R}} \tab \code{TRUE} \tab remove duplicated records in the BIN-file\cr
+#' \code{pattern} \tab \code{\link{read_BIN2R}} \tab \code{TRUE} \tab select BIN-file by name pattern\cr
+#' \code{position} \tab \code{\link{read_BIN2R}} \tab \code{NULL} \tab limit import to a specific position\cr
+#' \code{background.count.distribution} \tab \code{\link{calc_OSLLxTxRatio}} \tab \code{"non-poisson"} \tab set assumed count distribution\cr
+#' \code{fit.weights} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables fit weights\cr
+#' \code{fit.bounds} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables fit bounds\cr
+#' \code{NumberIterations.MC} \tab \code{\link{plot_GrowthCurve}} \tab \code{100} \tab number of MC runs for error calculation\cr
+#' \code{output.plot} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables dose response curve plot\cr
+#' \code{output.plotExtended} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables extended dose response curve plot\cr
+#' }
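+#'
+#' For instance, a call might look like this (file name and values are only illustrative):\cr
+#'
+#' \code{analyse_baSAR(object = "measurement.binx", source_doserate = c(0.04, 0.002),}\cr
+#' \code{signal.integral = c(1:5), background.integral = c(80:100), threshold = 40)}\cr
+#'
+#' where \code{threshold} is passed through to \code{\link{verify_SingleGrainData}}.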
+#'
+#'
+#' @param object \code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Results}} or
+#' \code{\link{character}} or \code{\link{list}} (\bold{required}):
+#' input object used for the Bayesian analysis. If a \code{character} is provided the function
+#' assumes a file connection and tries to import a BIN-file using the provided path. If a \code{list} is
+#' provided the list can only contain either \code{Risoe.BINfileData} objects or \code{character}s
+#' providing a file connection. Mixing of both types is not allowed. If an \code{\linkS4class{RLum.Results}}
+#' is provided the function directly starts with the Bayesian Analysis (see details)
+#'
+#' @param XLS_file \code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements)
+#'
+#' @param aliquot_range \code{\link{numeric}} (optional): allows to limit the range of the aliquots
+#' used for the analysis. This argument has only an effect if the argument \code{XLS_file} is used or
+#' the input is the previous output (i.e. is \code{\linkS4class{RLum.Results}}). In this case the
+#' new selection will add the aliquots to the removed aliquots table.
+#'
+#' @param source_doserate \code{\link{numeric}} \bold{(required)}: source dose rate of the beta-source used
+#' for the measurement and its uncertainty in Gy/s, e.g., \code{source_doserate = c(0.12, 0.04)}.
+#' The parameter can be provided as \code{list}, for the case that more than one BIN-file is provided, e.g.,
+#' \code{source_doserate = list(c(0.04, 0.004), c(0.05, 0.004))}.
+#'
+#' @param signal.integral \code{\link{vector}} (\bold{required}): vector with the
+#' limits for the signal integral used for the calculation, e.g., \code{signal.integral = c(1:5)}.
+#' Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param signal.integral.Tx \code{\link{vector}} (optional): vector with the
+#' limits for the signal integral for the Tx curve. If nothing is provided the
+#' value from \code{signal.integral} is used and it is ignored
+#' if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param background.integral \code{\link{vector}} (\bold{required}): vector with the
+#' bounds for the background integral.
+#' Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param background.integral.Tx \code{\link{vector}} (optional): vector with the
+#' limits for the background integral for the Tx curve. If nothing is provided the
+#' value from \code{background.integral} is used.
+#' Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param sigmab \code{\link{numeric}} (with default): option to set a manual value for
+#' the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
+#' calculation. The value should be provided as absolute squared count values, cf. \code{\link{calc_OSLLxTxRatio}}.
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param sig0 \code{\link{numeric}} (with default): allows adding an extra component of error
+#' to the final Lx/Tx error value (e.g., instrumental error, see details in \code{\link{calc_OSLLxTxRatio}}).
+#' The parameter can be provided as \code{list}, see \code{source_doserate}.
+#'
+#' @param distribution \code{\link{character}} (with default): type of distribution that is used during
+#' Bayesian calculations for determining the Central dose and overdispersion values.
+#' Allowed inputs are \code{"cauchy"}, \code{"normal"} and \code{"log_normal"}.
+#'
+#' @param baSAR_model \code{\link{character}} (optional): option to provide a modified or new model for the
+#' Bayesian calculation (see details). If such a model is provided, the argument \code{distribution} is ignored
+#' and set to \code{'user_defined'}.
+#'
+#' @param n.MCMC \code{\link{integer}} (with default): number of iterations for the Markov chain Monte Carlo (MCMC)
+#' simulations
+#'
+#' @param fit.method \code{\link{character}} (with default): fit method used for fitting the growth
+#' curve using the function \code{\link{plot_GrowthCurve}}. Supported methods: \code{EXP},
+#' \code{EXP+LIN} and \code{LIN}
+#'
+#' @param fit.force_through_origin \code{\link{logical}} (with default): force fitting through origin
+#'
+#' @param fit.includingRepeatedRegPoints \code{\link{logical}} (with default):
+#' includes the recycling point (assumed to be measured during the last cycle)
+#'
+#' @param method_control \code{\link{list}} (optional): named list of control parameters that can be directly
+#' passed to the Bayesian analysis, e.g., \code{method_control = list(n.chains = 4)}.
+#' See details for further information
+#'
+#' @param digits \code{\link{integer}} (with default): round output to the number of given digits
+#'
+#' @param plot \code{\link{logical}} (with default): enables or disables plot output
+#'
+#' @param plot_reduced \code{\link{logical}} (with default): enables or disables the reduced plot output
+#' (cf. the trace plot description in the return section)
+#'
+#' @param plot.single \code{\link{logical}} (with default): enables or disables single plots; if disabled,
+#' the plots are arranged by \code{analyse_baSAR}
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables verbose mode
+#'
+#' @param ... parameters that can be passed to the functions \code{\link{calc_OSLLxTxRatio}} (almost full support),
+#' \code{\link[readxl]{read_excel}} (full support) and \code{\link{read_BIN2R}} (\code{n.records},
+#' \code{position}, \code{duplicated.rm}); see details.
+#'
+#'
+#' @return Function returns results numerically and graphically:\cr
+#'
+#' -----------------------------------\cr
+#' [ NUMERICAL OUTPUT ]\cr
+#' -----------------------------------\cr
+#' \bold{\code{RLum.Results}}-object\cr
+#'
+#' \bold{slot:} \bold{\code{@data}}\cr
+#' \tabular{lll}{
+#' \bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+#'  \code{$summary} \tab \code{data.frame} \tab statistical summary, including the central dose \cr
+#'  \code{$mcmc} \tab \code{mcmc} \tab object including raw output of \code{\link[rjags]{rjags}} \cr
+#'  \code{$models} \tab \code{character} \tab implemented models used in the baSAR-model core \cr
+#'  \code{$input_object} \tab \code{data.frame} \tab summarising table (same format as the XLS-file) including, e.g., Lx/Tx values\cr
+#'  \code{$removed_aliquots} \tab \code{data.frame} \tab table with removed aliquots (e.g., NaN or Inf Lx/Tx values). If nothing was removed, \code{NULL} is returned
+#' }
+#'
+#'\bold{slot:} \bold{\code{@info}}\cr
+#'
+#' The original function call\cr
+#'
+#' ------------------------\cr
+#' [ PLOT OUTPUT ]\cr
+#' ------------------------\cr
+#'
+#' \itemize{
+#'  \item (A) Ln/Tn curves with set integration limits,
+#'  \item (B) trace plots returned by the baSAR-model, showing the convergence of the parameters (trace)
+#'  and the resulting kernel density plots. If \code{plot_reduced = FALSE}, for every(!) dose a trace and
+#'  a density plot is returned (this may take a long time),
+#'  \item (C) dose plots showing the dose for every aliquot as boxplots with the
+#'  HPD marked within. If boxes are coloured 'orange' or 'red', the aliquot itself should be checked,
+#'  \item (D) the dose response curves resulting from the monitoring of the Bayesian modelling are
+#'  provided along with the Lx/Tx values and the HPD. Note: the number of displayed
+#'  curves is limited to 1000 (random selection) for performance reasons,
+#'  \item (E) the final plot is the De distribution as calculated using the conventional approach
+#'  and the central dose with the HPDs marked within.
+#'
+#' }
+#'
+#' \bold{Please note: If distribution was set to \code{log_normal} the central dose is given
+#' as geometric mean!}
+#'
+#'
+#' @section Function version: 0.1.25
+#'
+#' @author Norbert Mercier, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Sebastian Kreutzer,
+#' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
+#'
+#' The underlying Bayesian model is based on a contribution by Combes et al., 2015.
+#'
+#' @seealso \code{\link{read_BIN2R}}, \code{\link{calc_OSLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+#' \code{\link[readxl]{read_excel}}, \code{\link{verify_SingleGrainData}},
+#' \code{\link[rjags]{jags.model}}, \code{\link[rjags]{coda.samples}}, \code{\link{boxplot.default}}
+#'
+#'
+#' @references
+#'
+#' Combes, B., Philippe, A., Lanos, P., Mercier, N., Tribolo, C., Guerin, G., Guibert, P., Lahaye, C., 2015.
+#' A Bayesian central equivalent dose model for optically stimulated luminescence dating.
+#' Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001
+#'
+#' \bold{Further reading}
+#'
+#' Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013.
+#' Bayesian Data Analysis, Third Edition. CRC Press.
+#'
+#' Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot
+#' regenerative-dose protocol. Radiation Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X
+#'
+#' @note \bold{If you provide more than one BIN-file}, it is \bold{strongly} recommended to provide
+#' a \code{list} with the same number of elements for the following parameters:\cr
+#' \code{source_doserate}, \code{signal.integral}, \code{signal.integral.Tx}, \code{background.integral},
+#' \code{background.integral.Tx}, \code{sigmab}, \code{sig0}.\cr
+#'
+#' Example for two BIN-files: \code{source_doserate = list(c(0.04, 0.006), c(0.05, 0.006))}\cr
+#'
+#' \bold{The function is currently limited to work with standard Risoe BIN-files only!}
+#'
+#' @keywords datagen
+#'
+#' @examples
+#'
+#'##(1) load package test data set
+#'data(ExampleData.BINfileData, envir = environment())
+#'
+#'##(2) select relevant curves and limit the dataset
+#'CWOSL.SAR.Data <- subset(
+#'  CWOSL.SAR.Data,
+#'  subset = POSITION%in%c(1:3) & LTYPE == "OSL")
+#'
+#'\dontrun{
+#'##(3) run analysis
+#'##please note that the parameters selected here are
+#'##chosen for performance, not for reliability
+#'results <- analyse_baSAR(
+#'  object = CWOSL.SAR.Data,
+#'  source_doserate = c(0.04, 0.001),
+#'  signal.integral = c(1:2),
+#'  background.integral = c(80:100),
+#'  fit.method = "LIN",
+#'  plot = FALSE,
+#'  n.MCMC = 200
+#')
+#'
+#'print(results)
+#'
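+#'##(4) the output can be recycled: an RLum.Results object produced by
+#'##analyse_baSAR() can be used as input, in which case the preprocessing
+#'##is skipped and the Bayesian analysis starts directly; a minimal
+#'##sketch, assuming the object 'results' from step (3) above
+#'results_normal <- analyse_baSAR(
+#'  object = results,
+#'  distribution = "normal",
+#'  method_control = list(n.chains = 2),
+#'  plot = FALSE
+#')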
+#'
+#' ##XLS_file template
+#' ##copy and paste the code below into the terminal
+#' ##you can further use the function write.csv() to export the example
+#'
+#' XLS_file <-
+#' structure(
+#' list(
+#'  BIN_FILE = NA_character_,
+#'  DISC = NA_real_,
+#'  GRAIN = NA_real_),
+#'    .Names = c("BIN_FILE", "DISC", "GRAIN"),
+#'    class = "data.frame",
+#'    row.names = 1L
+#' )
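+#'
+#' ##the filled template can then be passed as a data.frame via the
+#' ##argument 'XLS_file'; the values below are purely hypothetical
+#' XLS_file$BIN_FILE <- "measurement.bin"
+#' XLS_file$DISC <- 1
+#' XLS_file$GRAIN <- 0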
+#'
+#' }
+#'
+#' @export
+analyse_baSAR <- function(
+  object,
+  XLS_file = NULL,
+  aliquot_range = NULL,
+  source_doserate = NULL,
+  signal.integral,
+  signal.integral.Tx = NULL,
+  background.integral,
+  background.integral.Tx = NULL,
+  sigmab = 0,
+  sig0 = 0.025,
+  distribution = "cauchy",
+  baSAR_model = NULL,
+  n.MCMC = 100000,
+  fit.method = "EXP",
+  fit.force_through_origin = TRUE,
+  fit.includingRepeatedRegPoints = TRUE,
+  method_control = list(),
+  digits = 3L,
+  plot = TRUE,
+  plot_reduced = TRUE,
+  plot.single = FALSE,
+  verbose = TRUE,
+  ...
+){
+
+  ##////////////////////////////////////////////////////////////////////////////////////////////////
+  ##FUNCTION TO BE CALLED to RUN the Bayesian Model
+  ##////////////////////////////////////////////////////////////////////////////////////////////////
+  ##START
+  .baSAR_function <-
+    function(Nb_aliquots,
+             distribution,
+             data.Dose,
+             data.Lum,
+             data.sLum,
+             fit.method,
+             n.MCMC,
+             fit.force_through_origin,
+             fit.includingRepeatedRegPoints,
+             method_control,
+             baSAR_model,
+             verbose)
+    {
+
+      ##lower and upper De, grab from method_control ... we are sure to find it here,
+      ##as it was set before the function call
+      lower_centralD <- method_control[["lower_centralD"]]
+      upper_centralD <- method_control[["upper_centralD"]]
+
+      ##number of MCMC chains
+      n.chains <-  if (is.null(method_control[["n.chains"]])) {
+        3
+      } else{
+        method_control[["n.chains"]]
+      }
+
+      ##inits
+      inits <-  if (is.null(method_control[["inits"]])) {
+        NULL
+      } else{
+        method_control[["inits"]]
+      }
+
+      ##thin
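+      ## by default the thinning interval scales with the number of
+      ## iterations so that roughly 400 samples per chain are kept,
+      ## e.g., n.MCMC = 1e+05 yields thin = 250; below 1e+05 iterations
+      ## a fixed interval of 10 is used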
+      thin <-  if (is.null(method_control[["thin"]])) {
+        if(n.MCMC >= 1e+05){
+          thin <- n.MCMC/1e+05 * 250
+
+        }else{
+          thin <- 10
+
+        }
+      } else{
+        method_control[["thin"]]
+      }
+
+      ##variable.names
+      variable.names <-  if (is.null(method_control[["variable.names"]])) {
+        c('central_D', 'sigma_D', 'D', 'Q', 'a', 'b', 'c', 'g')
+      } else{
+        method_control[["variable.names"]]
+      }
+
+
+      #check whether this makes sense at all, just a quick and dirty test
+      stopifnot(lower_centralD >= 0)
+
+      Limited_cycles <- vector()
+
+      if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <-  0 }
+      if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <-  1 }
+      if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <-  1 }
+      if (fit.force_through_origin == TRUE) {GC_Origin <- 1} else {GC_Origin <- 0}
+
+      ##Include or exclude repeated dose points
+      if (fit.includingRepeatedRegPoints) {
+        for (i in 1:Nb_aliquots) {
+          Limited_cycles[i] <- length(stats::na.exclude(data.Dose[,i]))
+        }
+
+      }else{
+
+        for (i in 1:Nb_aliquots) {
+
+          temp.logic <- !duplicated(data.Dose[,i], incomparables=c(0))  # logical excluding 0
+
+          m <- length(which(!temp.logic))
+
+          data.Dose[,i] <-  c(data.Dose[,i][temp.logic], rep(NA, m))
+          data.Lum[,i] <-  c(data.Lum[,i][temp.logic], rep(NA, m))
+          data.sLum[,i]  <-  c(data.sLum[,i][temp.logic], rep(NA, m))
+
+          rm(m)
+          rm(temp.logic)
+
+        }
+
+        for (i in 1:Nb_aliquots) {
+          Limited_cycles[i] <- length(data.Dose[, i]) - length(which(is.na(data.Dose[, i])))
+
+        }
+
+      }
+
+      ##check and correct for distribution name
+      if(!is.null(baSAR_model)){
+        if(distribution != "user_defined"){
+          distribution <- "user_defined"
+          warning("[analyse_baSAR()] 'distribution' set to 'user_defined'.", call. = FALSE)
+
+        }
+
+      }
+
+      # Bayesian Models ----------------------------------------------------------------------------
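+      ## all three pre-defined models share the same dose response core
+      ##   Q = GC_Origin * g + LinGC * (c * D) + ExpoGC * (a * (1 - exp(-D/b)))
+      ## where the 0/1 indicator variables set above switch the linear and
+      ## exponential terms on and off; the models only differ in the prior
+      ## placed on the individual dose D[i]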
+      baSAR_model <- list(
+
+        cauchy = "model {
+
+            central_D ~  dunif(lower_centralD,upper_centralD)
+
+            precision_D ~ dt(0, pow(0.16*central_D, -2), 1)T(0, )
+            sigma_D <-  1/sqrt(precision_D)
+
+            for (i in 1:Nb_aliquots) {
+              a[i] ~  dnorm(6.5 , 1/(9.2^2) ) T(0, )
+              b[i] ~  dnorm(50 , 1/(1000^2) )  T(0, )
+              c[i] ~  dnorm(1.002 , 1/(0.9^2) ) T(0, )
+              g[i] ~  dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
+              sigma_f[i]  ~  dexp (20)
+
+              D[i] ~ dt ( central_D , precision_D, 1)    #  Cauchy distribution
+
+              S_y[1,i] <-  1/(sLum[1,i]^2 + sigma_f[i]^2)
+              Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
+              Q[1,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
+
+              for (m in 2:Limited_cycles[i]) {
+                S_y[m,i] <-  1/(sLum[m,i]^2 + sigma_f[i]^2)
+                Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
+                Q[m,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
+              }
+            }
+          }",
+
+       normal = "model {
+
+            central_D ~  dunif(lower_centralD,upper_centralD)
+
+            sigma_D ~ dunif(0.01, 1 * central_D)
+
+            for (i in 1:Nb_aliquots) {
+              a[i] ~  dnorm(6.5 , 1/(9.2^2) ) T(0, )
+              b[i] ~  dnorm(50 , 1/(1000^2) )  T(0, )
+              c[i] ~  dnorm(1.002 , 1/(0.9^2) ) T(0, )
+              g[i] ~  dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
+              sigma_f[i]  ~  dexp (20)
+
+              D[i] ~ dnorm ( central_D , 1/(sigma_D^2) )   #   Normal distribution
+
+              S_y[1,i] <-  1/(sLum[1,i]^2 + sigma_f[i]^2)
+              Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
+              Q[1,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
+
+              for (m in 2:Limited_cycles[i]) {
+                S_y[m,i] <-  1/(sLum[m,i]^2 + sigma_f[i]^2)
+                Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
+                Q[m,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
+              }
+            }
+            }",
+
+       log_normal = "model {
+
+            central_D ~  dunif(lower_centralD,upper_centralD)
+
+            log_central_D <-  log(central_D) - 0.5 * l_sigma_D^2
+            l_sigma_D ~ dunif(0.01, 1 * log(central_D))
+            sigma_D <-  sqrt((exp(l_sigma_D^2) -1) * exp( 2*log_central_D + l_sigma_D^2) )
+
+            for (i in 1:Nb_aliquots) {
+              a[i] ~  dnorm(6.5 , 1/(9.2^2) ) T(0, )
+              b[i] ~  dnorm(50 , 1/(1000^2) )  T(0, )
+              c[i] ~  dnorm(1.002 , 1/(0.9^2) ) T(0, )
+              g[i] ~  dnorm(0.5 , 1/(2.5^2) ) I(-a[i], )
+              sigma_f[i]  ~  dexp (20)
+
+              log_D[i] ~ dnorm ( log_central_D , 1/(l_sigma_D^2) )  #   Log-Normal distribution
+              D[i] <-  exp(log_D[i])
+
+              S_y[1,i] <-  1/(sLum[1,i]^2 + sigma_f[i]^2)
+              Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i])
+              Q[1,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) )
+
+            for (m in 2:Limited_cycles[i]) {
+                S_y[m,i] <-  1/(sLum[m,i]^2 + sigma_f[i]^2)
+                Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] )
+                Q[m,i]  <-  GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) )
+              }
+            }
+        }",
+
+        user_defined = baSAR_model
+       )
+
+      ##check whether the input for distribution was sufficient
+      if(!any(distribution%in%names(baSAR_model))){
+        stop(paste0("[analyse_baSAR()] No model is pre-defined for the requested distribution. Please select ", paste(rev(names(baSAR_model))[-1], collapse = ", "), " or define your own model using the argument 'baSAR_model'!"))
+
+      }else{
+        if(distribution == "user_defined" && is.null(baSAR_model[["user_defined"]])){
+          stop("[analyse_baSAR()] You have specified a 'user_defined' distribution, but you have not provided a model via 'baSAR_model'!")
+
+        }
+
+      }
+
+
+      ### Bayesian inputs
+      data_Liste  <- list(
+        'Dose' = data.Dose,
+        'Lum' = data.Lum,
+        'sLum' = data.sLum,
+        'LinGC' = LinGC,
+        'ExpoGC' = ExpoGC,
+        'GC_Origin' = GC_Origin,
+        'Limited_cycles' = Limited_cycles,
+        'lower_centralD' = lower_centralD,
+        'upper_centralD' = upper_centralD,
+        'Nb_aliquots' = Nb_aliquots
+      )
+
+      if(verbose){
+        cat("\n[analyse_baSAR()] ---- baSAR-model ---- \n")
+        cat("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
+        cat("[analyse_baSAR()] Bayesian analysis in progress ... ")
+        message(paste(".. >> bounds set to: lower_centralD =", lower_centralD, "| upper_centralD =", upper_centralD))
+      }
+
+      Nb_Iterations <- n.MCMC
+
+      if (verbose) {
+        message(paste0(
+          ".. >> calculation will be done assuming a '",
+          distribution,
+          "' distribution\n"
+        ))
+      }
+
+      ##set model
+      jagsfit <- rjags::jags.model(
+          file = textConnection(baSAR_model[[distribution]]),
+          data = data_Liste,
+          inits = inits,
+          n.chains = n.chains,
+          n.adapt = Nb_Iterations,
+          quiet = !verbose
+       )
+
+
+      ##update jags model (it is an S3-method)
+      update(
+        object = jagsfit,
+        n.iter = Nb_Iterations,
+        progress.bar = if(verbose){"text"}else{NULL}
+        )
+
+      ##get data ... full and reduced, the reduced one to limit the plot output
+      sampling <- rjags::coda.samples(
+        model = jagsfit,
+        variable.names = variable.names,
+        n.iter = Nb_Iterations,
+        thin = thin
+      )
+
+      ##this we need for the terminal output
+      ##Why sampling reduced? Because the summary() method produces a considerable overhead while
+      ##running over all the variables
+      sampling_reduced <- rjags::coda.samples(
+        model = jagsfit,
+        variable.names = c('central_D', 'sigma_D'),
+        n.iter = Nb_Iterations,
+        thin = thin
+      )
+
+
+      pt_zero <- 0
+      nb_decal <-  2
+      pt_zero <- Nb_aliquots
+
+      ##standard error and mean
+      output.mean <-
+        round(summary(sampling_reduced)[[1]][c("central_D", "sigma_D"), 1:2], digits)
+
+        ##calculate geometric mean for the case that the distribution is log-normal
+        if(distribution == "log_normal"){
+          temp.vector <- unlist(lapply(sampling_reduced, function(x){as.vector(x[,1])}))
+          gm <- round(exp(sum(log(temp.vector))/length(temp.vector)),digits)
+          rm(temp.vector)
+        }else{
+          gm <- NULL
+
+        }
+
+      ##quantiles
+      ##68% + 95%
+      output.quantiles <-
+        round(summary(sampling_reduced, quantiles = c(0.025, 0.16, 0.84, 0.975))[[2]][c("central_D", "sigma_D"), 1:4], digits)
+
+      #### output data.frame with results
+      baSAR.output <- data.frame(
+        DISTRIBUTION = distribution,
+        NB_ALIQUOTS = Nb_aliquots,
+        N.CHAINS = n.chains,
+        N.MCMC = n.MCMC,
+        FIT_METHOD = fit.method,
+        CENTRAL = if(is.null(gm)){output.mean[1,1]}else{gm},
+        CENTRAL.SD = output.mean[1,2],
+        SIGMA = output.mean[2,1],
+        SIGMA.SD = output.mean[2,2],
+        CENTRAL_Q_.16 = output.quantiles[1,2],
+        CENTRAL_Q_.84 = output.quantiles[1,3],
+        SIGMA_Q_.16 = output.quantiles[2,2],
+        SIGMA_Q_.84 = output.quantiles[2,3],
+        CENTRAL_Q_.025 = output.quantiles[1,1],
+        CENTRAL_Q_.975 = output.quantiles[1,4],
+        SIGMA_Q_.025 = output.quantiles[2,1],
+        SIGMA_Q_.975 = output.quantiles[2,4]
+      )
+
+      return(
+        baSAR.output = list(
+          baSAR.output_summary = baSAR.output,
+          baSAR.output_mcmc = sampling,
+          models = list(
+            cauchy = baSAR_model[["cauchy"]],
+            normal = baSAR_model[["normal"]],
+            log_normal = baSAR_model[["log_normal"]],
+            user_defined = baSAR_model[["user_defined"]]
+          )
+        )
+      )
+
+    }
+  ##END
+  ##////////////////////////////////////////////////////////////////////////////////////////////////
+
+  # Integrity tests -----------------------------------------------------------------------------
+
+  ##check whether rjags is available
+  ##code snippet taken from
+  ##http://r-pkgs.had.co.nz/description.html
+  if (!requireNamespace("rjags", quietly = TRUE)) {
+    stop("[analyse_baSAR()] To use this function you have to first install the package 'rjags'.",
+         call. = FALSE)
+  }
+
+  if (!requireNamespace("coda", quietly = TRUE)) {
+    stop("[analyse_baSAR()] To use this function you have to first install the package 'coda'.",
+         call. = FALSE)
+  }
+
+  #capture additional piped arguments
+  additional_arguments <- list(
+
+    ##verify_SingleGrainData
+    threshold = 30,
+
+    ##calc_OSLLxTxRatio()
+    background.count.distribution = "non-poisson",
+
+    ##readxl::read_excel()
+    sheet = 1,
+    col_names = TRUE,
+    col_types = NULL,
+    skip = 0,
+
+    ##read_BIN2R()
+    n.records = NULL,
+    duplicated.rm = TRUE,
+    position = NULL,
+    pattern = NULL,
+
+    ##plot_GrowthCurve()
+    fit.weights = TRUE,
+    fit.bounds = TRUE,
+    NumberIterations.MC = 100,
+    output.plot = plot,
+    output.plotExtended = plot
+
+  )
+
+  #deliberately overwrite the defaults with anything passed via '...'
+  additional_arguments <- modifyList(x = additional_arguments,
+                                     val = list(...))
+
+  ##set function arguments
+  function_arguments <- NULL
+
+  ##SET fit.method
+  if (fit.method != "EXP" &
+      fit.method != "EXP+LIN" &
+      fit.method != "LIN"){
+
+    stop("[analyse_baSAR()] Unsupported fitting method. Supported: 'EXP', 'EXP+LIN' and 'LIN'")
+  }
+
+  # Set input -----------------------------------------------------------------------------------
+
+  ##if the input is already of type RLum.Results, use the input and do not run
+  ##all pre-calculations again
+  if(is(object, "RLum.Results")){
+
+    if(object@originator == "analyse_baSAR"){
+
+      ##We want to use previous function arguments and recycle them
+
+        ##(1) get information you need as input from the RLum.Results object
+        function_arguments <- as.list(object@info$call)
+
+        ##(2) overwrite by currently provided arguments
+        ##by using a new argument we can choose which arguments are allowed to
+        ##be changed
+        function_arguments.new <- modifyList(x = function_arguments, val = as.list(match.call()))
+
+     ##get maximum cycles
+     max_cycles <- max(object$input_object[["CYCLES_NB"]])
+
+     ##set Nb_aliquots
+     Nb_aliquots <- nrow(object$input_object)
+
+     ##return NULL if less than two aliquots are available for the calculation
+     if(Nb_aliquots < 2){
+       try(stop("[analyse_baSAR()] number of aliquots < 2, this makes no sense, NULL returned!", call. = FALSE))
+       return(NULL)
+
+     }
+
+     ##set variables
+     ##Why is.null()? It prevents the function from crashing if nothing is provided ...
+
+     ##set changeable function arguments
+
+       ##distribution
+       if(!is.null(function_arguments.new$distribution)){
+         distribution <- function_arguments.new$distribution
+       }
+
+       ##n.MCMC
+       if(!is.null(function_arguments.new$n.MCMC)){
+         n.MCMC <- function_arguments.new$n.MCMC
+       }
+
+       ##fit.method
+       if(!is.null(function_arguments.new$fit.method)){
+         fit.method <- function_arguments.new$fit.method
+       }
+
+       ## fit.force_through_origin
+       if(!is.null(function_arguments.new$fit.force_through_origin)){
+          fit.force_through_origin <- function_arguments.new$fit.force_through_origin
+       }
+
+       ##fit.includingRepeatedRegPoints
+       if(!is.null(function_arguments.new$fit.includingRepeatedRegPoints)){
+          fit.includingRepeatedRegPoints <- function_arguments.new$fit.includingRepeatedRegPoints
+       }
+
+       ##source_doserate
+       if(length(as.list(match.call())$source_doserate) > 0){
+         warning("[analyse_baSAR()] Argument 'source_doserate' is ignored in this modus, as it was alreay set.", call. = FALSE)
+
+       }
+
+       ##aliquot_range
+       if(!is.null(function_arguments.new$aliquot_range)){
+         aliquot_range <- eval(function_arguments.new$aliquot_range)
+       }
+
+       ##method_control
+       if(!is.null(function_arguments.new$method_control)){
+         method_control <- eval(function_arguments.new$method_control)
+       }
+
+       ##baSAR_model
+       if(!is.null(function_arguments.new$baSAR_model)){
+         baSAR_model <- eval(function_arguments.new$baSAR_model)
+       }
+
+       ##plot
+       if(!is.null(function_arguments.new$plot)){
+         plot <- function_arguments.new$plot
+       }
+
+       ##verbose
+       if(!is.null(function_arguments.new$verbose)){
+         verbose <- function_arguments.new$verbose
+       }
+
+
+     ##limit according to aliquot_range
+     ##TODO Take care of the case that this was already provided, otherwise more and more is removed!
+     if (!is.null(aliquot_range)) {
+       if (max(aliquot_range) <= nrow(object$input_object)) {
+         input_object <- object$input_object[aliquot_range, ]
+
+         ##update list of removed aliquots
+         removed_aliquots <-rbind(object$removed_aliquots, object$input_object[-aliquot_range,])
+
+         ##correct Nb_aliquots
+         Nb_aliquots <- nrow(input_object)
+
+       } else{
+         try(stop("[analyse_basAR()] aliquot_range out of bounds! Input ignored!",
+                  call. = FALSE))
+
+         ##reset aliquot range
+         aliquot_range <- NULL
+
+         ##take entire object
+         input_object <- object$input_object
+
+         ##set removed aliquots
+         removed_aliquots <- object$removed_aliquots
+
+       }
+
+
+     } else{
+       ##set the normal case
+       input_object <- object$input_object
+
+       ##set removed aliquots
+       removed_aliquots <- object$removed_aliquots
+
+
+     }
+
+     ##set non function arguments
+     Doses <- t(input_object[,9:(8 + max_cycles)])
+     LxTx <- t(input_object[,(9 + max_cycles):(8 + 2 * max_cycles)])
+     LxTx.error <-  t(input_object[,(9 + 2 * max_cycles):(8 + 3 * max_cycles)])
+
+     rm(max_cycles)
+
+    }else{
+      stop("[analyse_baSAR()] 'object' is of type 'RLum.Results', but has not been produced by analyse_baSAR()!")
+
+    }
+
+
+  }else{
+
+    if(verbose){
+      cat("\n[analyse_baSAR()] ---- PREPROCESSING ----")
+
+    }
+
+    ##Supported input types are:
+    ##  (1) BIN-file
+    ##      .. list
+    ##      .. character
+    ##  (2) RisoeBINfileData object
+    ##      .. list
+    ##      .. S4
+
+    if (is(object, "Risoe.BINfileData")) {
+      fileBIN.list <- list(object)
+
+    } else if (is(object, "list")) {
+      ##check what the list contains ...
+      object_type <-
+        unique(unlist(lapply(
+          1:length(object),
+          FUN = function(x) {
+            is(object[[x]])[1]
+          }
+        )))
+
+      if (length(object_type)  == 1) {
+        if (object_type == "Risoe.BINfileData") {
+          fileBIN.list <- object
+
+        } else if (object_type == "character") {
+          fileBIN.list <- read_BIN2R(
+            file = object,
+            position = additional_arguments$position,
+            duplicated.rm = additional_arguments$duplicated.rm,
+            n.records = additional_arguments$n.records,
+            pattern = additional_arguments$pattern,
+            verbose = verbose
+          )
+        } else{
+          stop(
+            "[analyse_baSAR()] data type in the input list provided for 'object' is not supported!"
+          )
+        }
+
+      } else{
+        stop("[analyse_baSAR()] 'object' only accepts a list with objects of similar type!")
+      }
+
+    } else if (is(object, "character")) {
+      fileBIN.list <- list(
+        read_BIN2R(
+          file = object,
+          position = additional_arguments$position,
+          duplicated.rm = additional_arguments$duplicated.rm,
+          n.records = additional_arguments$n.records,
+          verbose = verbose
+        )
+      )
+
+    } else{
+      stop(
+        paste0(
+          "[analyse_baSAR()] '",
+          is(object)[1],
+          "' as input is not supported. Check manual for allowed input objects."
+        )
+      )
+    }
+
+    ##Problem ... the user might have made a pre-selection in the Analyst software; if this is the case,
+    ##we respect this selection
+    if(!all(unlist(lapply(fileBIN.list, FUN = function(x){(x@METADATA[["SEL"]])})))){
+
+      fileBIN.list <- lapply(fileBIN.list, function(x){
+
+            ##reduce data
+            x@DATA <- x@DATA[x@METADATA[["SEL"]]]
+            x@METADATA <- x@METADATA[x@METADATA[["SEL"]], ]
+
+            ##reset index
+            x@METADATA[["ID"]] <- 1:nrow(x@METADATA)
+            return(x)
+
+
+      })
+
+      if(verbose){
+        cat("\n[analyse_baSAR()] Record pre-selection in BIN-file detected >> record reduced to selection")
+
+      }
+
+    }
+
+    # Declare variables ---------------------------------------------------------------------------
+    Dose <-  list()
+    LxTx <-  list()
+    sLxTx <-  list()
+
+    Disc <-  list()
+    Grain <- list()
+    Disc_Grain.list <- list()
+
+    Nb_aliquots <-  0
+    previous.Nb_aliquots <- 0
+    object.file_name <- list()
+
+    Mono_grain <-  TRUE
+
+    Limited_cycles <- vector()
+
+    ##set information
+    for (i in 1 : length(fileBIN.list)) {
+      Disc[[i]] <-  list()
+      Grain[[i]] <-  list()
+
+      ##get BIN-file name
+      object.file_name[[i]] <- unique(fileBIN.list[[i]]@METADATA[["FNAME"]])
+
+
+    }
+
+    ##check for duplicated entries; remove them as they would cause a function crash
+    if(any(duplicated(unlist(object.file_name)))){
+
+      ##provide messages
+      if(verbose){
+        message(paste0(
+          "[analyse_baSAR()] '",
+          paste(object.file_name[which(duplicated(unlist(object.file_name)))],
+                collapse = ", "),
+          "' is a duplicate and therefore removed from the input!"
+        ))
+
+      }
+
+      warning(paste0(
+        "[analyse_baSAR()] '",
+        paste(object.file_name[which(duplicated(unlist(object.file_name)))],
+              collapse = ", "),
+        "' is a duplicate and therefore removed from the input!"
+      ))
+
+      ##remove entry
+      Disc[which(duplicated(unlist(object.file_name)))] <- NULL
+      Grain[which(duplicated(unlist(object.file_name)))] <- NULL
+      fileBIN.list[which(duplicated(unlist(object.file_name)))] <- NULL
+      object.file_name[which(duplicated(unlist(object.file_name)))] <- NULL
+
+    }
+
+
+  # Expand parameter list -----------------------------------------------------------------------
+
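+  ## each of the following parameters may differ per BIN-file and is
+  ## therefore recycled to the number of provided BIN-files, e.g., with
+  ## two BIN-files the plain vector source_doserate = c(0.04, 0.004)
+  ## becomes list(c(0.04, 0.004), c(0.04, 0.004))
+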
+  ##test_parameter = source_doserate
+  if(!is.null(source_doserate)){
+    if(is(source_doserate, "list")){
+      source_doserate <- rep(source_doserate, length = length(fileBIN.list))
+    }else{
+      source_doserate <- rep(list(source_doserate), length = length(fileBIN.list))
+    }
+  }else{
+    stop("[analyse_baSAR()] 'source_doserate' is missing, but required as the current implementation expects dose values in Gy!")
+
+  }
+
+  ##sigmab
+  if(is(sigmab, "list")){
+    sigmab <- rep(sigmab, length = length(fileBIN.list))
+  }else{
+    sigmab <- rep(list(sigmab), length = length(fileBIN.list))
+  }
+
+  ##sig0
+  if(is(sig0, "list")){
+    sig0 <- rep(sig0, length = length(fileBIN.list))
+  }else{
+    sig0 <- rep(list(sig0), length = length(fileBIN.list))
+  }
+
+
+  ##test_parameter = signal.integral
+  if(is(signal.integral, "list")){
+    signal.integral <- rep(signal.integral, length = length(fileBIN.list))
+  }else{
+    signal.integral <- rep(list(signal.integral), length = length(fileBIN.list))
+  }
+
+
+  ##test_parameter = signal.integral.Tx
+  if (!is.null(signal.integral.Tx)) {
+    if (is(signal.integral.Tx, "list")) {
+      signal.integral.Tx <- rep(signal.integral.Tx, length = length(fileBIN.list))
+    } else{
+      signal.integral.Tx <- rep(list(signal.integral.Tx), length = length(fileBIN.list))
+    }
+  }
+
+  ##test_parameter = background.integral
+  if(is(background.integral, "list")){
+    background.integral <- rep(background.integral, length = length(fileBIN.list))
+  }else{
+    background.integral <- rep(list(background.integral), length = length(fileBIN.list))
+  }
+
+
+  ##test_parameter = background.integral.Tx
+  if (!is.null(background.integral.Tx)) {
+    if (is(background.integral.Tx, "list")) {
+      background.integral.Tx <-
+        rep(background.integral.Tx, length = length(fileBIN.list))
+    } else{
+      background.integral.Tx <-
+        rep(list(background.integral.Tx), length = length(fileBIN.list))
+    }
+  }
+
+
+
+  # Read EXCEL sheet ----------------------------------------------------------------------------
+  if(is.null(XLS_file)){
+
+    ##select aliquots giving light only; this function also accepts a list as input
+    if(verbose){
+      cat("\n[analyse_baSAR()] No XLS file provided, running automatic grain selection ...")
+
+    }
+
+
+    for (k in 1:length(fileBIN.list)) {
+
+      ##if the user provides only multiple grain data (GRAIN == 0), the verification
+      ##here does not really make sense and should be skipped
+      if(length(unique(fileBIN.list[[k]]@METADATA[["GRAIN"]])) > 1){
+        aliquot_selection <-
+          verify_SingleGrainData(
+            object = fileBIN.list[[k]],
+            cleanup_level = "aliquot",
+            threshold = additional_arguments$threshold,
+            cleanup = FALSE
+          )
+
+
+        ##remove grain position 0 (these are usually TL measurements on the cup, or we are dealing with multiple aliquots)
+        if (sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE) > 0) {
+          warning(
+            paste(
+              "[analyse_baSAR()] Automatic grain selection:",
+              sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE),
+              "curve(s) with grain index 0 had been removed from the dataset."
+            ),
+            call. = FALSE
+          )
+        }
+
+        datalu <-
+          aliquot_selection$unique_pairs[!aliquot_selection$unique_pairs[["GRAIN"]] == 0,]
+
+        if(nrow(datalu) == 0){
+
+          try(stop("[analyse_baSAR()] Sorry, nothing was left after the automatic grain selection! NULL returned!", call. = FALSE))
+          return(NULL)
+
+        }
+
+      }else{
+
+          warning("[analyse_baSAR()] Only multiple grain data provided, automatic selection skipped!", call. = FALSE)
+          datalu <- unique(fileBIN.list[[k]]@METADATA[, c("POSITION", "GRAIN")])
+
+          ##set mono grain to FALSE
+          Mono_grain <- FALSE
+          aliquot_selection <- NA
+
+      }
+
+      ##get number of aliquots (one aliquot has a position and a grain number)
+      Nb_aliquots <- nrow(datalu)
+
+      ##write information in variables
+      Disc[[k]] <-  datalu[["POSITION"]]
+      Grain[[k]] <- datalu[["GRAIN"]]
+
+      ##free memory
+      rm(datalu, aliquot_selection)
+    }
+    rm(k)
+
+  } else if (is(XLS_file, "data.frame") || is(XLS_file, "character")) {
+
+    ##load file if we have an XLS file
+    if (is(XLS_file, "character")) {
+      ##test for valid file
+      if(!file.exists(XLS_file)){
+        stop("[analyse_baSAR()] Defined XLS_file does not exists!")
+
+      }
+
+      ##import Excel sheet
+      datalu <- as.data.frame(readxl::read_excel(
+        path = XLS_file,
+        sheet = additional_arguments$sheet,
+        col_names = additional_arguments$col_names,
+        col_types = additional_arguments$col_types,
+        skip = additional_arguments$skip
+      ), stringsAsFactors = FALSE)
+
+      ##get rid of empty rows if the BIN_FILE name column is empty
+      datalu <- datalu[!is.na(datalu[[1]]), ]
+
+
+    } else{
+
+      datalu <- XLS_file
+
+      ##problem: the first column should be of type character, the others are
+      ##of type numeric; unfortunately it is too risky to rely on the user, so we do the
+      ##proper conversion ourselves ...
+      datalu[[1]] <- as.character(datalu[[1]])
+      datalu[[2]] <- as.numeric(datalu[[2]])
+      datalu[[3]] <- as.numeric(datalu[[3]])
+
+    }
+
+
+    ##limit aliquot range
+    if (!is.null(aliquot_range)) {
+      datalu <- datalu[aliquot_range,]
+
+    }
+
+    Nb_ali <-  0
+    k <- NULL
+
+    for (nn in 1:length((datalu[, 1]))) {
+      if (!is.na(datalu[nn, 1]))  {
+
+        ##check whether one file fits
+        if (any(grepl(
+          pattern = strsplit(
+            x = basename(datalu[nn, 1]),
+            split = ".",
+            fixed = TRUE
+          )[[1]][1],
+          x = unlist(object.file_name)
+        ))) {
+
+          k <- grep(pattern = strsplit(
+            x = basename(datalu[nn, 1]),
+            split = ".",
+            fixed = TRUE
+          )[[1]][1],
+          x = object.file_name)
+
+          nj <-  length(Disc[[k]]) + 1
+
+          Disc[[k]][nj] <-  as.numeric(datalu[nn, 2])
+          Grain[[k]][nj] <-  as.numeric(datalu[nn, 3])
+          Nb_ali <-  Nb_ali + 1
+          if (is.na(Grain[[k]][nj]) || Grain[[k]][nj] == 0) {
+            Mono_grain <- FALSE
+          }
+
+        }else{
+          warning(
+            paste0("[analyse_baSAR] '", (datalu[nn, 1]), "' not recognized or not loaded; skipped!"),
+            call. = FALSE
+          )
+        }
+
+
+      } else{
+
+        if (Nb_ali == 0) {
+          stop("[analyse_baSAR()] Nb. discs/grains  = 0 !")
+        }
+
+        break()
+      }
+    }
+
+    ##if k is NULL it means it was not set so far, so there was
+    ##no corresponding BIN-file found
+    if(is.null(k)){
+      stop("[analyse_baSAR()] BIN-file names in XLS-file do not fit to the loaded BIN-files!")
+
+    }
+
+  } else{
+    stop("[analyse_baSAR()] input type for 'XLS_file' not supported!")
+  }
+
+
+  ###################################### loops on files_number
+  for (k in 1:length(fileBIN.list)) {
+
+    Disc_Grain.list[[k]] <- list()   # data.file number
+    n_aliquots_k <- length((Disc[[k]]))
+
+      if(n_aliquots_k == 0){
+        fileBIN.list[[k]] <- NULL
+        if(verbose){
+          message(paste("[analyse_baSAR()] No data has been selected from BIN-file", k, ">> BIN-file removed from input!"))
+        }
+        warning(paste("[analyse_baSAR()] No data has been selected from BIN-file", k, ">> BIN-file removed from input!"), call. = FALSE)
+        next()
+      }
+
+    for (d in 1:n_aliquots_k) {
+      dd <-  as.integer(unlist(Disc[[k]][d]))
+      Disc_Grain.list[[k]][[dd]] <- list()  # data.file number ,  disc_number
+    }
+
+    for (d in 1:n_aliquots_k) {
+      dd <-  as.integer(unlist(Disc[[k]][d]))
+      if (Mono_grain == FALSE) {
+        gg <- 1
+      }
+      if (Mono_grain == TRUE)  {
+        gg <- as.integer(unlist(Grain[[k]][d]))}
+
+        Disc_Grain.list[[k]][[dd]][[gg]] <- list()  # data.file number ,  disc_number, grain_number
+        for (z in 1:6) {
+          Disc_Grain.list[[k]][[dd]][[gg]][[z]] <- list()
+          # 1 = index numbers, 2 = irradiation doses, 3 = LxTx, 4 = sLxTx, 5 = aliquot number, 6 = De +- D0 +- (4 values)
+        }
+    }
+  }
+
+  if(verbose){
+    cat("\n[analyse_baSAR()] Preliminary analysis in progress ... ")
+    cat("\n[analyse_baSAR()] Hang on, this may take a long time ... \n")
+  }
+
+
+  for (k in 1:length(fileBIN.list)) {
+
+    n_index.vector <- vector("numeric")
+
+    measured_discs.vector <- vector("numeric")
+    measured_grains.vector <- vector("numeric")
+    measured_grains.vector_list <- vector("numeric")
+    irrad_time.vector <- vector("numeric")
+
+    disc_pos <- vector("numeric")
+    grain_pos <- vector("numeric")
+
+    ### METADATA
+    length_BIN <-  length(fileBIN.list[[k]])
+    n_index.vector <- fileBIN.list[[k]]@METADATA[["ID"]][1:length_BIN]              #  curves indexes vector
+
+    measured_discs.vector <-  fileBIN.list[[k]]@METADATA[["POSITION"]][1:length_BIN] # measured discs vector
+    measured_grains.vector <- fileBIN.list[[k]]@METADATA[["GRAIN"]][1:length_BIN]    # measured grains vector
+    irrad_time.vector <- fileBIN.list[[k]]@METADATA[["IRR_TIME"]][1:length_BIN]      # irradiation durations vector
+
+    ##if all irradiation times are identical we should stop here
+    if (length(unique(irrad_time.vector)) == 1) {
+      try(stop(
+        "[analyse_baSAR()] It appears that the irradiation times are all the same. Analysis stopped and NULL returned!",
+        call. = FALSE
+      ))
+      return(NULL)
+    }
+
+    disc_pos <- as.integer(unlist(Disc[[k]]))
+    grain_pos <- as.integer(unlist(Grain[[k]]))
+
+    ### Automatic Filling - Disc_Grain.list
+    for (i in 1: length(Disc[[k]])) {
+
+      disc_selected <-  as.integer(Disc[[k]][i])
+
+      if (Mono_grain == TRUE) {grain_selected <- as.integer(Grain[[k]][i])} else { grain_selected <-0}
+
+         ##hard break if the disc number or grain number does not fit
+
+         ##disc (position)
+         disc_logic <- (disc_selected == measured_discs.vector)
+
+          if (!any(disc_logic)) {
+            try(stop(
+              paste0(
+                "[analyse_baSAR()] In BIN-file '",
+                unique(fileBIN.list[[k]]@METADATA[["FNAME"]]),
+                "' position number ",
+                disc_selected,
+                " does not exist! NULL returned!"
+              ),
+              call. = FALSE
+            ))
+            return(NULL)
+          }
+
+          ##grain
+          grain_logic <- (grain_selected == measured_grains.vector)
+
+          if (!any(grain_logic)) {
+            try(stop(
+              paste0(
+                "[analyse_baSAR()] In BIN-file '",
+                unique(fileBIN.list[[k]]@METADATA[["FNAME"]]),
+                "' grain number ",
+                grain_selected,
+                " does not exist! NULL returned!"
+              ),
+              call. = FALSE
+            ))
+            return(NULL)
+          }
+
+          ##if the test passed, compile index list
+          index_liste <- n_index.vector[disc_logic & grain_logic]
+
+      if (Mono_grain == FALSE)  { grain_selected <-1}
+
+          for (kn in 1: length(index_liste)) {
+
+              t <- index_liste[kn]
+
+              ##check if the source_doserate is NULL or not
+              if(!is.null(unlist(source_doserate))){
+                dose.value <-  irrad_time.vector[t] * unlist(source_doserate[[k]][1])
+
+              }else{
+                dose.value <-  irrad_time.vector[t]
+
+              }
+
+              s <- 1 + length( Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]] )
+              Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][s] <- n_index.vector[t]  # indexes
+              if ( s%%2 == 1) { Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]][as.integer(1+s/2)] <- dose.value  }      # irradiation doses
+
+
+          }
+    }
+
+
+  }
+
+
+  ######################  Data associated with a single Disc/Grain
+  max_cycles <-  0
+  count <- 1
+  calc_OSLLxTxRatio_warning <- list()
+
+  for (k in 1:length(fileBIN.list)) {
+
+    if (Mono_grain == TRUE) {max.grains <- 100} else {max.grains <- 1}
+
+
+    ##plot Ln and Tn curves if wanted
+    ##we want to plot the Ln and Tn curves to get a better feeling for the data
+    ##The approach here is rather roughly coded, but it works
+    if (plot) {
+      curve_index <- vapply((1:length(Disc[[k]])), function(i) {
+        disc_selected <-  as.integer(Disc[[k]][i])
+        if (Mono_grain == TRUE) {
+          grain_selected <- as.integer(Grain[[k]][i])
+        } else {
+          grain_selected <- 1
+        }
+
+        Ln_index <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][1])
+        Tn_index <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2])
+
+        return(c(Ln_index, Tn_index))
+      }, FUN.VALUE = vector(mode = "numeric", length = 2))
+
+
+      ##set matrix for Ln values
+      Ln_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[1, 1]]]),
+                         matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[1, ]]), ncol = ncol(curve_index)))
+
+      Tn_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[2, 1]]]),
+                         matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[2, ]]), ncol = ncol(curve_index)))
+
+      ##open plot area
+      if(!plot.single){
+        par.default <- par()$mfrow
+        par(mfrow = c(1, 2))
+
+      }
+
+      ##get natural curve and combine them in matrix
+      graphics::matplot(
+        x = Ln_matrix[, 1],
+        y = Ln_matrix[, -1],
+        col = rgb(0, 0, 0, 0.3),
+        ylab = "Luminescence [a.u.]",
+        xlab = "Channel",
+        main = expression(paste(L[n], " - curves")),
+        type = "l"
+
+      )
+
+      ##add integration limits
+      abline(v = range(signal.integral[[k]]), lty = 2, col = "green")
+      abline(v = range(background.integral[[k]]), lty = 2, col = "red")
+      mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index)))
+
+      graphics::matplot(
+        x = Tn_matrix[, 1],
+        y = Tn_matrix[, -1],
+        col = rgb(0, 0, 0, 0.3),
+        ylab = "Luminescence [a.u.]",
+        xlab = "Channel",
+        main = expression(paste(T[n], " - curves")),
+        type = "l"
+
+      )
+
+      ##add integration limits depending on the chosen value
+      if(is.null(signal.integral.Tx[[k]])){
+        abline(v = range(signal.integral[[k]]), lty = 2, col = "green")
+
+      }else{
+        abline(v = range(signal.integral.Tx[[k]]), lty = 2, col = "green")
+
+      }
+
+      if(is.null(background.integral.Tx[[k]])){
+        abline(v = range(background.integral[[k]]), lty = 2, col = "red")
+
+      }else{
+        abline(v = range(background.integral.Tx[[k]]), lty = 2, col = "red")
+
+      }
+
+      mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index)))
+
+
+      ##reset par
+      if(!plot.single){
+        par(mfrow = par.default)
+
+      }
+
+      ##remove some variables
+      rm(curve_index, Ln_matrix, Tn_matrix)
+
+    }
+
+
+    for (i in 1:length(Disc[[k]])) {
+
+      disc_selected <-  as.integer(Disc[[k]][i])
+      if (Mono_grain == TRUE) {
+        grain_selected <- as.integer(Grain[[k]][i])
+      } else {
+        grain_selected <- 1
+      }
+
+      # Data for the selected Disc-Grain
+      for (nb_index in 1:((length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]]))/2 )) {
+
+        index1 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index-1])
+        index2 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index])
+        Lx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index1]])), fileBIN.list[[k]]@DATA[[index1]])
+        Tx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index2]])), fileBIN.list[[k]]@DATA[[index2]])
+
+        ## call calc_OSLLxTxRatio()
+        ## we run this function with a warning catcher to reduce the load of warnings for the user
+        temp_LxTx <- withCallingHandlers(
+          calc_OSLLxTxRatio(
+            Lx.data = Lx.data,
+            Tx.data = Tx.data,
+            signal.integral = signal.integral[[k]],
+            signal.integral.Tx = signal.integral.Tx[[k]],
+            background.integral = background.integral[[k]],
+            background.integral.Tx = background.integral.Tx[[k]],
+            background.count.distribution = additional_arguments$background.count.distribution,
+            sigmab = sigmab[[k]],
+            sig0 = sig0[[k]]
+          ),
+          warning = function(c) {
+            calc_OSLLxTxRatio_warning[[i]] <<- c
+            invokeRestart("muffleWarning")
+          }
+        )
+
+        ##get LxTx table
+        LxTx.table <- temp_LxTx$LxTx.table
+
+        Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]][nb_index] <- LxTx.table[[9]]
+        Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]][nb_index] <- LxTx.table[[10]]
+        Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]][nb_index] <- LxTx.table[[7]]
+
+        ##free memory
+        rm(LxTx.table)
+        rm(temp_LxTx)
+      }
+
+
+      # Fitting Growth curve and Plot
+      sample_dose <-  unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+      sample_LxTx <-  unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]])
+      sample_sLxTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]])
+
+      TnTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]])
+
+      ##create needed data.frame
+      selected_sample <- data.frame (sample_dose, sample_LxTx, sample_sLxTx, TnTx)
+
+      ##call plot_GrowthCurve() to get the De and D0 values
+      fitcurve <-
+        suppressWarnings(plot_GrowthCurve(
+          sample = selected_sample,
+          na.rm = TRUE,
+          fit.method = fit.method,
+          fit.force_through_origin = fit.force_through_origin,
+          fit.weights = additional_arguments$fit.weights,
+          fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints,
+          fit.bounds = additional_arguments$fit.bounds,
+          NumberIterations.MC = additional_arguments$NumberIterations.MC,
+          output.plot = additional_arguments$output.plot,
+          output.plotExtended = additional_arguments$output.plotExtended,
+          txtProgressBar = FALSE,
+          verbose = verbose,
+          main = paste0("ALQ: ", count," | POS: ", Disc[[k]][i], " | GRAIN: ", Grain[[k]][i])
+        ))
+
+        ##get data.frame with De values
+        if(!is.null(fitcurve)){
+          fitcurve_De <- get_RLum(fitcurve, data.object = "De")
+
+          Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1] <-
+            fitcurve_De[["De"]]
+          Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2] <-
+            fitcurve_De[["De.Error"]]
+          Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3] <-
+            fitcurve_De[["D01"]]
+          Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4] <-
+            fitcurve_De[["D01.ERROR"]]
+
+        }else{
+          ##we have to do this, otherwise the grains will be sorted out
+          Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1:4] <- NA
+
+        }
+
+        Limited_cycles[previous.Nb_aliquots + i] <-
+          length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+
+        if (length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) > max_cycles) {
+          max_cycles <-
+            length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+
+        }
+
+        previous.Nb_aliquots <-
+            length(Limited_cycles) # Total count of aliquots
+
+
+      count <- count + 1
+    }
+
+  }   ##  END of loop on BIN files
+  rm(count)
+
+  ##evaluate warnings from calc_OSLLxTxRatio()
+  if(length(calc_OSLLxTxRatio_warning)>0){
+    w_table <- table(unlist(calc_OSLLxTxRatio_warning))
+    w_table_names <- names(w_table)
+
+    for(w in 1:length(w_table)){
+      warning(paste(w_table_names[w], "This warning occurred", w_table[w], "times!"), call. = FALSE)
+
+    }
+    rm(w_table)
+    rm(w_table_names)
+
+  }
+  rm(calc_OSLLxTxRatio_warning)
+
+
+  Nb_aliquots <- previous.Nb_aliquots
+
+  ##create results matrix
+  OUTPUT_results <-
+    matrix(nrow = Nb_aliquots,
+           ncol = (8 + 3 * max_cycles),
+           byrow = TRUE)
+
+  ## set column names (this makes it much easier to debug)
+  colnames(OUTPUT_results) <- c(
+    "INDEX_BINfile",
+    "DISC",
+    "GRAIN",
+    "DE",
+    "DE.SD",
+    "D0",
+    "D0.SD",
+    "CYCLES_NB",
+    paste0("DOSE_", 1:max_cycles),
+    paste0("LxTx_", 1:max_cycles),
+    paste0("LxTx_", 1:max_cycles, ".SD")
+
+  )
+
+  comptage <- 0
+  for (k in 1:length(fileBIN.list)) {
+
+    for (i in 1:length(Disc[[k]])) {
+
+      disc_selected <-  as.numeric(Disc[[k]][i])
+
+      if (Mono_grain == TRUE) {
+        grain_selected <-
+          as.numeric(Grain[[k]][i])
+      } else {
+        grain_selected <- 1
+      }
+      comptage <- comptage + 1
+
+      OUTPUT_results[comptage, 1] <- k
+
+      OUTPUT_results[comptage, 2] <- as.numeric(disc_selected)
+      if (Mono_grain == TRUE) {
+        OUTPUT_results[comptage, 3] <- grain_selected
+      }
+      else {
+        OUTPUT_results[comptage, 3] <- 0
+      }
+
+     if (length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]]) != 0) {
+
+        ##DE
+        OUTPUT_results[comptage, 4] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1])
+
+        ##DE.SD
+        OUTPUT_results[comptage, 5] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2])
+
+        ##D0
+        OUTPUT_results[comptage, 6] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3])
+
+        ##D0.SD
+        OUTPUT_results[comptage, 7] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4])
+
+        ##CYCLES_NB
+        OUTPUT_results[comptage, 8] <-
+          length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+
+          ##auxiliary variable
+          llong <-
+            length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+
+        ##Dose
+        OUTPUT_results[comptage, 9:(8 + llong)] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]])
+
+        ##LxTx values
+        OUTPUT_results[comptage, (9 + max_cycles):(8 + max_cycles + llong)] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]])
+
+        ##LxTx SD values
+         OUTPUT_results[comptage, (9 + 2*max_cycles):(8 + 2*max_cycles + llong)] <-
+          as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]])
+
+     }
+
+    }
+  }
+
+
+  ##Clean matrix and remove all unwanted entries
+
+    ##remove all rows where the DISC entry is NA
+    ##these NA values are no calculation artefacts, but come from the data processing and have
+    ##no further value
+    OUTPUT_results <- OUTPUT_results[!is.na(OUTPUT_results[,2]),]
+
+    ##clean up NaN values in the LxTx and corresponding error values
+    ##the transposition of the matrix may increase the performance for very large matrices
+    OUTPUT_results_reduced <- t(OUTPUT_results)
+    selection <- vapply(X = 1:ncol(OUTPUT_results_reduced), FUN = function(x){
+        !any(is.nan(OUTPUT_results_reduced[9:(8+3*max_cycles), x]) | is.infinite(OUTPUT_results_reduced[9:(8+3*max_cycles), x]))
+
+    }, FUN.VALUE = vector(mode = "logical", length = 1))
+
+    removed_aliquots <- t(OUTPUT_results_reduced[,!selection])
+    OUTPUT_results_reduced <- t(OUTPUT_results_reduced[,selection])
+
+
+    ##finally, check for difference in the number of dose points ... they should be the same
+    if(length(unique(OUTPUT_results_reduced[,"CYCLES_NB"])) > 1){
+       warning("[analyse_baSAR()] The number of dose points differs across your data set. Check your data!", call. = FALSE)
+
+    }
+
+  ##correct number of aliquots if necessary
+  if(Nb_aliquots > nrow(OUTPUT_results_reduced)) {
+    Nb_aliquots <- nrow(OUTPUT_results_reduced)
+    warning(
+      paste0(
+        "[analyse_baSAR()] 'Nb_aliquots' corrected due to NaN or Inf values in Lx and/or Tx to ", Nb_aliquots, ". You might want to check 'removed_aliquots' in the function output."), call. = FALSE)
+
+  }
+
+  ##Prepare for Bayesian analysis
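+  ## slice the cleaned result matrix back into its three blocks (cf. the
+  ## column names set above): doses, Lx/Tx and Lx/Tx errors, transposed so
+  ## that each column holds one aliquot, as expected by the baSAR-model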
+  Doses <- t(OUTPUT_results_reduced[,9:(8 + max_cycles)])
+  LxTx <- t(OUTPUT_results_reduced[, (9 + max_cycles):(8 + 2 * max_cycles)])
+  LxTx.error <- t(OUTPUT_results_reduced[, (9 + 2 * max_cycles):(8 + 3 * max_cycles)])
+
+  ##prepare data frame for output that can be used as input
+  input_object <- data.frame(
+    BIN_FILE = unlist(object.file_name)[OUTPUT_results_reduced[[1]]],
+    OUTPUT_results_reduced[, -1],
+    stringsAsFactors = FALSE
+  )
+
+
+  ##prepare data frame for output that shows rejected aliquots
+  if (length(removed_aliquots) > 0) {
+    removed_aliquots <-
+      as.data.frame(removed_aliquots,  stringsAsFactors = FALSE)
+    removed_aliquots <- cbind(BIN_FILE = unlist(object.file_name)[removed_aliquots[[1]]],
+                              removed_aliquots[, -1])
+
+  }else{
+    removed_aliquots <- NULL
+  }
+
+}
+
+  # Call baSAR-function -------------------------------------------------------------------------
+
+  ##check for the central_D bound settings
+  ##Why do we use 0 and 1000? Combes et al., 2015 wrote
+  ## that "We set the bounds for the prior on the central dose D, Dmin = 0 Gy and
+  ## Dmax = 1000 Gy, to cover the likely range of possible values for D."
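+  ## A user may override these bounds via 'method_control'; a sketch (the
+  ## values are placeholders, not a recommendation):
+  ##   method_control = list(lower_centralD = 0, upper_centralD = 250)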
+
+
+    ##check if something is set in method control, if not, set it
+    if (is.null(method_control[["upper_centralD"]])) {
+      method_control <- c(method_control, upper_centralD = 1000)
+
+
+    }else{
+      if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){
+        warning("[analyse_baSAR()] You have modified the upper central_D boundary, while applying a predefined model. This is possible but not recommended!", call. = FALSE)
+
+      }
+
+
+    }
+
+    ##we do the same for the lower_centralD, just to have everything in one place
+    if (is.null(method_control[["lower_centralD"]])) {
+      method_control <- c(method_control, lower_centralD = 0)
+
+    }else{
+      if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){
+        warning("[analyse_baSAR()] You have modified the lower central_D boundary while applying a predefined model. This is possible but not recommended!", call. = FALSE)
+      }
+
+    }
+
+
+    if(min(input_object[["DE"]][input_object[["DE"]] > 0], na.rm = TRUE) < method_control$lower_centralD |
+       max(input_object[["DE"]], na.rm = TRUE) > method_control$upper_centralD){
+
+      warning("[analyse_baSAR()] Your set lower_centralD and/or upper_centralD value seem to do not fit to your input data. This may indicate a wronlgy set 'source_doserate'.", call. = FALSE)
+
+
+    }
+
+  ##>> using try() here is much better, as the user might have run a very long preprocessing
+  ##and does not want the function to fail at this point
+  results <-
+    try(.baSAR_function(
+      Nb_aliquots = Nb_aliquots,
+      distribution = distribution,
+      data.Dose = Doses,
+      data.Lum = LxTx,
+      data.sLum = LxTx.error,
+      fit.method = fit.method,
+      n.MCMC = n.MCMC,
+      fit.force_through_origin = fit.force_through_origin,
+      fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints,
+      method_control = method_control,
+      baSAR_model = baSAR_model,
+      verbose = verbose
+    ))
+
+  ##check whether the call failed (try-error)
+  if(!is(results, "try-error")){
+
+    ##how do we add the systematic error?
+    ##(1) source_doserate is a list, not a vector; the user can
+    ##provide many source dose rates or only a single vector (no error)
+
+    if(!is.null(unlist(source_doserate)) || !is.null(function_arguments$source_doserate)){
+
+      ##if it comes from the previous call it is, unfortunately, not that simple
+      if(!is.null(function_arguments$source_doserate)){
+        source_doserate <- eval(function_arguments$source_doserate)
+
+        if(!is(source_doserate, "list")){
+          source_doserate <- list(source_doserate)
+
+        }
+
+      }
+
+      systematic_error <- unlist(lapply(source_doserate, function(x){
+        if(length(x) == 2) {
+          x[2]
+        } else{
+          NULL
+        }
+
+        }))
+
+    }else{
+      systematic_error <- 0
+
+
+    }
+
+
+    ##issue a warning if the provided errors differ
+    if(mean(systematic_error) != systematic_error[1]){
+      warning("[analyse_baSAR()] Provided source dose rate errors differ. The mean was taken, but the calculated systematic error might not be valid!", call. = FALSE)
+
+    }
+
+    ##add the systematic error in quadrature to the final De error
+    DE_FINAL.ERROR <- sqrt(results[[1]][["CENTRAL.SD"]]^2 + mean(systematic_error)^2)
+
+    ##consider the case that we get NA, which might be confusing
+    if(is.na(DE_FINAL.ERROR)){
+      DE_FINAL.ERROR <- results[[1]][["CENTRAL.SD"]]
+
+    }
+
+
+
+    ##combine
+    results[[1]] <- cbind(results[[1]], DE_FINAL = results[[1]][["CENTRAL"]], DE_FINAL.ERROR = DE_FINAL.ERROR)
+
+  }else{
+    results <- NULL
+    verbose <- FALSE
+    plot <- FALSE
+
+  }
+
+  # Terminal output -----------------------------------------------------------------------------
+  if(verbose){
+    cat("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n")
+    cat("\n[analyse_baSAR()] ---- RESULTS ---- \n")
+    cat("------------------------------------------------------------------\n")
+    cat(paste0("Used distribution:\t\t", results[[1]][["DISTRIBUTION"]],"\n"))
+    if(!is.null(removed_aliquots)){
+      if(!is.null(aliquot_range)){
+        cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/",
+                   results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots),
+                   " (manually removed: " ,length(aliquot_range),")\n"))
+
+      }else{
+        cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/",
+                   results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots),"\n"))
+
+      }
+
+    }else{
+      cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/", results[[1]][["NB_ALIQUOTS"]],"\n"))
+
+    }
+
+    if(!is.null(baSAR_model)){
+      cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]]," (user defined)\n"))
+    }else{
+      cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]],"\n"))
+    }
+    cat(paste0("Number of independent chains:\t", results[[1]][["N.CHAINS"]],"\n"))
+    cat(paste0("Number MCMC iterations/chain:\t", results[[1]][["N.MCMC"]],"\n"))
+
+    cat("------------------------------------------------------------------\n")
+    if(distribution == "log_normal"){
+      cat("\t\t\t\tmean*\tsd\tHPD\n")
+
+    }else{
+      cat("\t\t\t\tmean\tsd\tHPD\n")
+
+    }
+
+
+    cat(paste0(">> Central dose:\t\t", results[[1]][["CENTRAL"]],"\t",
+               results[[1]][["CENTRAL.SD"]],"\t",
+               "[", results[[1]][["CENTRAL_Q_.16"]]," ; ", results[[1]][["CENTRAL_Q_.84"]], "]**\t"))
+    cat(paste0("\n\t\t\t\t\t\t[", results[[1]][["CENTRAL_Q_.025"]]," ; ", results[[1]][["CENTRAL_Q_.975"]],"]***"))
+
+    cat(paste0("\n>> sigma_D:\t\t\t", results[[1]][["SIGMA"]],"\t", results[[1]][["SIGMA.SD"]], "\t",
+               "[",results[[1]][["SIGMA_Q_.16"]]," ; ", results[[1]][["SIGMA_Q_.84"]], "]**\t"))
+    cat(paste0("\n\t\t\t\t\t\t[",results[[1]][["SIGMA_Q_.025"]]," ; ", results[[1]][["SIGMA_Q_.975"]], "]***"))
+    cat(paste0("\n>> Final central De:\t\t", results[[1]][["DE_FINAL"]],"\t", round(results[[1]][["DE_FINAL.ERROR"]], digits = digits), "\t",
+               " - \t -"))
+    cat("\n------------------------------------------------------------------\n")
+    cat(
+      paste("(systematic error contribution to final De:",
+            format((1-results[[1]][["CENTRAL.SD"]]/results[[1]][["DE_FINAL.ERROR"]])*100, scientific = TRUE), "%)\n")
+    )
+    if(distribution == "log_normal"){
+     cat("* mean of the central dose is the geometric mean\n")
+    }
+    cat("** 68 % level | *** 95 % level\n")
+
+  }
+
+
+  # Plotting ------------------------------------------------------------------------------------
+  if(plot){
+
+    ##get colours from the package Luminescence
+    col <- get("col", pos = .LuminescenceEnv)
+
+    ##get list of variable names (we need them later)
+    varnames <- coda::varnames(results[[2]])
+
+    ##////////////////////////////////////////////////////////////////////////////////////////////
+    ##TRACE AND DENSITY PLOT
+    ####//////////////////////////////////////////////////////////////////////////////////////////
+    if(plot_reduced){
+      plot_check <- try(plot(results[[2]][,c("central_D","sigma_D"),drop = FALSE]), silent = TRUE)
+
+      ##show error
+      if(is(plot_check, "try-error")){
+        stop("[analyse_baSAR()] Plots for 'central_D' and 'sigma_D' could not be produced. You are probably monitoring the wrong variables!", .call = FALSE)
+
+      }
+
+    }else{
+      try(plot(results[[2]]))
+
+    }
+
+
+
+    ##////////////////////////////////////////////////////////////////////////////////////////////
+    ##TRUE DOSE PLOT AND DECISION MAKER
+    ####//////////////////////////////////////////////////////////////////////////////////////////
+    if (!plot.single) {
+      par(mfrow = c(2, 2))
+    }
+
+    ##get list with D values
+    ##get list out of it
+    plot_matrix <- as.matrix(results[[2]][,grep(x = varnames, pattern = "D[", fixed = TRUE)])
+
+    aliquot_quantiles <- t(matrixStats::colQuantiles(x = plot_matrix, probs = c(0.25,0.75)))
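+    ##after the transposition, row 1 holds the 25 % and row 2 the 75 % quantile per aliquot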
+
+    ##define boxplot colours: red if the interquartile range lies outside the
+    ##95 % HPD interval, orange if outside the 68 % HPD interval, white otherwise
+    box.col <- vapply(1:ncol(aliquot_quantiles), function(x){
+      if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.025")] |
+         aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.975")]
+      ){
+        col[2]
+      }else if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.16")] |
+               aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.84")]){
+
+        "orange"
+      }else{
+        "white"
+      }
+
+    }, FUN.VALUE = vector(mode = "character", length = 1))
+
+    ##to assure a minimum of quality, no more than 15 boxes are plotted in each plot
+    i <- 1
+
+    while(i < ncol(plot_matrix)){
+
+      step <- if((i + 14) > ncol(plot_matrix)){ncol(plot_matrix)}else{i + 14}
+
+      plot_check <- try(boxplot(
+        x = plot_matrix[,i:step],
+        use.cols = TRUE,
+        horizontal = TRUE,
+        outline = TRUE,
+        col = box.col[i:step],
+        xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"},
+        ylab = "Aliquot index",
+        yaxt = "n",
+        xlim = c(1,19),
+        main = paste0("Individual Doses | ALQ: ", i,":",step)
+      ))
+
+      if(!is(plot_check, "try-error")){
+      if(step == ncol(plot_matrix)){
+        axis(side = 2, at = 1:15, labels = as.character(c(i:step, rep(" ", length = 15 - length(i:step)))),
+             cex.axis = 0.8
+        )
+
+      }else{
+        axis(side = 2, at = 1:15, labels = as.character(i:step), cex.axis = 0.8)
+      }
+
+      ##add HPD with text
+      ##HPD - 68%
+      lines(
+        x = c(
+          results[[1]][, c("CENTRAL_Q_.16")], results[[1]][, c("CENTRAL_Q_.16")],
+          results[[1]][, c("CENTRAL_Q_.84")], results[[1]][, c("CENTRAL_Q_.84")]),
+        y = c(par()$usr[3], 16, 16, par()$usr[3]),
+        lty = 3,
+        col = col[3],
+        lwd = 1.5
+      )
+      text(
+        x = results[[1]][, c("CENTRAL")],
+        y = 16,
+        labels = "68 %",
+        pos = 3,
+        col = col[3],
+        cex = 0.9 * par()$cex
+      )
+
+      ##HPD - 95 %
+      lines(
+        x = c(
+          results[[1]][, c("CENTRAL_Q_.025")], results[[1]][, c("CENTRAL_Q_.025")],
+          results[[1]][, c("CENTRAL_Q_.975")], results[[1]][, c("CENTRAL_Q_.975")]),
+        y = c(par()$usr[3], 17.5, 17.5, par()$usr[3]),
+        lty = 3,
+        col = col[2],
+        lwd = 1.5
+      )
+
+      text(
+        x = results[[1]][, c("CENTRAL")],
+        y = 17.5,
+        labels = "95 %",
+        pos = 3,
+        col = col[2],
+        cex = 0.9 * par()$cex)
+
+      }
+      ##update counter
+      i <- i + 15
+
+
+    }
+    rm(plot_matrix)
+
+    if(!plot.single){
+      par(mfrow = c(1,2))
+      on.exit(par(mfrow = c(1,1), bg = "white", xpd = FALSE))
+    }
+    ##////////////////////////////////////////////////////////////////////////////////////////////
+    ##DOSE RESPONSE CURVES AND Lx/Tx VALUES
+    ####//////////////////////////////////////////////////////////////////////////////////////////
+
+      ##define selection vector
+      selection <- c("a[", "b[", "c[", "g[", "Q[1,")
+
+      ##get list out of it
+      list_selection <- lapply(X = selection, FUN = function(x){
+        unlist(results[[2]][,grep(x = varnames, pattern = x, fixed = TRUE)])
+
+      })
+
+      ##create matrix
+      plot_matrix <- t(do.call(what = "cbind", args = list_selection))
+
+      ##free memory
+      rm(list_selection)
+
+
+      ##make selection according to the model for the curve plotting
+      if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <-  0 }
+      if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <-  1 }
+      if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <-  1 }
+      if (fit.force_through_origin == TRUE) {GC_Origin <- 1} else {GC_Origin <- 0}
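+      ##sketch of the curve evaluated below (rows of plot_matrix: a, b, c, g);
+      ##the switches above select the fitted model:
+      ##  LxTx(D) = GC_Origin * g + LinGC * c * D + ExpoGC * a * (1 - exp(-D / b))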
+
+      ##add choice for user-provided model
+      if(!is.null(baSAR_model)){
+        fit.method_plot <- paste(fit.method, "(user defined)")
+
+      }else{
+        fit.method_plot <- fit.method
+
+      }
+
+       ##open plot area
+        ##for the xlim and ylim we have to identify the proper ranges based on the input
+        xlim <- c(0, max(input_object[,grep(x = colnames(input_object), pattern = "DOSE")], na.rm = TRUE)*1.1)
+        ylim <- c(
+          min(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE),
+          max(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE)*1.1)
+
+        ##check for position of the legend ... we can do better
+        if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){
+          legend_pos <- "topright"
+
+        }else{
+          legend_pos <- "topleft"
+
+        }
+
+        ##set plot area
+        plot_check <- try(plot(
+          NA,
+          NA,
+          ylim = ylim,
+          xlim = xlim,
+          ylab = expression(paste(L[x] / T[x])),
+          xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"},
+          main = "baSAR Dose Response Curves"
+        ))
+
+
+        if (!is(plot_check, "try-error")) {
+          ##add mtext
+          mtext(side = 3, text = paste("Fit:", fit.method_plot))
+
+          ##check whether we have all data we need (this might not be the case
+          ##if the user selects own variables)
+          if (ncol(plot_matrix) != 0) {
+            ##plot individual dose response curves
+            x <- NA
+            for (i in seq(1, ncol(plot_matrix), length.out = 1000)) {
+              curve(
+                GC_Origin * plot_matrix[4, i] + LinGC * (plot_matrix[3, i] * x) +
+                  ExpoGC * (plot_matrix[1, i] * (1 - exp (
+                    -x / plot_matrix[2, i]
+                  ))),
+                add = TRUE,
+                col = rgb(0, 0, 0, .1)
+              )
+
+            }
+          }else{
+            try(stop("[analyse_baSAR()] Wrong 'variable.names' monitored, dose response curves could not be plotted!", call. = FALSE))
+
+          }
+
+          ##add dose points
+          n.col <-
+            length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")])
+
+          ##add rug with natural Lx/Tx
+          rug(side = 2, x = input_object[[9 + n.col]])
+
+          ##plot Lx/Tx values with their error bars
+          for (i in 2:length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")])) {
+            ##add error bars
+            segments(
+              x0 = input_object[[8 + i]],
+              x1 = input_object[[8 + i]],
+              y0 = input_object[[8 + n.col + i]] - input_object[[8 + 2 * n.col + i]],
+              y1 = input_object[[8 + n.col + i]] + input_object[[8 + 2 * n.col + i]],
+              col = "grey"
+            )
+
+            ##add points in the top of it
+            points(
+              x = input_object[[8 + i]],
+              y = input_object[[8 + n.col + i]],
+              pch = 21,
+              col = col[11],
+              bg = "grey"
+            )
+          }
+
+          ##add ablines
+          abline(
+            v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")],
+            lty = 3,
+            col = col[3],
+            lwd = 1.2
+          )
+          abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2])
+
+          ##add legend1
+          legend(
+            legend_pos,
+            bty = "n",
+            horiz = FALSE,
+            lty = c(3, 2),
+            col = c(col[3], col[2]),
+            legend = c("HPD - 68 %", "HPD - 95 %")
+          )
+
+          ##add legend2
+          legend(
+            "bottomright",
+            bty = "n",
+            horiz = FALSE,
+            pch = 21,
+            col = col[11],
+            bg = "grey",
+            legend = "measured dose points"
+          )
+
+        }
+      ##remove object, it might be rather big
+      rm(plot_matrix)
+
+      ##03 Abanico Plot
+      plot_check <- plot_AbanicoPlot(
+        data = input_object[, c("DE", "DE.SD")],
+        zlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))},
+        log.z = if (distribution != "log_normal") {
+          FALSE
+        } else{
+          TRUE
+        },
+        z.0 = results[[1]]$CENTRAL,
+        y.axis = FALSE,
+        polygon.col = FALSE,
+        line = results[[1]][,c(
+          "CENTRAL_Q_.16", "CENTRAL_Q_.84", "CENTRAL_Q_.025", "CENTRAL_Q_.975")],
+        line.col = c(col[3], col[3], col[2], col[2]),
+        line.lty = c(3,3,2,2),
+        output = TRUE,
+        mtext = paste0(
+          nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))),
+          "/",
+          nrow(input_object),
+          " plotted (removed are NA values)"
+        )
+      )
+
+      if (!is.null(plot_check)) {
+        legend(
+          "topleft",
+          legend = c("Central dose", "HPD - 68%", "HPD - 95 %"),
+          lty = c(2, 3, 2),
+          col = c("black", col[3], col[2]),
+          bty = "n",
+          cex = par()$cex * 0.8
+        )
+
+      }
+
+      ##In case the Abanico plot will not work because of negative values
+      ##provide a KDE
+      if(is.null(plot_check)){
+        plot_check <- try(suppressWarnings(plot_KDE(
+          data = input_object[, c("DE", "DE.SD")],
+          xlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))},
+          mtext =   paste0(
+            nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))),
+            "/",
+            nrow(input_object),
+            " (removed are NA values)"
+          )
+        )))
+
+        if(!is(plot_check, "try-error")) {
+          abline(v = results[[1]]$CENTRAL, lty = 2)
+          abline(
+            v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")],
+            lty = 3,
+            col = col[3],
+            lwd = 1.2
+          )
+          abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2])
+
+          ##check for position of the legend
+          if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){
+            legend_pos <- "right"
+
+          }else{
+            legend_pos <- "topleft"
+
+          }
+
+          legend(
+            legend_pos,
+            legend = c("Central dose", "HPD - 68%", "HPD - 95 %"),
+            lty = c(2, 3, 2),
+            col = c("black", col[3], col[2]),
+            bty = "n",
+            cex = par()$cex * 0.8
+
+          )
+
+        }
+
+      }
+  }
+
+  # Return --------------------------------------------------------------------------------------
+  return(set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary = results[[1]],
+      mcmc = results[[2]],
+      models = results[[3]],
+      input_object = input_object,
+      removed_aliquots = removed_aliquots
+      ),
+    info = list(call = sys.call())
+  ))
+
+}
diff --git a/R/analyse_pIRIRSequence.R b/R/analyse_pIRIRSequence.R
new file mode 100644
index 0000000..2f82b9d
--- /dev/null
+++ b/R/analyse_pIRIRSequence.R
@@ -0,0 +1,838 @@
+#' Analyse post-IR IRSL sequences
+#'
+#' The function performs an analysis of post-IR IRSL sequences including curve
+#' fitting on \code{\linkS4class{RLum.Analysis}} objects.
+#'
+#'
+#' To allow post-IR IRSL protocol (Thomsen et al., 2008) measurement analyses
+#' this function has been written as extended wrapper function for the function
+#' \code{\link{analyse_SAR.CWOSL}}, facilitating an entire sequence analysis in
+#' one run. With this, its functionality is strictly limited by the
+#' functionality of the function \code{\link{analyse_SAR.CWOSL}}.\cr
+#'
+#' \bold{If the input is a \code{list}}\cr
+#'
+#' If the input is a list of RLum.Analysis-objects, every argument can be provided as list to allow
+#' for different sets of parameters for every single input element.
+#' For further information see \code{\link{analyse_SAR.CWOSL}}.
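+#'
+#' A minimal sketch of such a list-based call (hypothetical objects \code{a1} and
+#' \code{a2}; all integration limits are placeholders):
+#'
+#' \preformatted{
+#' analyse_pIRIRSequence(
+#'   object = list(a1, a2),
+#'   signal.integral.min = list(1, 1),
+#'   signal.integral.max = list(2, 2),
+#'   background.integral.min = list(900, 900),
+#'   background.integral.max = list(1000, 1000))
+#' }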
+#'
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}) or \code{\link{list}} of
+#' \code{\linkS4class{RLum.Analysis}} objects: input object containing data for analysis. If a \code{\link{list}}
+#' is provided the function tries to iterate over the list.
+#'
+#' @param signal.integral.min \code{\link{integer}} (\bold{required}): lower
+#' bound of the signal integral. Provide this value as vector for different
+#' integration limits for the different IRSL curves.
+#'
+#' @param signal.integral.max \code{\link{integer}} (\bold{required}): upper
+#' bound of the signal integral. Provide this value as vector for different
+#' integration limits for the different IRSL curves.
+#'
+#' @param background.integral.min \code{\link{integer}} (\bold{required}):
+#' lower bound of the background integral. Provide this value as vector for
+#' different integration limits for the different IRSL curves.
+#'
+#' @param background.integral.max \code{\link{integer}} (\bold{required}):
+#' upper bound of the background integral. Provide this value as vector for
+#' different integration limits for the different IRSL curves.
+#'
+#' @param dose.points \code{\link{numeric}} (optional): a numeric vector
+#' containing the dose points values. Using this argument overwrites dose point
+#' values in the signal curves.
+#'
+#' @param sequence.structure \link{vector} \link{character} (with default):
+#' specifies the general sequence structure. Allowed values are \code{"TL"} and
+#' any \code{"IR"} combination (e.g., \code{"IR50"},\code{"pIRIR225"}).
+#' Additionally a parameter \code{"EXCLUDE"} is allowed to exclude curves from
+#' the analysis (Note: If a preheat without PMT measurement is used, i.e.
+#' preheat as non TL, remove the TL step.)
+#'
+#' @param plot \code{\link{logical}} (with default): enables or disables plot
+#' output.
+#'
+#' @param plot.single \code{\link{logical}} (with default): single plot output
+#' (\code{TRUE/FALSE}) to allow for plotting the results in single plot
+#' windows. Requires \code{plot = TRUE}.
+#'
+#' @param \dots further arguments that will be passed to the function
+#' \code{\link{analyse_SAR.CWOSL}} and \code{\link{plot_GrowthCurve}}
+#'
+#' @return Plots (optional) and an \code{\linkS4class{RLum.Results}} object is
+#' returned containing the following elements:
+#'
+#' \tabular{lll}{
+#' \bold{DATA.OBJECT} \tab \bold{TYPE} \tab \bold{DESCRIPTION} \cr
+#' \code{..$data} : \tab  \code{data.frame} \tab Table with De values \cr
+#' \code{..$LnLxTnTx.table} : \tab \code{data.frame} \tab with the LnLxTnTx values \cr
+#' \code{..$rejection.criteria} : \tab \code{\link{data.frame}} \tab rejection criteria \cr
+#' \code{..$Formula} : \tab \code{\link{list}} \tab Function used for fitting of the dose response curve \cr
+#' \code{..$call} : \tab \code{\link{call}} \tab the original function call
+#' }
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}.
+#'
+#' @note Best graphical output can be achieved by using the function \code{pdf}
+#' with the following options:\cr \code{pdf(file = "...", height = 15, width =
+#' 15)}
+#'
+#' @section Function version: 0.2.2
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{analyse_SAR.CWOSL}}, \code{\link{calc_OSLLxTxRatio}},
+#' \code{\link{plot_GrowthCurve}}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}} \code{\link{get_RLum}}
+#'
+#' @references Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz
+#' using an improved single-aliquot regenerative-dose protocol. Radiation
+#' Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X
+#'
+#' Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory
+#' fading rates of various luminescence signals from feldspar-rich sediment
+#' extracts. Radiation Measurements 43, 1474-1486.
+#' doi:10.1016/j.radmeas.2008.06.002
+#'
+#' @keywords datagen plot
+#'
+#' @examples
+#'
+#'
+#' ### NOTE: For this example existing example data are used. These data are non-pIRIR data.
+#' ###
+#' ##(1) Compile example data set based on existing example data (SAR quartz measurement)
+#' ##(a) Load example data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##(b) Transform the values from the first position in a RLum.Analysis object
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+#'
+#' ##(c) Grep curves and exclude the last two (one TL and one IRSL)
+#' object <- get_RLum(object, record.id = c(-29,-30))
+#'
+#' ##(d) Define new sequence structure and set new RLum.Analysis object
+#' sequence.structure  <- c(1,2,2,3,4,4)
+#' sequence.structure <- as.vector(sapply(seq(0,length(object)-1,by = 4),
+#'                                        function(x){sequence.structure + x}))
+#'
+#' object <-  sapply(1:length(sequence.structure), function(x){
+#'
+#'   object[[sequence.structure[x]]]
+#'
+#' })
+#'
+#' object <- set_RLum(class = "RLum.Analysis", records = object, protocol = "pIRIR")
+#'
+#' ##(2) Perform pIRIR analysis (for this example with quartz OSL data!)
+#' ## Note: output as single plots to avoid problems with this example
+#' results <- analyse_pIRIRSequence(object,
+#'      signal.integral.min = 1,
+#'      signal.integral.max = 2,
+#'      background.integral.min = 900,
+#'      background.integral.max = 1000,
+#'      fit.method = "EXP",
+#'      sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"),
+#'      main = "Pseudo pIRIR data set based on quartz OSL",
+#'      plot.single = TRUE)
+#'
+#'
+#' ##(3) Perform pIRIR analysis (for this example with quartz OSL data!)
+#' ## Alternative for PDF output, uncomment and complete for usage
+#' \dontrun{
+#' pdf(file = "...", height = 15, width = 15)
+#'   results <- analyse_pIRIRSequence(object,
+#'          signal.integral.min = 1,
+#'          signal.integral.max = 2,
+#'          background.integral.min = 900,
+#'          background.integral.max = 1000,
+#'          fit.method = "EXP",
+#'          main = "Pseudo pIRIR data set based on quartz OSL")
+#'
+#'   dev.off()
+#' }
+#'
+#' @export
+analyse_pIRIRSequence <- function(
+  object,
+  signal.integral.min,
+  signal.integral.max,
+  background.integral.min,
+  background.integral.max,
+  dose.points = NULL,
+  sequence.structure = c("TL", "IR50", "pIRIR225"),
+  plot = TRUE,
+  plot.single = FALSE,
+  ...
+){
+
+# SELF CALL -----------------------------------------------------------------------------------
+ if(is.list(object)){
+
+    ##make life easy
+    if(missing("signal.integral.min")){
+      signal.integral.min <- 1
+      warning("[analyse_pIRIRSequence()] 'signal.integral.min' missing, set to 1", call. = FALSE)
+    }
+
+    if(missing("signal.integral.max")){
+      signal.integral.max <- 2
+      warning("[analyse_pIRIRSequence()] 'signal.integral.max' missing, set to 2", call. = FALSE)
+    }
+
+
+    ##now we have to extend everything to allow lists of arguments ... this is only consistent
+    signal.integral.min <- rep(list(signal.integral.min), length = length(object))
+    signal.integral.max <- rep(list(signal.integral.max), length = length(object))
+    background.integral.min <- rep(list(background.integral.min), length = length(object))
+    background.integral.max <- rep(list(background.integral.max), length = length(object))
+    sequence.structure <- rep(list(sequence.structure), length = length(object))
+
+    if(!is.null(dose.points)){
+
+      if(is(dose.points, "list")){
+        dose.points <- rep(dose.points, length = length(object))
+
+      }else{
+        dose.points <- rep(list(dose.points), length = length(object))
+
+      }
+
+    }else{
+      dose.points <- rep(list(NULL), length(object))
+
+    }
+
+    ##run analysis
+    temp <- lapply(1:length(object), function(x){
+
+      analyse_pIRIRSequence(object[[x]],
+                        signal.integral.min = signal.integral.min[[x]],
+                        signal.integral.max = signal.integral.max[[x]],
+                        background.integral.min = background.integral.min[[x]],
+                        background.integral.max = background.integral.max[[x]] ,
+                        dose.points = dose.points[[x]],
+                        sequence.structure = sequence.structure[[x]],
+                        plot = plot,
+                        plot.single = plot.single,
+                        main = ifelse("main"%in% names(list(...)), list(...)$main, paste0("ALQ #",x)),
+                        ...)
+
+    })
+
+    ##combine everything into one RLum.Results object, as this is what was documented ... only
+    ##one object
+
+    ##merge results and check if the output became NULL
+    results <- merge_RLum(temp)
+
+    ##DO NOT use invisible() here, otherwise the returned NULL would be hidden
+    if(length(results) == 0){
+      return(NULL)
+
+    }else{
+      return(results)
+
+    }
+
+  }
+
+
+# General Integrity Checks ---------------------------------------------------
+
+  ##GENERAL
+
+    ##MISSING INPUT
+    if(missing("object")==TRUE){
+      stop("[analyse_pIRIRSequence()] No value set for 'object'!")
+    }
+
+    ##INPUT OBJECTS
+    if(is(object, "RLum.Analysis")==FALSE){
+      stop("[analyse_pIRIRSequence()] Input object is not of type 'RLum.Analyis'!")
+    }
+
+    ##CHECK ALLOWED VALUES IN SEQUENCE STRUCTURE
+    temp.collect.invalid.terms <- paste(sequence.structure[
+      (!grepl("TL",sequence.structure)) &
+      (!grepl("IR",sequence.structure)) &
+      (!grepl("EXCLUDE",sequence.structure))],
+      collapse = ", ")
+
+    if(temp.collect.invalid.terms != ""){
+      stop("[analyse_pIRIRSequence()] ",
+        temp.collect.invalid.terms, " not allowed in sequence.structure!")
+    }
+
+
+# Deal with extra arguments -------------------------------------------------------------------
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  mtext.outer <- if("mtext.outer" %in% names(extraArgs)) {extraArgs$mtext.outer} else
+  {"MEASUREMENT INFO"}
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {""}
+
+  log <- if("log" %in% names(extraArgs)) {extraArgs$log} else
+  {""}
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else
+  {.7}
+
+
+# Protocol Integrity Checks --------------------------------------------------
+
+  ##(1) Check structure and remove curves that do not fit the recordType criteria
+
+  ##get sequence structure
+  temp.sequence.structure  <- structure_RLum(object)
+
+  ##remove record types that do not match the allowed values
+  temp.sequence.rm.id <- temp.sequence.structure[
+    (!grepl("TL",temp.sequence.structure[, "recordType"])) &
+    (!grepl("OSL", temp.sequence.structure[, "recordType"])) &
+    (!grepl("IRSL", temp.sequence.structure[, "recordType"]))
+    ,"id"]
+
+  if(length(temp.sequence.rm.id)>0){
+
+  ##removed record from data set
+  object <- get_RLum(object, record.id = -temp.sequence.rm.id,
+        drop = FALSE
+      )
+
+  ##compile warning message
+  temp.sequence.rm.warning <- paste(
+    temp.sequence.structure[temp.sequence.rm.id, "recordType"], collapse = ", ")
+
+  temp.sequence.rm.warning <- paste(
+    "Record types are unrecognised and have been removed:", temp.sequence.rm.warning)
+
+  warning(temp.sequence.rm.warning)
+  }
+
+  ##(2) Apply user sequence structure
+
+  ##get sequence structure
+  temp.sequence.structure  <- structure_RLum(object)
+
+  ##set values to structure data.frame
+  temp.sequence.structure[, "protocol.step"] <- rep(
+    sequence.structure, nrow(temp.sequence.structure)/2/length(sequence.structure))
+
+  ##remove values that have been excluded
+  temp.sequence.rm.id <- temp.sequence.structure[
+    temp.sequence.structure[,"protocol.step"] == "EXCLUDE" ,"id"]
+
+  if(length(temp.sequence.rm.id)>0){
+
+    ##remove from object
+    object  <- get_RLum(
+      object, record.id = -temp.sequence.rm.id, drop = FALSE)
+
+    ##remove from sequence structure
+    sequence.structure  <- sequence.structure[sequence.structure != "EXCLUDE"]
+
+    ##set new structure
+    temp.sequence.structure  <- structure_RLum(object)
+
+    temp.sequence.structure[, "protocol.step"] <- rep(
+      sequence.structure, nrow(temp.sequence.structure)/2/length(temp.sequence.structure))
+
+    ##print warning message
+    warning(length(temp.sequence.rm.id), " records have been removed due to EXCLUDE!")
+
+  }
+
+##============================================================================##
+# Analyse data and plotting ----------------------------------------------------
+##============================================================================##
+
+  ##(1) find out how many runs are needed for the analysis by checking for "IR"
+  ##    which should be every signal except the TL curves
+  n.TL <- table(grepl("TL", sequence.structure))["TRUE"]
+  if(is.na(n.TL)) {n.TL <- 0}
+  n.loops <- as.numeric(length(grepl("TL", sequence.structure)) - n.TL)
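+  ##e.g., sequence.structure = c("TL", "IR50", "pIRIR225") gives n.TL = 1 and n.loops = 2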
+
+  ##grep ids of TL curves (we need them later on)
+  TL.curves.id <- temp.sequence.structure[
+    temp.sequence.structure[,"protocol.step"] == "TL","id"]
+
+  ##grep ids of all IRSL curves (we need them later on)
+  IRSL.curves.id <- temp.sequence.structure[
+    grepl("IR", temp.sequence.structure[,"protocol.step"]),"id"]
+
+  ##grep information on the names of the IR curves, we need them later on
+  pIRIR.curve.names  <- unique(temp.sequence.structure[
+    temp.sequence.structure[IRSL.curves.id,"id"],"protocol.step"])
+
+  ##===========================================================================#
+  ## set graphic layout using the layout option
+  ## unfortunately a little more complicated than expected due to
+  ## the order of the plots produced by the previous functions
+
+  if(plot.single == FALSE & plot == TRUE){
+
+    ##first (Tx,Tn, Lx,Ln)
+    temp.IRSL.layout.vector.first <- c(3,5,6,7,3,5,6,8)
+
+  ##middle (any other Lx,Ln)
+  if(n.loops > 2){
+    temp.IRSL.layout.vector.middle <-
+      vapply(
+        2:(n.loops - 1),
+        FUN = function(x) {
+
+          offset <- 5 * x - 1
+          c((offset):(offset + 3),
+            (offset):(offset + 2), offset + 4)
+
+        },
+        FUN.VALUE = vector(mode = "numeric", length = 8)
+      )
+  }
+
+  ##last (Lx,Ln and legend)
+  temp.IRSL.layout.vector.last <- c(
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 1,
+           max(temp.IRSL.layout.vector.first) + 1),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 2,
+           max(temp.IRSL.layout.vector.first) + 2),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 4,
+           max(temp.IRSL.layout.vector.first) + 4),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 5,
+           max(temp.IRSL.layout.vector.first) + 5),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 1,
+           max(temp.IRSL.layout.vector.first) + 1),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 2,
+           max(temp.IRSL.layout.vector.first) + 2),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 4,
+           max(temp.IRSL.layout.vector.first) + 4),
+    ifelse(n.loops > 2,max(temp.IRSL.layout.vector.middle) + 6,
+           max(temp.IRSL.layout.vector.first) + 6))
+
+  ##options for different sets of curves
+  if(n.loops > 2){
+
+    temp.IRSL.layout.vector <- c(temp.IRSL.layout.vector.first,
+                                 temp.IRSL.layout.vector.middle,
+                                 temp.IRSL.layout.vector.last)
+
+  }else{
+
+    temp.IRSL.layout.vector <- c(temp.IRSL.layout.vector.first,
+                                 temp.IRSL.layout.vector.last)
+
+  }
+
+  ##get layout information
+  def.par <- par(no.readonly = TRUE)
+
+  ##set up layout matrix linked to the number of plot areas needed
+  layout.matrix  <- c(
+    rep(c(2,4,1,1),2), #header row with TL curves and info window
+    temp.IRSL.layout.vector, #IRSL curves,
+    rep((max(temp.IRSL.layout.vector)-3),8), #legend,
+    rep((max(temp.IRSL.layout.vector)+1),1), #GC
+    rep((max(temp.IRSL.layout.vector)+2),1), #TnTc
+    rep((max(temp.IRSL.layout.vector)+3),2), #Rejection criteria
+    rep((max(temp.IRSL.layout.vector)+1),1), #GC
+    rep((max(temp.IRSL.layout.vector)+2),1), #TnTc
+    rep((max(temp.IRSL.layout.vector)+3),2)) #Rejection criteria
+
+
+  ##set layout
+  nf <- layout(
+    matrix(layout.matrix,(max(layout.matrix)/2 +
+                            ifelse(n.loops > 2, 0,2)), 4, byrow = TRUE),
+     widths = c(rep(c(1,1,1,.75),6),c(1,1,1,1)),
+     heights = c(rep(c(1),(2+2*n.loops)),c(0.20, 0.20)))
+
+  ## show the regions that have been allocated to each plot for debug
+  #layout.show(nf)
+
+  }
+
+  ##(1) INFO PLOT
+  if (plot) {
+    plot(NA,NA,
+         ylim = c(0,1), xlab = "",
+         xlim = c(0,1), ylab = "",
+         axes = FALSE,
+         main = main)
+
+    text(0.5,0.5, paste(sequence.structure, collapse = "\n"), cex = cex *2)
+  }
+
+
+  ##(2) set loop
+  for(i in 1:n.loops){
+
+    ##compile record ids
+    temp.id.sel <-
+      sort(c(TL.curves.id, IRSL.curves.id[seq(i,length(IRSL.curves.id),by=n.loops)]))
+
+    ##(a) select data set (TL curves have to be considered for the data set)
+    temp.curves <- get_RLum(object, record.id = temp.id.sel, drop = FALSE)
+
+    ##(b) grep integral limits as they might be different for different curves
+    if(length(signal.integral.min)>1){
+
+      temp.signal.integral.min <- signal.integral.min[i]
+      temp.signal.integral.max <- signal.integral.max[i]
+      temp.background.integral.min <- background.integral.min[i]
+      temp.background.integral.max <- background.integral.max[i]
+
+    }else{
+
+      temp.signal.integral.min <- signal.integral.min
+      temp.signal.integral.max <- signal.integral.max
+      temp.background.integral.min <- background.integral.min
+      temp.background.integral.max <- background.integral.max
+
+    }
+
+    ##(c) call analysis sequence and plot
+
+    ## call single plots
+    if(i == 1){
+
+      temp.plot.single  <- c(1,2,3,4,6)
+
+    }else if(i == n.loops){
+
+      temp.plot.single  <- c(2,4,5,6)
+
+  }else{
+
+     temp.plot.single  <- c(2,4,6)
+
+  }
+
+    ##start analysis
+    temp.results <- analyse_SAR.CWOSL(
+      temp.curves,
+      signal.integral.min = temp.signal.integral.min,
+      signal.integral.max = temp.signal.integral.max,
+      background.integral.min = temp.background.integral.min,
+      background.integral.max = temp.background.integral.max,
+      plot = plot,
+      dose.points = dose.points,
+      plot.single = temp.plot.single,
+      output.plotExtended.single = TRUE,
+      cex.global = cex,
+      ...
+    ) ##TODO: should be replaced by useful explicit arguments
+
+
+      ##check whether NULL was returned
+      if (is.null(temp.results)) {
+        warning("[plot_pIRIRSequence()] An error occurred, analysis skipped. Check your sequence!", call. = FALSE)
+        return(NULL)
+      }
+
+      ##add signal information to the protocol step
+      temp.results.pIRIR.De <- as.data.frame(c(
+        get_RLum(temp.results, "data"),
+        data.frame(Signal = pIRIR.curve.names[i])
+      ))
+
+      temp.results.pIRIR.LnLxTnTx <- as.data.frame(c(
+        get_RLum(temp.results, "LnLxTnTx.table"),
+        data.frame(Signal = pIRIR.curve.names[i])
+      ))
+
+      temp.results.pIRIR.rejection.criteria <- as.data.frame(c(
+        get_RLum(temp.results, "rejection.criteria"),
+        data.frame(Signal = pIRIR.curve.names[i])
+      ))
+
+      temp.results.pIRIR.formula <- list(get_RLum(temp.results,
+                                                  "Formula"))
+      names(temp.results.pIRIR.formula)  <- pIRIR.curve.names[i]
+
+      ##create new object
+      temp.results  <- set_RLum(
+        class = "RLum.Results",
+        data = list(
+          data = temp.results.pIRIR.De,
+          LnLxTnTx.table = temp.results.pIRIR.LnLxTnTx,
+          rejection.criteria = temp.results.pIRIR.rejection.criteria,
+          Formula = temp.results.pIRIR.formula
+        ),
+        info = list(
+          call = sys.call()
+        )
+      )
+
+
+      ##merge results
+      if (exists("temp.results.final")) {
+        temp.results.final <- merge_RLum(list(temp.results.final, temp.results))
+
+      } else{
+        temp.results.final <- temp.results
+
+      }
+
+
+  }
+
+
+##============================================================================##
+# Additional plots ------------------------------------------------------------
+##============================================================================##
+
+if(plot){
+
+  ##plot growth curves
+  plot(NA, NA,
+       xlim = range(get_RLum(temp.results.final, "LnLxTnTx.table")$Dose),
+       ylim = c(
+         if(min(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx)-
+            max(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx.Error) < 0){
+           min(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx)-
+             max(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx.Error)
+         }else{0},
+         max(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx)+
+         max(get_RLum(temp.results.final, "LnLxTnTx.table")$LxTx.Error)),
+       xlab = "Dose [s]",
+       ylab = expression(L[x]/T[x]),
+       main = "Summarised Dose Response Curves")
+
+
+    ##set x for expression evaluation
+    x <- seq(0,
+             max(get_RLum(temp.results.final, "LnLxTnTx.table")$Dose)*1.05,
+             length = 100)
+
+    for(j in 1:length(pIRIR.curve.names)){
+
+     ##dose points
+     temp.curve.points <- get_RLum(
+       temp.results.final,"LnLxTnTx.table")[,c("Dose", "LxTx", "LxTx.Error", "Signal")]
+
+     temp.curve.points <- temp.curve.points[
+       temp.curve.points[,"Signal"] == pIRIR.curve.names[j],
+       c("Dose", "LxTx", "LxTx.Error")]
+
+     points(temp.curve.points[-1,c("Dose", "LxTx")], col = j, pch = j)
+     segments(x0 = temp.curve.points[-1,c("Dose")],
+              y0 = temp.curve.points[-1,c("LxTx")] -
+                temp.curve.points[-1,c("LxTx.Error")],
+              x1 = temp.curve.points[-1,c("Dose")],
+              y1 = temp.curve.points[-1,c("LxTx")] +
+                temp.curve.points[-1,c("LxTx.Error")],
+              col = j)
+
+     ##De values
+     lines(c(0, get_RLum(temp.results.final, "data")[j,1]),
+           c(temp.curve.points[1,c("LxTx")], temp.curve.points[1,c("LxTx")]),
+           col = j,
+           lty = 2)
+
+     lines(c(rep(get_RLum(temp.results.final, "data")[j,1], 2)),
+           c(temp.curve.points[1,c("LxTx")], 0),
+           col = j,
+           lty = 2)
+
+     ##curve
+     temp.curve.formula  <- get_RLum(
+        temp.results.final, "Formula")[[pIRIR.curve.names[j]]]
+
+     try(lines(x, eval(temp.curve.formula), col = j), silent = TRUE)
+
+    }
+
+    rm(x)
+
+    ##plot legend
+    legend("bottomright", legend = pIRIR.curve.names,
+           lty = 1, col = c(1:length(pIRIR.curve.names)),
+           bty = "n",
+           pch = c(1:length(pIRIR.curve.names))
+           )
+
+    ##plot Tn/Tx curves
+    ##select signal
+    temp.curve.TnTx <-
+      get_RLum(temp.results.final, "LnLxTnTx.table")[, c("TnTx", "Signal")]
+
+    temp.curve.TnTx.matrix <- matrix(NA,
+                                    nrow = nrow(temp.curve.TnTx)/
+                                      length(pIRIR.curve.names),
+                                    ncol =  length(pIRIR.curve.names))
+
+    ##calculate normalised values
+    for(j in 1:length(pIRIR.curve.names)){
+
+      temp.curve.TnTx.sel <- temp.curve.TnTx[
+        temp.curve.TnTx[,"Signal"] == pIRIR.curve.names[j]
+        , "TnTx"]
+
+      temp.curve.TnTx.matrix[,j] <- temp.curve.TnTx.sel/temp.curve.TnTx.sel[1]
+
+    }
+
+    plot(NA, NA,
+       xlim = c(0,nrow(get_RLum(temp.results.final, "LnLxTnTx.table"))/
+                     n.loops),
+       ylim = range(temp.curve.TnTx.matrix),
+       xlab = "# Cycle",
+       ylab = expression(T[x]/T[n]),
+       main = "Sensitivity change")
+
+    ##horizontal reference lines
+    abline(h = 1:nrow(temp.curve.TnTx.matrix), col = "gray")
+
+    for(j in 1:length(pIRIR.curve.names)){
+
+     lines(1:nrow(temp.curve.TnTx.matrix),
+           temp.curve.TnTx.matrix[,j],
+           type = "b",
+           col = j,
+           pch = j)
+    }
+
+   ##plot legend
+   legend("bottomleft", legend = pIRIR.curve.names,
+         lty = 1, col = c(1:length(pIRIR.curve.names)),
+         bty = "n",
+         pch = c(1:length(pIRIR.curve.names))
+         )
+
+
+   ##Rejection criteria
+   temp.rejection.criteria <- get_RLum(temp.results.final,
+                                               data.object = "rejection.criteria")
+
+   temp.rc.recycling.ratio <- temp.rejection.criteria[
+     grep("Recycling ratio",temp.rejection.criteria[,"Criteria"]),]
+
+   temp.rc.recuperation.rate <- temp.rejection.criteria[
+     grep("Recuperation rate",temp.rejection.criteria[,"Criteria"]),]
+
+   temp.rc.palaedose.error <- temp.rejection.criteria[
+     grep("Palaeodose error",temp.rejection.criteria[,"Criteria"]),]
+
+   plot(NA,NA,
+        xlim = c(-0.5,0.5),
+        ylim = c(0,30),
+        yaxt = "n", ylab = "",
+        xaxt = "n", xlab = "",
+        bty = "n",
+        main = "Rejection criteria")
+
+   axis(side = 1, at = c(-0.2,-0.1,0,0.1,0.2), labels = c("- 0.2", "- 0.1","0/1","+ 0.1", "+ 0.2"))
+   ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+   ##polygon for recycling ratio
+   text(x = -.4, y = 30, "Recycling ratio", pos = 1, srt = 0)
+   polygon(x = c(-as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],
+                -as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],
+                as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1],
+                as.numeric(as.character(temp.rc.recycling.ratio$Threshold))[1]),
+          y = c(21,29,29,21), col = "gray", border = NA)
+    polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(21,29,29,21))
+
+
+   ##consider possibility of multiple pIRIR signals and multiple recycling ratios
+   col.id  <- 1
+
+   ##this conditional covers the case that no rejection criteria could be calculated
+   if(nrow(temp.rc.recuperation.rate)>0){
+
+   for(i in seq(1,nrow(temp.rc.recuperation.rate),
+                  length(unique(temp.rc.recuperation.rate[,"Criteria"])))){
+
+
+        for(j in 0:length(unique(temp.rc.recuperation.rate[,"Criteria"]))){
+         points(temp.rc.recycling.ratio[i+j, "Value"]-1,
+               y = 25,
+               pch = col.id,
+               col = col.id)
+
+        }
+        col.id <- col.id + 1
+   }
+   }#endif
+
+    rm(col.id)
+
+
+
+   ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+   ##polygon for recuperation rate
+
+   text(x = -.4, y = 20, "Recuperation rate", pos = 1, srt = 0)
+
+   if(length(as.character(temp.rc.recuperation.rate$Threshold))>0){
+   polygon(x = c(0,
+                0,
+                as.numeric(as.character(temp.rc.recuperation.rate$Threshold))[1],
+                as.numeric(as.character(temp.rc.recuperation.rate$Threshold))[1]),
+          y = c(11,19,19,11), col = "gray", border = NA)
+
+   polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(11,19,19,11))
+   polygon(x = c(-0.3,-0.3,0,0) , y = c(11,19,19,11), border = NA, density = 10, angle = 45)
+
+
+  for(i in 1:nrow(temp.rc.recuperation.rate)){
+
+    points(temp.rc.recuperation.rate[i, "Value"],
+           y = 15,
+           pch = i,
+           col = i)
+
+  }
+  }#endif
+
+   ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
+   ##polygon for palaeodose error
+   text(x = -.4, y = 10, "Palaeodose error", pos = 1, srt = 0)
+   polygon(x = c(0,
+                0,
+                as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1],
+                as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1]),
+          y = c(1,9,9,1), col = "gray", border = NA)
+   polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(1,9,9,1))
+   polygon(x = c(-0.3,-0.3,0,0) , y = c(1,9,9,1), border = NA, density = 10, angle = 45)
+
+
+   for(i in 1:nrow(temp.rc.palaedose.error)){
+
+     points(temp.rc.palaedose.error[i, "Value"],
+            y = 5,
+            pch = i,
+            col = i)
+
+   }
+
+   ##add 0 value
+   lines(x = c(0,0), y = c(0,19), lwd = 1.5*cex)
+   lines(x = c(0,0), y = c(20,29), lwd = 1.5*cex)
+
+  ##plot legend
+  legend("bottomright", legend = pIRIR.curve.names,
+         col = c(1:length(pIRIR.curve.names)),
+         bty = "n",
+         pch = c(1:length(pIRIR.curve.names)))
+
+
+   ##reset graphic settings
+   if(plot.single == FALSE){par(def.par)}
+
+}##end plot == TRUE
+
+
+##============================================================================##
+# Return Values -----------------------------------------------------------
+##============================================================================##
+
+  return(temp.results.final)
+
+
+}
diff --git a/R/app_RLum.R b/R/app_RLum.R
new file mode 100644
index 0000000..6afd0d8
--- /dev/null
+++ b/R/app_RLum.R
@@ -0,0 +1,24 @@
+#' Run Luminescence shiny apps (wrapper)
+#'
+#' Wrapper for the function \code{\link[RLumShiny]{app_RLum}} from the package
+#' \link[RLumShiny]{RLumShiny-package}. For further details and examples please
+#' see the manual of this package.
+#'
+#' @param app \code{\link{character}} (required): name of the application to start. See details for a list
+#' of available apps.
+#' @param ... further arguments to pass to \code{\link[shiny]{runApp}}
+#'
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @section Function version: 0.1.0
+#'
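+#' @examples
+#'
+#' \dontrun{
+#' ## start one of the shiny applications; the app name used here ("abanico")
+#' ## is an assumption - see the RLumShiny manual for all available apps
+#' app_RLum("abanico")
+#' }
+#'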
+#' @export
+app_RLum <- function(app, ...) {
+  
+  if (!requireNamespace("RLumShiny", quietly = TRUE))
+    stop("Shiny applications require the 'RLumShiny' package. To install",
+         " this package run 'install.packages('RLumShiny')' in your R console.", 
+         call. = FALSE)
+  
+  RLumShiny::app_RLum(app, ...)
+}
\ No newline at end of file
diff --git a/R/apply_CosmicRayRemoval.R b/R/apply_CosmicRayRemoval.R
new file mode 100644
index 0000000..9f21767
--- /dev/null
+++ b/R/apply_CosmicRayRemoval.R
@@ -0,0 +1,316 @@
+#' Function to remove cosmic rays from an RLum.Data.Spectrum S4 class object
+#'
+#' The function provides several methods for cosmic ray removal and spectrum
+#' smoothing for an RLum.Data.Spectrum S4 class object
+#'
+#' \bold{\code{method = "Pych"}} \cr
+#'
+#' This method applies the cosmic-ray removal algorithm described by Pych
+#' (2003). Some aspects differ from the publication: \itemize{
+#' \item For interpolation between neighbouring values the median and not the
+#' mean is used. \item The number of breaks to construct the histogram is set
+#' to: \code{length(number.of.input.values)/2} } For further details see
+#' references below.
+#'
+#' \bold{\code{method = "smooth"}} \cr
+#'
+#' Method uses the function \code{\link{smooth}} to remove cosmic rays.\cr
+#'
+#' Arguments that can be passed are: \code{kind}, \code{twiceit}\cr
+#'
+#' \bold{\code{method = "smooth.spline"}} \cr Method uses the function
+#' \code{\link{smooth.spline}} to remove cosmic rays.\cr Arguments that can be
+#' passed are: \code{spar}\cr
+#'
+#' \bold{How to combine methods?}\cr
+#'
+#' Different methods can be combined by applying the method repeatedly to the
+#' dataset (see example).
+#'
+#' @param object \code{\linkS4class{RLum.Data.Spectrum}} (\bold{required}): S4
+#' object of class \code{RLum.Data.Spectrum}
+#'
+#' @param method \code{\link{character}} (with default): Defines method that is
+#' applied for cosmic ray removal. Allowed methods are \code{smooth}, the default,
+#' (\code{\link{smooth}}), \code{smooth.spline} (\code{\link{smooth.spline}})
+#' and \code{Pych}. See details for further information.
+#'
+#' @param method.Pych.smoothing \code{\link{integer}} (with default): Smoothing
+#' parameter for cosmic ray removal according to Pych (2003). The value defines
+#' how many neighboring values in each frame are used for smoothing (e.g.,
+#' \code{2} means that the two previous and two following values are used).
+#'
+#' @param method.Pych.threshold_factor \code{\link{numeric}} (with default): Threshold
+#' for zero-bins in the histogram. Small values mean that more peaks are removed, but signal
+#' might be also affected by this removal.
+#'
+#' @param MARGIN \code{\link{integer}} (with default): part of the object the cosmic ray removal
+#' is applied to: 1 = along the time axis (line by line), 2 = along the wavelength axis (column by
+#' column). Note: This argument currently only affects the methods \code{smooth} and \code{smooth.spline}
+#'
+#' @param verbose \code{\link{logical}} (with default): Option to suppress
+#' terminal output.
+#'
+#' @param plot \code{\link{logical}} (with default): If \code{TRUE} the
+#' histograms used for the cosmic-ray removal are returned as plot including
+#' the used threshold. Note: A separate plot is returned for each frame!
+#' Currently only for \code{method = "Pych"} a graphical output is provided.
+#'
+#' @param \dots further arguments and graphical parameters that will be passed
+#' to the \code{smooth} function.
+#'
+#' @return Returns same object as input
+#' (\code{\linkS4class{RLum.Data.Spectrum}})
+#'
+#' @note -
+#'
+#' @section Function version: 0.2.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{smooth}},
+#' \code{\link{smooth.spline}}, \code{\link{apply_CosmicRayRemoval}}
+#'
+#' @references Pych, W., 2003. A Fast Algorithm for Cosmic-Ray Removal from
+#' Single Images. Astrophysics 116, 148-153.
+#' \url{http://arxiv.org/pdf/astro-ph/0311290.pdf?origin=publication_detail}
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#'
+#' ##(1) - use with your own data and combine (uncomment for usage)
+#' ## run the "Pych" method twice and then smooth with another method
+#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
+#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
+#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "smooth")
+#'
+#' @export
+apply_CosmicRayRemoval <- function(
+  object,
+  method = "smooth",
+  method.Pych.smoothing = 2,
+  method.Pych.threshold_factor = 3,
+  MARGIN = 2,
+  verbose = FALSE,
+  plot = FALSE,
+  ...
+){
+
+  # Integrity check -----------------------------------------------------------
+
+  ##check if object is of class RLum.Data.Spectrum
+  if(class(object) != "RLum.Data.Spectrum"){
+
+    stop("[apply_CosmicRayRemoval()] Input object is not of type RLum.Data.Spectrum")
+
+  }
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  kind <- if("kind" %in% names(extraArgs)) {extraArgs$kind} else
+  {"3RS3R"}
+
+  twiceit <- if("twiceit" %in% names(extraArgs)) {extraArgs$twiceit} else
+  {TRUE}
+
+  spar <- if("spar" %in% names(extraArgs)) {extraArgs$spar} else
+  {NULL}
+
+  # Apply method ------------------------------------------------------------
+
+  ## +++++++++++++++++++++++++++++++++++ (smooth) ++++++++++++++++++++++++++++##
+  if(method == "smooth"){
+
+    ##apply smooth
+    object.data.temp.smooth <- apply(
+      X = object@data,
+      MARGIN = MARGIN,
+      FUN = stats::smooth,
+      kind = kind,
+      twiceit = twiceit
+    )
+
+    ##rotate output matrix if necessary
+    if(MARGIN == 1){
+      object.data.temp.smooth <- t(object.data.temp.smooth)
+
+    }
+
+    ## +++++++++++++++++++++++++++++++++++ (smooth.spline) +++++++++++++++++++++##
+  }else if(method == "smooth.spline"){
+
+    ##wrap smooth.spline in a helper function to access the fitted values more easily
+    temp_smooth.spline <- function(x, spar){
+      stats::smooth.spline(x, spar = spar)$y
+    }
+
+    ##apply smooth.spline
+    object.data.temp.smooth <-
+      apply(
+        X = object@data,
+        MARGIN = MARGIN,
+        FUN = temp_smooth.spline,
+        spar = spar
+      )
+
+    ##rotate output matrix if necessary
+    if(MARGIN == 1){
+      object.data.temp.smooth <- t(object.data.temp.smooth)
+
+    }
+
+    ## +++++++++++++++++++++++++++++++++++ (Pych) ++++++++++++++++++++++++++++++##
+  }else if(method == "Pych"){
+
+    ## grep data matrix
+    object.data.temp <- object@data
+
+    ## apply smoothing
+    object.data.temp.smooth <- sapply(X = 1:ncol(object.data.temp), function(x){
+
+      ##(1) - calculate sd for each subframe
+      temp.sd <- sd(object.data.temp[,x])
+
+      ##(2) - correct estimation of sd by 1-sigma clipping
+      temp.sd.corr <- sd(object.data.temp[
+
+        object.data.temp[,x] >= (mean(object.data.temp[,x]) - temp.sd) &
+          object.data.temp[,x] <= (mean(object.data.temp[,x]) + temp.sd)
+
+        , x])
+
+      ##(3) - construct histogram of count distribution
+      temp.hist <- hist(object.data.temp[,x],
+                        breaks = length(object.data.temp[,x])/2, plot = FALSE)
+
+      ##(4) - find mode of the histogram (i.e. the peak)
+      temp.hist.max <- which.max(temp.hist$counts)
+
+      ##(5) - find gaps in the histogram (bins with zero value)
+      temp.hist.zerobin <- which(temp.hist$counts == 0)
+
+      ##(5.1)
+      ##select just values right from the peak
+      temp.hist.zerobin <- temp.hist.zerobin[
+        (temp.hist.max[1] + 1):length(temp.hist.zerobin)]
+
+      ##(5.2)
+      ##select non-zerobins
+      temp.hist.nonzerobin <- which(temp.hist$counts != 0)
+      temp.hist.nonzerobin <- temp.hist.nonzerobin[
+        temp.hist.nonzerobin >=  (temp.hist.zerobin[1]-1)]
+
+      ##(6) - find the first gap which is wider than the threshold
+      temp.hist.nonzerobin.diff <- diff(
+        temp.hist$breaks[temp.hist.nonzerobin])
+
+
+      ## select the first value where the threshold is reached
+      ## factor 3 is defined by Pych (2003)
+      temp.hist.thres <- which(
+        temp.hist.nonzerobin.diff >= method.Pych.threshold_factor * temp.sd.corr)[1]
+
+      ##(7) - use counts above the threshold and recalculate values
+      ## on all further values
+      if(!is.na(temp.hist.thres)){
+
+        object.data.temp[,x] <- sapply(1:nrow(object.data.temp), function(n){
+
+          if((n + method.Pych.smoothing) <= nrow(object.data.temp) &
+             (n - method.Pych.smoothing) >= 1){
+
+            ifelse(
+              object.data.temp[n,x] >= temp.hist$breaks[temp.hist.thres],
+              median(object.data.temp[(n-method.Pych.smoothing):
+                                        (n+method.Pych.smoothing),x]),
+              object.data.temp[n,x])
+
+          }else{
+
+            object.data.temp[n,x]
+
+          }
+
+        })
+
+      }
+
+      ##(8) - return histogram used for the removal as plot
+      if(plot){
+
+        plot(temp.hist,
+             xlab = "Signal intensity [a.u.]",
+             main = "Cosmic-ray removal histogram")
+
+        abline(v = temp.hist$breaks[temp.hist.thres],
+               col = "red")
+
+        if(!is.na(temp.hist$breaks[temp.hist.thres])){
+          legend("topright", "threshold" ,lty = 1, lwd = 1, col = "red", bty = "n")
+          mtext(side = 3, paste0("Frame: ", x, " (",
+                                 colnames(object.data.temp)[x],
+                                 ")"))
+
+        }else{
+          mtext(side = 3, paste0("Frame: ", x, " (",
+                                 colnames(object.data.temp)[x],
+                                 ") - no threshold applied!"))
+        }
+
+      }
+
+      ##(9) - return information on the amount of removed cosmic-rays
+
+      if(verbose){
+        #sum up removed counts values above the threshold
+        sum.corrected.channels <- try(
+          sum(temp.hist$counts[temp.hist.thres:length(temp.hist$counts)]),
+          silent = TRUE)
+
+        if(inherits(sum.corrected.channels, "try-error")){sum.corrected.channels <- 0}
+
+        cat("[apply_CosmicRayRemoval()] >> ")
+        cat(paste(sum.corrected.channels, " channels corrected in frame ", x, "\n", sep = ""))
+      }
+
+      ##return object
+      return(object.data.temp[,x])
+
+    })#end loop
+
+
+  }else{
+
+    stop("[apply_CosmicRayRemoval()] Unkown method for cosmic ray removal.")
+
+  }
+
+  # Correct row and column names --------------------------------------------
+
+  colnames(object.data.temp.smooth) <- colnames(object@data)
+  rownames(object.data.temp.smooth) <- rownames(object@data)
+
+
+
+  # Return Output------------------------------------------------------------
+
+  temp.output <- set_RLum(
+    class = "RLum.Data.Spectrum",
+    recordType = object@recordType,
+    curveType = object@curveType,
+    data = object.data.temp.smooth,
+    info = object@info)
+
+  invisible(temp.output)
+
+}
diff --git a/R/apply_EfficiencyCorrection.R b/R/apply_EfficiencyCorrection.R
new file mode 100644
index 0000000..abfdff0
--- /dev/null
+++ b/R/apply_EfficiencyCorrection.R
@@ -0,0 +1,110 @@
+#' Function to apply spectral efficiency correction to RLum.Data.Spectrum S4
+#' class objects
+#'
+#' The function allows spectral efficiency corrections for RLum.Data.Spectrum
+#' S4 class objects
+#'
+#' The efficiency correction is based on a spectral response dataset provided
+#' by the user. Usually the data set for the quantum efficiency is of lower
+#' resolution, so its values are interpolated to the required spectral resolution
+#' using the function \code{\link[stats]{approx}}.
+#'
+#' If the energy calibration differs between the two data sets, \code{NA} values are
+#' produced and removed from the matrix.
+#'
+#' @param object \code{\linkS4class{RLum.Data.Spectrum}} (\bold{required}): S4
+#' object of class \code{RLum.Data.Spectrum}
+#'
+#' @param spectral.efficiency \code{\link{data.frame}} (\bold{required}): Data
+#' set containing wavelengths (x-column) and relative spectral response values
+#' (y-column) in percentage
+#'
+#' @return Returns same object as input
+#' (\code{\linkS4class{RLum.Data.Spectrum}})
+#'
+#' @note Please note that the spectral efficiency data from the camera alone may not
+#' sufficiently correct for spectral efficiency of the entire optical system
+#' (e.g., spectrometer, camera ...).
+#'
+#' @section Function version: 0.1.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France),\cr Johannes Friedrich, University of Bayreuth (Germany)
+#'
+#' @seealso \code{\linkS4class{RLum.Data.Spectrum}}
+#'
+#' @references -
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#'
+#' ##(1) - use with your own data (uncomment for usage)
+#' ## spectral.efficiency <- read.csv("your data")
+#' ##
+#' ## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, spectral.efficiency)
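+#'
+#' ##(2) - minimal sketch of the interpolation and correction step with
+#' ## made-up response values (wavelength in nm, relative response);
+#' ## counts is a stand-in for one column of the spectrum matrix
+#' ## (uncomment for usage)
+#' ## eff <- data.frame(wavelength = seq(300, 700, 100),
+#' ##                   response = c(0.3, 0.6, 0.9, 0.8, 0.4))
+#' ## wl <- 300:700
+#' ## response.fine <- approx(x = eff$wavelength, y = eff$response, xout = wl)$y
+#' ## counts <- rpois(length(wl), lambda = 100)
+#' ## counts.corr <- counts / response.fine * max(response.fine, na.rm = TRUE)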
+#'
+#' @export
+apply_EfficiencyCorrection <- function(
+  object,
+  spectral.efficiency
+){
+
+  # Integrity check -----------------------------------------------------------
+
+  ##check if object is of class RLum.Data.Spectrum
+  if(!is(object, "RLum.Data.Spectrum")){
+
+    stop("[apply_EfficiencyCorrection()] Input object is not of type RLum.Data.Spectrum")
+
+  }
+
+  if(!is(spectral.efficiency, "data.frame")){
+
+    stop("[apply_EfficiencyCorrection()] 'spectral.efficiency' is not of type data.frame")
+
+  }
+
+  ## grep data matrix
+  temp.matrix <- as(object, "matrix")
+
+  ## grep efficiency values
+  temp.efficiency <- as.matrix(spectral.efficiency)
+
+  # Apply method ------------------------------------------------------------
+
+  #set data for interpolation
+  temp.efficiency.x <- as.numeric(row.names(temp.matrix))
+
+  temp.efficiency.interpolated  <- approx(
+    x = temp.efficiency[,1],
+    y = temp.efficiency[,2],
+    xout = temp.efficiency.x)
+
+
+  ##correct for quantum efficiency
+  temp.matrix <- vapply(X = 1:ncol(temp.matrix), FUN = function(x){
+    temp.matrix[,x]/temp.efficiency.interpolated$y*max(temp.efficiency.interpolated$y, na.rm = TRUE)
+
+  }, FUN.VALUE =  numeric(length = nrow(temp.matrix)))
+
+  ##remove NA values
+  temp.matrix <- na.exclude(temp.matrix)
+
+  ##correct colnames
+  colnames(temp.matrix) <- colnames(get_RLum(object))
+
+
+  # Return Output------------------------------------------------------------
+
+  temp.output <- set_RLum(
+    class = "RLum.Data.Spectrum",
+    recordType = object@recordType,
+    curveType = object@curveType,
+    data = temp.matrix,
+    info = object@info)
+
+  invisible(temp.output)
+
+}
diff --git a/R/bin_RLum.Data.R b/R/bin_RLum.Data.R
new file mode 100644
index 0000000..413c6ba
--- /dev/null
+++ b/R/bin_RLum.Data.R
@@ -0,0 +1,50 @@
+#' Channel binning - method dispatcher
+#'
+#' Function calls the object-specific bin functions for RLum.Data S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum.Data}} objects.\cr Depending on the input object, the
+#' corresponding function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum.Data}} class.
+#'
+#' @param object \code{\linkS4class{RLum.Data}} (\bold{required}): S4 object of
+#' class \code{RLum.Data}
+#'
+#' @param ... further arguments passed to the specific class method
+#'
+#' @return An object of the same type as the input object is provided
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @note Currently only \code{RLum.Data} objects of class \code{RLum.Data.Curve} are supported!
+#'
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' @examples
+#'
+#' ##load example data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##create RLum.Data.Curve object from this example
+#' curve <-
+#'   set_RLum(
+#'       class = "RLum.Data.Curve",
+#'       recordType = "OSL",
+#'       data = as.matrix(ExampleData.CW_OSL_Curve)
+#'   )
+#'
+#' ##plot data without and with 2 and 4 channel binning
+#' plot_RLum(curve)
+#' plot_RLum(bin_RLum.Data(curve, bin_size = 2))
+#' plot_RLum(bin_RLum.Data(curve, bin_size = 4))
+#'
+#' @keywords utilities
+#'
+#' @export
+setGeneric("bin_RLum.Data", function(object, ...) {
+  standardGeneric("bin_RLum.Data")
+})
diff --git a/R/calc_AliquotSize.R b/R/calc_AliquotSize.R
new file mode 100644
index 0000000..ba7b1de
--- /dev/null
+++ b/R/calc_AliquotSize.R
@@ -0,0 +1,416 @@
+#' Estimate the amount of grains on an aliquot
+#'
+#' Estimate the number of grains on an aliquot. Alternatively, the packing
+#' density of an aliquot is computed.
+#'
+#' This function can be used to either estimate the number of grains on an
+#' aliquot or to compute the packing density depending on the arguments
+#' provided. \cr The following function is used to estimate the number of
+#' grains \code{n}: \cr \deqn{n = (\pi*x^2)/(\pi*y^2)*d} where \code{x} is the
+#' radius of the aliquot (mm), \code{y} is the mean radius of the
+#' mineral grains (microns) and \code{d} is the packing density (value between 0 and
+#' 1). \cr
+#'
+#' \bold{Packing density} \cr\cr The default value for \code{packing.density}
+#' is 0.65, which is the mean of empirical values determined by Heer et al.
+#' (2012) and unpublished data from the Cologne luminescence laboratory. If
+#' \code{packing.density = "inf"} a maximum density of \eqn{\pi/\sqrt12 =
+#' 0.9068\ldots} is used. However, note that this value is not appropriate as
+#' the standard preparation procedure of aliquots resembles a PECC ("Packing
+#' Equal Circles in a Circle") problem where the maximum packing density is
+#' asymptotic to about 0.87. \cr
+#'
+#' \bold{Monte Carlo simulation} \cr\cr The number of grains on an aliquot can
+#' be estimated by Monte Carlo simulation when setting \code{MC = TRUE}. Each
+#' of the parameters necessary to calculate \code{n} (\code{x}, \code{y},
+#' \code{d}) are assumed to be normally distributed with means \eqn{\mu_x,
+#' \mu_y, \mu_d} and standard deviations \eqn{\sigma_x, \sigma_y, \sigma_d}.
+#' \cr\cr For the mean grain size random samples are taken first from
+#' \eqn{N(\mu_y, \sigma_y)}, where \eqn{\mu_y = mean.grain.size} and
+#' \eqn{\sigma_y = (max.grain.size-min.grain.size)/4} so that 95\% of all
+#' grains are within the provided the grain size range. This effectively takes
+#' into account that after sieving the sample there is still a small chance of
+#' having grains smaller or larger than the used mesh sizes. For each random
+#' sample the mean grain size is calculated, from which random subsamples are
+#' drawn for the Monte Carlo simulation. \cr\cr The packing density is assumed
+#' to be normally distributed with an empirically determined \eqn{\mu = 0.65}
+#' (or provided value) and \eqn{\sigma = 0.18}. The normal distribution is
+#' truncated at \code{d = 0.87} as this is approximately the maximum packing
+#' density that can be achieved in PECC problem. \cr\cr The sample diameter has
+#' \eqn{\mu = sample.diameter} and \eqn{\sigma = 0.2} to take into account
+#' variations in sample disc preparation (i.e. applying silicon spray to the
+#' disc). A lower truncation point at \code{x = 0.5} is used, which assumes
+#' that aliquots with a sample diameter smaller than 0.5 mm are discarded.
+#' Likewise, the normal distribution is truncated at 9.8 mm, which is the
+#' diameter of the sample disc. \cr\cr For each random sample drawn from the
+#' normal distributions the amount of grains on the aliquot is calculated. By
+#' default, \code{10^5} iterations are used, but can be reduced/increased with
+#' \code{MC.iter} (see \code{...}). The results are visualised in a bar- and
+#' boxplot together with a statistical summary.
+#'
+#' @param grain.size \code{\link{numeric}} (\bold{required}): mean grain size
+#' (microns) or a range of grain sizes from which the mean grain size is
+#' computed (e.g. \code{c(100,200)}).
+#' @param sample.diameter \code{\link{numeric}} (\bold{required}): diameter
+#' (mm) of the targeted area on the sample carrier.
+#' @param packing.density \code{\link{numeric}} (with default) empirical value
+#' for mean packing density. \cr If \code{packing.density = "inf"} a hexagonal
+#' structure on an infinite plane with a packing density of \eqn{0.906\ldots}
+#' is assumed.
+#' @param MC \code{\link{logical}} (optional): if \code{TRUE} the function
+#' performs a Monte Carlo simulation for estimating the amount of grains on the
+#' sample carrier and assumes random errors in grain size distribution and
+#' packing density. Requires a vector with min and max grain size for
+#' \code{grain.size}. For more information see details.
+#' @param grains.counted \code{\link{numeric}} (optional) grains counted on a
+#' sample carrier. If a non-zero positive integer is provided this function
+#' will calculate the packing density of the aliquot. If more than one value is
+#' provided the mean packing density and its standard deviation is calculated.
+#' Note that this overrides \code{packing.density}.
+#' @param plot \code{\link{logical}} (with default): plot output
+#' (\code{TRUE}/\code{FALSE})
+#' @param \dots further arguments to pass (\code{main, xlab, MC.iter}).
+#' @return Returns a terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant calculation
+#' results.} \item{args}{\link{list} used arguments} \item{call}{\link{call}
+#' the function call} \item{MC}{\link{list} results of the Monte Carlo
+#' simulation}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @section Function version: 0.31
+#' @author Christoph Burow, University of Cologne (Germany)
+#' @references Duller, G.A.T., 2008. Single-grain optical dating of Quaternary
+#' sediments: why aliquot size matters in luminescence dating. Boreas 37,
+#' 589-612.  \cr\cr Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
+#' are there on a single aliquot? Ancient TL 30, 9-16. \cr\cr \bold{Further
+#' reading} \cr\cr Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
+#' Theorem on Circle Packing. \url{http://arxiv.org/pdf/1009.4322v1.pdf},
+#' 2013-09-13. \cr\cr Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
+#' Oestergard, P.R.J., 1998.  Dense packings of congruent circles in a circle.
+#' Discrete Mathematics 181, 139-154. \cr\cr Huang, W., Ye, T., 2011. Global
+#' optimization method for finding dense packings of equal circles in a circle.
+#' European Journal of Operational Research 210, 474-481.
+#' @examples
+#'
+#' ## Estimate the amount of grains on a small aliquot
+#' calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, MC.iter = 100)
+#'
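+#' ## hand calculation of the estimate above, following the formula from the
+#' ## details section (aliquot of 1 mm diameter, mean grain size 125 microns,
+#' ## default packing density 0.65); a cross-check only, not package code
+#' (pi * (1/2)^2) / (pi * (125/2000)^2) * 0.65 ## ~42 grains
+#'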
+#' ## Calculate the mean packing density of large aliquots
+#' calc_AliquotSize(grain.size = c(100,200), sample.diameter = 8,
+#'                  grains.counted = c(2525,2312,2880), MC.iter = 100)
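+#'
+#' ## sketch of the truncated normal draws behind the Monte Carlo simulation
+#' ## (a simplified stand-in for the internal sampling, not the full MC run;
+#' ## uncomment for usage)
+#' ## d.mc  <- pmin(pmax(rnorm(10^4, mean = 0.65, sd = 0.18), 0.25), 0.87)
+#' ## sd.mc <- pmin(pmax(rnorm(10^4, mean = 9.8, sd = 0.2), 0.5), 9.8)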
+#'
+#' @export
+calc_AliquotSize <- function(
+  grain.size,
+  sample.diameter,
+  packing.density = 0.65,
+  MC = TRUE,
+  grains.counted,
+  plot=TRUE,
+  ...
+){
+  ##==========================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##==========================================================================##
+
+  if(length(grain.size) == 0 | length(grain.size) > 2) {
+    cat(paste("\nPlease provide the mean grain size or a range",
+              "of grain sizes (in microns).\n"), fill = FALSE)
+    stop(domain=NA)
+  }
+
+  if(packing.density < 0 | packing.density > 1) {
+    if(packing.density == "inf") {
+    } else {
+      cat(paste("\nOnly values between 0 and 1 allowed for packing density!\n"))
+      stop(domain=NA)
+    }
+  }
+
+  if(sample.diameter < 0) {
+    cat(paste("\nPlease provide only positive integers.\n"))
+    stop(domain=NA)
+  }
+
+  if (sample.diameter > 9.8)
+    warning("\n A sample diameter of ", sample.diameter ," mm was specified, but common sample discs are 9.8 mm in diameter.", call. = FALSE)
+
+  if(missing(grains.counted) == FALSE) {
+    if(MC == TRUE) {
+      MC = FALSE
+      cat(paste("\nMonte Carlo simulation is only available for estimating the",
+                "amount of grains on the sample disc. Automatically set to",
+                "FALSE.\n"))
+    }
+  }
+
+  if(MC == TRUE && length(grain.size) != 2) {
+    cat(paste("\nPlease provide a vector containing the min and max grain",
+              "grain size(e.g. c(100,150) when using Monte Carlo simulations.\n"))
+    stop(domain=NA)
+  }
+
+
+  ##==========================================================================##
+  ## ... ARGUMENTS
+  ##==========================================================================##
+
+  extraArgs <- list(...)
+
+  ## set number of Monte Carlo iterations
+  if("MC.iter" %in% names(extraArgs)) {
+    MC.iter<- extraArgs$MC.iter
+  } else {
+    MC.iter<- 10^4
+  }
+
+  ##==========================================================================##
+  ## CALCULATIONS
+  ##==========================================================================##
+
+  # calculate the mean grain size
+  range.flag<- FALSE
+  if(length(grain.size) == 2) {
+    gs.range<- grain.size
+    grain.size<- mean(grain.size)
+    range.flag<- TRUE
+  }
+
+  # use ~0.907... from Thue's Theorem as packing density
+  if(packing.density == "inf") {
+    packing.density = pi/sqrt(12)
+  }
+
+  # function to calculate the amount of grains
+  calc_n<- function(sd, gs, d) {
+    n<- ((pi*(sd/2)^2)/
+           (pi*(gs/2000)^2))*d
+    return(n)
+  }
+
+  # calculate the amount of grains on the aliquot
+  if(missing(grains.counted) == TRUE) {
+    n.grains<- calc_n(sample.diameter, grain.size, packing.density)
+
+    ##========================================================================##
+    ## MONTE CARLO SIMULATION
+
+    if(MC == TRUE && range.flag == TRUE) {
+
+      # create a random set of packing densities assuming a normal
+      # distribution with the empirically determined standard deviation of
+      # 0.18.
+      d.mc<- rnorm(MC.iter, packing.density, 0.18)
+
+      # in a PECC the packing density can not be larger than ~0.87
+      d.mc[which(d.mc > 0.87)]<- 0.87
+      d.mc[which(d.mc < 0.25)]<- 0.25
+
+      # create a random set of sample diameters assuming a normal
+      # distribution with an assumed standard deviation of 0.2
+      sd.mc<- rnorm(MC.iter, sample.diameter, 0.2)
+
+      # it is assumed that sample diameters < 0.5 mm either do not
+      # occur, or are discarded. Either way, any smaller sample
+      # diameter is capped at 0.5.
+      # Also, the sample diameter can not be larger than the sample
+      # disc, i.e. 9.8 mm.
+      sd.mc[which(sd.mc <0.5)]<- 0.5
+      if (sample.diameter <= 9.8)
+        sd.mc[which(sd.mc >9.8)]<- 9.8
+
+      # create random samples assuming a normal distribution
+      # with the mean grain size as mean and half the range (min:max)
+      # as standard deviation. For a more conservative estimate this
+      # is further divided by 2, so half the range is regarded as
+      # two sigma.
+      gs.mc<- rnorm(MC.iter, grain.size, diff(gs.range)/4)
+
+      # draw random samples from the grain size spectrum (gs.mc) and calculate
+      # the mean for each sample. This gives an approximation of the variation
+      # in mean grain size on the sample disc
+      gs.mc.sampleMean<- vector(mode = "numeric")
+
+
+      for(i in 1:length(gs.mc)) {
+        gs.mc.sampleMean[i]<- mean(sample(gs.mc, calc_n(
+          sample(sd.mc, size = 1),
+          grain.size,
+          sample(d.mc, size = 1)
+        ), replace = TRUE))
+      }
+
+      # create empty vector for MC estimates of n
+      MC.n<- vector(mode="numeric")
+
+      # calculate n for each MC data set
+      for(i in 1:length(gs.mc)) {
+        MC.n[i]<- calc_n(sd.mc[i],
+                         gs.mc.sampleMean[i],
+                         d.mc[i])
+      }
+
+      # summarize MC estimates
+      MC.q<- quantile(MC.n, c(0.05,0.95))
+      MC.n.kde<- density(MC.n, n = 10000)
+
+      # apply student's t-test
+      MC.t.test<- t.test(MC.n)
+      MC.t.lower<- MC.t.test$conf.int[1]
+      MC.t.upper<- MC.t.test$conf.int[2]
+      MC.t.se<- (MC.t.upper-MC.t.lower)/3.92
+
+
+      # get unweighted statistics from calc_Statistics() function
+      MC.stats<- calc_Statistics(as.data.frame(cbind(MC.n,0.0001)))$unweighted
+
+    }
+  }#EndOf:estimate number of grains
+
+
+  ##========================================================================##
+  ## CALCULATE PACKING DENSITY
+
+  if(missing(grains.counted) == FALSE) {
+
+    area.container<- pi*sample.diameter^2
+
+    if(length(grains.counted) == 1) {
+      area.grains<- (pi*(grain.size/1000)^2)*grains.counted
+      packing.density<- area.grains/area.container
+    }
+    else {
+      packing.densities<- length(grains.counted)
+      for(i in 1:length(grains.counted)) {
+        area.grains<- (pi*(grain.size/1000)^2)*grains.counted[i]
+        packing.densities[i]<- area.grains/area.container
+      }
+      std.d<- sd(packing.densities)
+    }
+  }
+
+  ##==========================================================================##
+  ##TERMINAL OUTPUT
+  ##==========================================================================##
+
+  cat("\n [calc_AliquotSize]")
+  cat(paste("\n\n ---------------------------------------------------------"))
+  cat(paste("\n mean grain size (microns)  :", grain.size))
+  cat(paste("\n sample diameter (mm)       :", sample.diameter))
+  if(missing(grains.counted) == FALSE) {
+    if(length(grains.counted) == 1) {
+      cat(paste("\n counted grains             :", grains.counted))
+    } else {
+      cat(paste("\n mean counted grains        :", round(mean(grains.counted))))
+    }
+  }
+  if(missing(grains.counted) == TRUE) {
+    cat(paste("\n packing density            :", round(packing.density,3)))
+  }
+  if(missing(grains.counted) == FALSE) {
+    if(length(grains.counted) == 1) {
+      cat(paste("\n packing density            :", round(packing.density,3)))
+    } else {
+      cat(paste("\n mean packing density       :", round(mean(packing.densities),3)))
+      cat(paste("\n standard deviation         :", round(std.d,3)))
+    }
+  }
+  if(missing(grains.counted) == TRUE) {
+    cat(paste("\n number of grains           :", round(n.grains,0)))
+  }
+
+
+
+  if(MC == TRUE && range.flag == TRUE) {
+    cat(paste("\n\n --------------- Monte Carlo Estimates -------------------"))
+    cat(paste("\n number of iterations (n)     :", MC.iter))
+    cat(paste("\n median                       :", round(MC.stats$median)))
+    cat(paste("\n mean                         :", round(MC.stats$mean)))
+    cat(paste("\n standard deviation (mean)    :", round(MC.stats$sd.abs)))
+    cat(paste("\n standard error (mean)        :", round(MC.stats$se.abs, 1)))
+    cat(paste("\n 95% CI from t-test (mean)    :", round(MC.t.lower), "-", round(MC.t.upper)))
+    cat(paste("\n standard error from CI (mean):", round(MC.t.se, 1)))
+    cat(paste("\n ---------------------------------------------------------\n"))
+
+  } else {
+    cat(paste("\n ---------------------------------------------------------\n"))
+  }
+
+  ##==========================================================================##
+  ##RETURN VALUES
+  ##==========================================================================##
+
+
+  # prepare return values for mode: estimate grains
+  if(missing(grains.counted) == TRUE) {
+    summary<- data.frame(grain.size = grain.size,
+                         sample.diameter = sample.diameter,
+                         packing.density = packing.density,
+                         n.grains = round(n.grains,0),
+                         grains.counted = NA)
+  }
+
+  # prepare return values for mode: estimate packing density/densities
+  if(missing(grains.counted) == FALSE) {
+
+    # return values if only one value for counted.grains is provided
+    if(length(grains.counted) == 1) {
+      summary<- data.frame(grain.size = grain.size,
+                           sample.diameter = sample.diameter,
+                           packing.density = packing.density,
+                           n.grains = NA,
+                           grains.counted = grains.counted)
+    } else {
+      # return values if more than one value for counted.grains is provided
+      summary<- data.frame(rbind(1:5))
+      colnames(summary)<- c("grain.size", "sample.diameter", "packing.density",
+                            "n.grains","grains.counted")
+      for(i in 1:length(grains.counted)) {
+        summary[i,]<- c(grain.size, sample.diameter, packing.densities[i],
+                        n.grains = NA, grains.counted[i])
+      }
+    }
+  }
+
+  if(MC == FALSE) {
+    MC.n<- NULL
+    MC.stats<- NULL
+    MC.n.kde<- NULL
+    MC.t.test<- NULL
+    MC.q<- NULL
+  }
+
+  if(missing(grains.counted)) grains.counted<- NA
+
+  call<- sys.call()
+  args<- list(grain.size = grain.size, sample.diameter = sample.diameter, packing.density = packing.density, MC = MC, grains.counted = grains.counted, MC.iter=MC.iter)
+
+  # create S4 object
+  newRLumResults.calc_AliquotSize <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary=summary,
+      args=args,
+      call=call,
+      MC=list(estimates=MC.n,
+              statistics=MC.stats,
+              kde=MC.n.kde,
+              t.test=MC.t.test,
+              quantile=MC.q)
+    ))
+
+  ##=========##
+  ## PLOTTING
+  if(plot==TRUE) {
+    try(plot_RLum.Results(newRLumResults.calc_AliquotSize, ...))
+  }
+
+  # Return values
+  invisible(newRLumResults.calc_AliquotSize)
+
+}
diff --git a/R/calc_CentralDose.R b/R/calc_CentralDose.R
new file mode 100644
index 0000000..1e0c4aa
--- /dev/null
+++ b/R/calc_CentralDose.R
@@ -0,0 +1,259 @@
+#' Apply the central age model (CAM) after Galbraith et al. (1999) to a given
+#' De distribution
+#'
+#' This function calculates the central dose and dispersion of the De
+#' distribution, their standard errors and the profile log likelihood function
+#' for sigma.
+#'
+#' This function uses the equations of Galbraith & Roberts (2012). The
+#' parameters \code{delta} and \code{sigma} are estimated by numerically solving
+#' eq. 15 and 16. Their standard errors are approximated using eq. 17.
+#' In addition, the profile log-likelihood function for \code{sigma} is
+#' calculated using eq. 18 and presented as a plot. Numerical values of the 
+#' maximum likelihood approach are \bold{only} presented in the plot and \bold{not}
+#' in the console. A detailed explanation on maximum likelihood estimation can be found in the
+#' appendix of Galbraith & Laslett (1993, 468-470) and Galbraith & Roberts
+#' (2012, 15)
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' 
+#' @param sigmab \code{\link{numeric}} (with default): spread in De values
+#' given as a fraction (e.g. 0.2). This value represents the expected
+#' overdispersion in the data should the sample be well-bleached (Cunningham &
+#' Wallinga 2012, p. 100).
+#' 
+#' @param log \code{\link{logical}} (with default): fit the (un-)logged central
+#' age model to De data
+#' 
+#' @param plot \code{\link{logical}} (with default): plot output
+#' 
+#' @param \dots further arguments (\code{trace, verbose}).
+#' 
+#' @return Returns a plot (optional) and terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#' \item{profile}{\link{data.frame} the log likelihood profile for sigma}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @section Function version: 1.3.1
+#' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
+#' rewritten S script of Rex Galbraith, 2010 \cr
+#' @seealso \code{\link{plot}}, \code{\link{calc_CommonDose}},
+#' \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+#' \code{\link{calc_MinDose}}
+#' @references Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
+#' mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
+#' \cr \cr Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
+#' J.M., 1999. Optical dating of single grains of quartz from Jinmium rock
+#' shelter, northern Australia. Part I: experimental design and statistical
+#' models.  Archaeometry 41, 339-364. \cr \cr Galbraith, R.F. & Roberts, R.G.,
+#' 2012. Statistical aspects of equivalent dose and error calculation and
+#' display in OSL dating: An overview and some recommendations. Quaternary
+#' Geochronology 11, 1-27. \cr \cr \bold{Further reading} \cr \cr Arnold, L.J.
+#' & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+#' (De) distributions: Implications for OSL dating of sediment mixtures.
+#' Quaternary Geochronology 4, 204-230. \cr \cr Bailey, R.M. & Arnold, L.J.,
+#' 2006. Statistical modelling of single grain quartz De distributions and an
+#' assessment of procedures for estimating burial dose. Quaternary Science
+#' Reviews 25, 2475-2502. \cr \cr Cunningham, A.C. & Wallinga, J., 2012.
+#' Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr \cr Rodnight, H., Duller, G.A.T.,
+#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' of optical dating of fluvial deposits.  Quaternary Geochronology, 1 109-120.
+#' \cr \cr Rodnight, H., 2008. How many equivalent dose values are needed to
+#' obtain a reproducible distribution? Ancient TL 26, 3-10.
+#' @examples
+#'
+#' ##load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ##apply the central dose model
+#' calc_CentralDose(ExampleData.DeValues$CA1)
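+#'
+#' ##sketch of the fixed-point iteration behind eq. 15 and 16 with made-up
+#' ##De values; the function itself should be used for real data
+#' ##(uncomment for usage)
+#' ## yu <- log(c(40, 45, 50, 55)); su <- c(0.05, 0.06, 0.05, 0.07)
+#' ## sigma <- 0.15
+#' ## for (j in 1:200) {
+#' ##   wu <- 1 / (sigma^2 + su^2)
+#' ##   delta <- sum(wu * yu) / sum(wu)
+#' ##   sigma <- sigma * sqrt(sum(wu^2 * (yu - delta)^2 / sum(wu)))
+#' ## }
+#' ## exp(delta) ## central dose (Gy)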
+#'
+#' @export
+calc_CentralDose <- function(data, sigmab, log = TRUE, plot = TRUE, ...) {
+  
+  ## ============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ## ============================================================================##
+  
+  if (!missing(data)) {
+    if (!is(data, "data.frame") & !is(data, "RLum.Results")) {
+      stop("[calc_CentralDose] Error: 'data' object has to be of type\n 
+           'data.frame' or 'RLum.Results'!")
+    } else {
+      if (is(data, "RLum.Results")) {
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+  try(colnames(data) <- c("ED", "ED_Error"), silent = TRUE)
+  if (colnames(data[1]) != "ED" || colnames(data[2]) != "ED_Error") {
+    cat(paste("Columns must be named 'ED' and 'ED_Error'"), fill = FALSE)
+    stop(domain = NA)
+  }
+  if (!missing(sigmab)) {
+    if (sigmab < 0 || (sigmab > 1 && log)) {
+      cat(paste("sigmab needs to be given as a fraction between", "0 and 1 (e.g. 0.2)"), 
+          fill = FALSE)
+      stop(domain = NA)
+    }
+  }
+  
+  ## ============================================================================##
+  ## ... ARGUMENTS
+  ## ============================================================================##
+  
+  options <- list(verbose = TRUE,
+                  trace = FALSE)
+  
+  options <- modifyList(options, list(...))
+  
+  
+  ## ============================================================================##
+  ## CALCULATIONS
+  ## ============================================================================##
+  
+  # set default value of sigmab
+  if (missing(sigmab)) 
+    sigmab <- 0
+  
+  # calculate yu = log(ED) and su = se(logED)
+  if (log) {
+    yu <- log(data$ED)
+    su <- sqrt((data$ED_Error / data$ED)^2 + sigmab^2)
+  } else {
+    yu <- data$ED
+    su<- sqrt((data$ED_Error)^2 + sigmab^2)
+  }
+
+  
+  # calculate starting values and weights
+  sigma <- 0.15
+  wu <- 1 / (sigma^2 + su^2)
+  delta <- sum(wu * yu) / sum(wu)
+  n <- length(yu)
+  
+  # compute mle's
+  for (j in 1:200) {
+    delta <- sum(wu * yu) / sum(wu)
+    sigma <- sigma * sqrt(sum((wu^2) * (yu - delta)^2 / sum(wu)))
+    wu <- 1 / (sigma^2 + su^2)
+    
+    # print iterations
+    if (options$trace) 
+      print(round(c(delta, sigma), 4))
+  }
+  
+  # save parameters for terminal output
+  out.delta <- ifelse(log, exp(delta), delta)
+  out.sigma <- ifelse(log, sigma * 100, sigma / out.delta * 100)
+  
+  # log likelihood
+  llik <- 0.5 * sum(log(wu)) - 0.5 * sum(wu * (yu - delta)^2)
+  # save parameter for terminal output
+  out.llik <- round(llik, 4)
+  Lmax <- llik
+  
+  # standard errors
+  sedelta <- 1 / sqrt(sum(wu))
+  sesigma <- 1 / sqrt(2 * sigma^2 * sum(wu^2))
+  
+  # save parameters for terminal output
+  if (log) {
+    out.sedelta <- sedelta * 100
+    out.sesigma <- sesigma
+  } else {
+    out.sedelta <- sedelta / out.delta * 100
+    out.sesigma <- sqrt((sedelta / delta)^2 + 
+                          (sesigma / out.delta * 100 / out.sigma)^2) * out.sigma / 100
+    
+  }
+  
+  # profile log likelihood
+  sigmax <- sigma
+  llik <- 0
+  sig0 <- max(0, sigmax - 8 * sesigma)
+  sig1 <- sigmax + 9.5 * sesigma
+  sig <- try(seq(sig0, sig1, sig1 / 1000), silent = TRUE)
+  
+  if (class(sig) != "try-error") {
+    # TODO: rewrite this loop as a function and maximise with mle2 ll is the actual
+    # log likelihood, llik is a vector of all ll
+    for (s in sig) {
+      wu <- 1 / (s^2 + su^2)
+      mu <- sum(wu * yu)/sum(wu)
+      ll <- 0.5 * sum(log(wu)) - 0.5 * sum(wu * (yu - mu)^2)
+      llik <- c(llik, ll)
+    }
+    llik <- llik[-1] - Lmax
+  } #endif::try-error
+  
+  
+  ## ============================================================================##
+  ## TERMINAL OUTPUT
+  ## ============================================================================##
+  
+  if (options$verbose) {
+    cat("\n [calc_CentralDose]")
+    cat(paste("\n\n----------- meta data ----------------"))
+    cat(paste("\n n:                      ", n))
+    cat(paste("\n log:                    ", log))
+    cat(paste("\n----------- dose estimate ------------"))
+    cat(paste("\n central dose [Gy]:      ", format(out.delta, digits = 2, nsmall = 2)))
+    cat(paste("\n SE [Gy]:                ", format(out.delta * out.sedelta/100, 
+                                                   digits = 2, nsmall = 2)))
+    cat(paste("\n rel. SE [%]:            ", format(out.sedelta, digits = 2, nsmall = 2)))
+    cat(paste("\n----------- overdispersion -----------"))
+    cat(paste("\n OD [Gy]:                ", format(ifelse(log, sigma * out.delta, sigma), digits = 2, nsmall = 2)))
+    cat(paste("\n SE [Gy]:                ", format(ifelse(log, sesigma * out.delta, sesigma), digits = 2, nsmall = 2)))
+    cat(paste("\n OD [%]:                 ", format(out.sigma, digits = 2, nsmall = 2)))
+    cat(paste("\n SE [%]:                 ", if (class(sig) != "try-error") {
+      format(out.sesigma * 100, digits = 2, nsmall = 2)
+    } else {
+      "-"
+    }))
+    cat(paste("\n-------------------------------------\n\n"))
+  }
+  
+  ## ============================================================================##
+  ## RETURN VALUES
+  ## ============================================================================##
+  
+  if (class(sig) == "try-error") {
+    out.sigma <- 0
+    out.sesigma <- NA
+  }
+  
+  if(!log)
+    sig <- sig / delta
+    
+  
+  summary <- data.frame(de = out.delta, de_err = out.delta * out.sedelta / 100, 
+                        OD = out.sigma, OD_err = out.sesigma * 100, Lmax = Lmax)
+  
+  call <- sys.call()
+  args <- list(log = "TRUE", sigmab = sigmab)
+  
+  newRLumResults.calc_CentralDose <- set_RLum(class = "RLum.Results", 
+                                              data = list(summary = summary, 
+                                                          data = data, 
+                                                          args = args,
+                                                          call = call, 
+                                                          profile = data.frame(sig = sig, 
+                                                                               llik = llik)))
+  
+  ## =========## PLOTTING
+  if (plot && !inherits(sig, "try-error"))
+    try(plot_RLum.Results(newRLumResults.calc_CentralDose, ...))
+
+  invisible(newRLumResults.calc_CentralDose)
+}
+
diff --git a/R/calc_CommonDose.R b/R/calc_CommonDose.R
new file mode 100644
index 0000000..863ea8b
--- /dev/null
+++ b/R/calc_CommonDose.R
@@ -0,0 +1,178 @@
+#' Apply the (un-)logged common age model after Galbraith et al. (1999) to a
+#' given De distribution
+#'
+#' Function to calculate the common dose of a De distribution.
+#'
+#' \bold{(Un-)logged model} \cr\cr When \code{log = TRUE} this function
+#' calculates the weighted mean of logarithmic De values. Each of the estimates
+#' is weighted by the inverse square of its relative standard error. The
+#' weighted mean is then transformed back to the dose scale (Galbraith &
+#' Roberts 2012, p. 14).\cr\cr The log transformation is not applicable if the
+#' De estimates are close to zero or negative. In this case the un-logged model
+#' can be applied instead (\code{log = FALSE}). The weighted mean is then
+#' calculated using the un-logged estimates of De and their absolute standard
+#' error (Galbraith & Roberts 2012, p. 14).
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' @param sigmab \code{\link{numeric}} (with default): spread in De values
+#' given as a fraction (e.g. 0.2). This value represents the expected
+#' overdispersion in the data should the sample be well-bleached (Cunningham &
+#' Wallinga 2012, p. 100).
+#' @param log \code{\link{logical}} (with default): fit the (un-)logged common
+#' age model to De data
+#' @param \dots currently not used.
+#' @return Returns a terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @section Function version: 0.1
+#' @author Christoph Burow, University of Cologne (Germany)
+#' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
+#' \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+#' @references Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
+#' mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
+#' \cr\cr Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
+#' J.M., 1999. Optical dating of single grains of quartz from Jinmium rock
+#' shelter, northern Australia. Part I: experimental design and statistical
+#' models.  Archaeometry 41, 339-364. \cr\cr Galbraith, R.F. & Roberts, R.G.,
+#' 2012. Statistical aspects of equivalent dose and error calculation and
+#' display in OSL dating: An overview and some recommendations. Quaternary
+#' Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
+#' Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+#' (De) distributions: Implications for OSL dating of sediment mixtures.
+#' Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
+#' 2006. Statistical modelling of single grain quartz De distributions and an
+#' assessment of procedures for estimating burial dose. Quaternary Science
+#' Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+#' Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' of optical dating of fluvial deposits. Quaternary Geochronology 1,
+#' 109-120.\cr\cr Rodnight, H., 2008. How many equivalent dose values are
+#' needed to obtain a reproducible distribution? Ancient TL 26, 3-10.
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## apply the common dose model
+#' calc_CommonDose(ExampleData.DeValues$CA1)
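+#'
+#' ## sketch of the logged model with made-up values: weighted mean of
+#' ## log De, each estimate weighted by the inverse square of its relative
+#' ## standard error (uncomment for usage)
+#' ## de <- c(40, 45, 50); se <- c(2, 3, 2.5)
+#' ## wu <- 1 / (se / de)^2
+#' ## exp(sum(wu * log(de)) / sum(wu)) ## common dose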
+#'
+#' @export
+calc_CommonDose <- function(
+  data,
+  sigmab,
+  log=TRUE,
+  ...
+) {
+  
+  ##============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##============================================================================##
+  
+  if(missing(data)==FALSE){
+    if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){
+      stop("[calc_CentralDose] Error: 'data' object has to be of type
+           'data.frame' or 'RLum.Results'!")
+    }else{
+      if(is(data, "RLum.Results") == TRUE){
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+  try(colnames(data)<- c("ED","ED_Error"), silent = TRUE)
+  if(colnames(data[1])!="ED"||colnames(data[2])!="ED_Error") {
+    cat(paste("Columns must be named 'ED' and 'ED_Error'"), fill = FALSE)
+    stop(domain=NA)
+  }
+  if(!missing(sigmab)) {
+    if(sigmab <0 | sigmab >1) {
+      cat(paste("sigmab needs to be given as a fraction between",
+                "0 and 1 (e.g. 0.2)"), fill = FALSE)
+      stop(domain=NA)
+    }
+  }
+  
+  
+  ##============================================================================##
+  ## ADDITIONAL ARGUMENTS
+  ##============================================================================##
+  settings <- list(verbose = TRUE)
+  settings <- modifyList(settings, list(...))
+  
+  ##============================================================================##
+  ## CALCULATIONS
+  ##============================================================================##
+  
+  # set default value of sigmab
+  if (missing(sigmab)) sigmab<- 0
+  
+  # calculate  yu = log(ED) and su = se(logED)
+  if (log) {
+    yu<- log(data$ED)
+    su<- sqrt( (data$ED_Error/data$ED)^2 + sigmab^2 )
+  }
+  else {
+    yu<- data$ED
+    su<- sqrt((data$ED_Error)^2 + sigmab^2)
+  }
+  
+  # calculate weights
+  wu<- 1/su^2
+  delta<- sum(wu*yu)/sum(wu)
+  n<- length(yu)
+  
+  #standard error
+  sedelta<- 1/sqrt(sum(wu))
+  if (!log) {
+    sedelta<- sedelta/delta
+  }
+  
+  if (log){
+    delta<- exp(delta)
+  }
+  
+  ##============================================================================##
+  ## TERMINAL OUTPUT
+  ##============================================================================##
+  
+  if (settings$verbose) {
+    cat("\n [calc_CommonDose]")
+    cat(paste("\n\n----------- meta data --------------"))
+    cat(paste("\n n:                      ",n))
+    cat(paste("\n log:                    ",if(log==TRUE){"TRUE"}else{"FALSE"}))
+    cat(paste("\n----------- dose estimate ----------"))
+    cat(paste("\n common dose:            ", round(delta,2)))
+    cat(paste("\n SE:                     ", round(delta*sedelta, 2)))
+    cat(paste("\n rel. SE [%]:            ", round(sedelta*100,2)))
+    cat(paste("\n------------------------------------\n\n"))
+  }
+  
+  ##============================================================================##
+  ## RETURN VALUES
+  ##============================================================================##
+  
+  summary<- data.frame(de=delta,
+                       de_err=delta*sedelta)
+  
+  call<- sys.call()
+  args<- list(log=log, sigmab=sigmab)
+  
+  newRLumResults.calc_CommonDose<- set_RLum(
+    class = "RLum.Results",
+    data = list(summary = summary,
+                data = data,
+                args = args,
+                call = call))
+  
+  invisible(newRLumResults.calc_CommonDose)
+  
+}
diff --git a/R/calc_CosmicDoseRate.R b/R/calc_CosmicDoseRate.R
new file mode 100644
index 0000000..ff08c41
--- /dev/null
+++ b/R/calc_CosmicDoseRate.R
@@ -0,0 +1,574 @@
+#' Calculate the cosmic dose rate
+#'
+#' This function calculates the cosmic dose rate taking into account the soft-
+#' and hard-component of the cosmic ray flux and allows corrections for
+#' geomagnetic latitude, altitude above sea-level and geomagnetic field
+#' changes.
+#'
+#' This function calculates the total cosmic dose rate considering both the
+#' soft- and hard-component of the cosmic ray flux.\cr
+#'
+#' \bold{Internal calculation steps}
+#'
+#' (1) Calculate total depth of all absorber in hg/cm^2 (1 hg/cm^2 = 100
+#' g/cm^2)
+#'
+#' \deqn{absorber = depth_1*density_1 + depth_2*density_2 + ... + depth_n*
+#' density_n}
+#'
+#' (2) If \code{half.depth = TRUE}
+#'
+#' \deqn{absorber = absorber/2}
+#'
+#' (3) Calculate cosmic dose rate at sea-level and 55 deg. latitude
+#'
+#' a) If absorber is > 167 g/cm^2 (only hard-component; Allkofer et al.  1975):
+#' apply equation given by Prescott & Hutton (1994) (c.f. Barbouti & Rastin
+#' 1983)
+#'
+#' \deqn{D0 = C/(((absorber+d)^\alpha+a)*(absorber+H))*exp(-B*absorber)}
+#'
+#' b) If absorber is < 167 g/cm^2 (soft- and hard-component): derive D0 from
+#' Fig. 1 in Prescott & Hutton (1988).
+#'
+#' (4) Calculate geomagnetic latitude (Prescott & Stephan 1982, Prescott &
+#' Hutton 1994)
+#'
+#' \deqn{\lambda = arcsin(0.203*cos(latitude)*cos(longitude-291)+0.979*
+#' sin(latitude))}
+#'
+#' (5) Apply correction for geomagnetic latitude and altitude above sea-level.
+#' Values for F, J and H were read from Fig. 3 shown in Prescott & Stephan
+#' (1982) and fitted with 3-degree polynomials for lambda < 35 degree and a
+#' linear fit for lambda > 35 degree.
+#'
+#' \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))}
+#'
+#' (6) Optional: Apply correction for geomagnetic field changes in the last
+#' 0-80 ka (Prescott & Hutton 1994). Correction and altitude factors are given
+#' in Table 1 and Fig. 1 in Prescott & Hutton (1994). Values for altitude
+#' factor were fitted with a 2-degree polynomial. The altitude factor is
+#' operated on the decimal part of the correction factor.
+#'
+#' \deqn{Dc' = Dc*correctionFactor}
+#'
+#' \bold{Usage of \code{depth} and \code{density}}
+#'
+#' (1) If only one value for depth and density is provided, the cosmic dose
+#' rate is calculated for exactly one sample and one absorber as overburden
+#' (i.e. \code{depth*density}).
+#'
+#' (2) In some cases it might be useful to calculate the cosmic dose rate for a
+#' sample that is overlain by more than one absorber, e.g. in a profile with
+#' soil layers of different thickness and a distinct difference in density.
+#' This can be calculated by providing a matching number of values for
+#' \code{depth} and \code{density} (e.g. \code{depth = c(1, 2), density =
+#' c(1.7, 2.4)})
+#'
+#' (3) Another possibility is to calculate the cosmic dose rate for more than
+#' one sample of the same profile. This is done by providing more than one
+#' values for \code{depth} and only one for \code{density}. For example,
+#' \code{depth = c(1, 2, 3), density = 1.7} will calculate the cosmic dose rate
+#' for three samples in 1, 2 and 3 m depth in a sediment of density 1.7 g/cm^3.
+#'
+#' @param depth \code{\link{numeric}} (\bold{required}): depth of overburden
+#' (m).  For more than one absorber use \cr \code{c(depth_1, depth_2, ...,
+#' depth_n)}
+#' @param density \code{\link{numeric}} (\bold{required}): average overburden
+#' density (g/cm^3). For more than one absorber use \cr \code{c(density_1,
+#' density_2, ..., density_n)}
+#' @param latitude \code{\link{numeric}} (\bold{required}): latitude (decimal
+#' degree), N positive
+#' @param longitude \code{\link{numeric}} (\bold{required}): longitude (decimal
+#' degree), E positive
+#' @param altitude \code{\link{numeric}} (\bold{required}): altitude (m above
+#' sea-level)
+#' @param corr.fieldChanges \code{\link{logical}} (with default): correct for
+#' geomagnetic field changes after Prescott & Hutton (1994). Apply only when
+#' justified by the data.
+#' @param est.age \code{\link{numeric}} (with default): estimated age range
+#' (ka) for geomagnetic field change correction (0-80 ka allowed)
+#' @param half.depth \code{\link{logical}} (with default): How to deal with
+#' varying overburden thickness. If \code{TRUE} only half the depth is used for
+#' calculation. Apply only when justified, i.e. when a constant sedimentation
+#' rate can safely be assumed.
+#' @param error \code{\link{numeric}} (with default): general error
+#' (percentage) to be implemented on corrected cosmic dose rate estimate
+#' @return Returns a terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant calculation
+#' results.} \item{args}{\link{list} used arguments} \item{call}{\link{call}
+#' the function call}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @note Despite its universal use the equation to calculate the cosmic dose
+#' rate provided by Prescott & Hutton (1994) is falsely stated to be valid from
+#' the surface to 10^4 hg/cm^2 of standard rock. The original expression by
+#' Barbouti & Rastin (1983) only considers the muon flux (i.e. hard-component)
+#' and is by their own definition only valid for depths between 10-10^4
+#' hg/cm^2.
+#'
+#' Thus, for near-surface samples (i.e. for depths < 167 g/cm^2) the equation
+#' of Prescott & Hutton (1994) underestimates the total cosmic dose rate, as it
+#' neglects the influence of the soft-component of the cosmic ray flux. For
+#' samples at zero depth and at sea-level the underestimation can be as large
+#' as ~0.1 Gy/ka. In a previous article, Prescott & Hutton (1988) give another
+#' approximation of Barbouti & Rastins equation in the form of
+#'
+#' \deqn{D = 0.21*exp(-0.070*absorber+0.0005*absorber^2)}
+#'
+#' which is valid for depths between 150-5000 g/cm^2. For shallower depths (<
+#' 150 g/cm^2) they provided a graph (Fig. 1) from which the dose rate can be
+#' read.
+#'
+#' As a result, this function employs the equation of Prescott & Hutton (1994)
+#' only for depths > 167 g/cm^2, i.e. only for the hard-component of the cosmic
+#' ray flux. Cosmic dose rate values for depths < 167 g/cm^2 were obtained from
+#' the "AGE" programm (Gruen 2009) and fitted with a 6-degree polynomial curve
+#' (and hence reproduces the graph shown in Prescott & Hutton 1988). However,
+#' these values assume an average overburden density of 2 g/cm^3.
+#'
+#' It is currently not possible to obtain more precise cosmic dose rate values
+#' for near-surface samples as there is no equation known to the author of this
+#' function at the time of writing.
+#' @section Function version: 0.5.2
+#' @author Christoph Burow, University of Cologne (Germany)
+#' @seealso \code{\link{BaseDataSet.CosmicDoseRate}}
+#' @references Allkofer, O.C., Carstensen, K., Dau, W.D., Jokisch, H., 1975.
+#' Letter to the editor. The absolute cosmic ray flux at sea level. Journal of
+#' Physics G: Nuclear and Particle Physics 1, L51-L52. \cr\cr Barbouti, A.I.,
+#' Rastin, B.C., 1983. A study of the absolute intensity of muons at sea level
+#' and under various thicknesses of absorber. Journal of Physics G: Nuclear and
+#' Particle Physics 9, 1577-1595. \cr\cr Crookes, J.N., Rastin, B.C., 1972. An
+#' investigation of the absolute intensity of muons at sea-level. Nuclear
+#' Physics B 39, 493-508.  \cr\cr Gruen, R., 2009. The "AGE" program for the
+#' calculation of luminescence age estimates. Ancient TL 27, 45-46. \cr\cr
+#' Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for
+#' TL and ESR. Nuclear Tracks and Radiation Measurements 14, 223-227. \cr\cr
+#' Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates
+#' for luminescence and ESR dating: large depths and long-term time variations.
+#' Radiation Measurements 23, 497-500. \cr\cr Prescott, J.R., Stephan, L.G.,
+#' 1982. The contribution of cosmic radiation to the environmental dose for
+#' thermoluminescence dating. Latitude, altitude and depth dependences. PACT 6,
+#' 17-25.
+#' @examples
+#'
+#' ##(1) calculate cosmic dose rate (one absorber)
+#' calc_CosmicDoseRate(depth = 2.78, density = 1.7,
+#'                     latitude = 38.06451, longitude = 1.49646,
+#'                     altitude = 364, error = 10)
+#'
+#' ##(2a) calculate cosmic dose rate (two absorber)
+#' calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7),
+#'                     latitude = 38.06451, longitude = 1.49646,
+#'                     altitude = 364, error = 10)
+#'
+#' ##(2b) calculate cosmic dose rate (two absorber) and
+#' ##correct for geomagnetic field changes
+#' calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7),
+#'                     latitude = 12.04332, longitude = 4.43243,
+#'                     altitude = 364, corr.fieldChanges = TRUE,
+#'                     est.age = 67, error = 15)
+#'
+#'
+#' ##(3) calculate cosmic dose rate and export results to .csv file
+#' #calculate cosmic dose rate and save to variable
+#' results<- calc_CosmicDoseRate(depth = 2.78, density = 1.7,
+#'                               latitude = 38.06451, longitude = 1.49646,
+#'                               altitude = 364, error = 10)
+#'
+#' # the results can be accessed by
+#' get_RLum(results, "summary")
+#'
+#' #export results to .csv file - uncomment for usage
+#' #write.csv(results, file = "c:/users/public/results.csv")
+#'
+#' ##(4) calculate cosmic dose rate for 6 samples from the same profile
+#' ##    and save to .csv file
+#' #calculate cosmic dose rate and save to variable
+#' results<- calc_CosmicDoseRate(depth = c(0.1, 0.5 , 2.1, 2.7, 4.2, 6.3),
+#'                               density = 1.7, latitude = 38.06451,
+#'                               longitude = 1.49646, altitude = 364,
+#'                               error = 10)
+#'
+#' #export results to .csv file - uncomment for usage
+#' #write.csv(results, file = "c:/users/public/results_profile.csv")
+#'
+#' @export
+calc_CosmicDoseRate<- function(
+  depth,
+  density,
+  latitude,
+  longitude,
+  altitude,
+  corr.fieldChanges = FALSE,
+  est.age = NA,
+  half.depth = FALSE,
+  error = 10
+) {
+
+  ##============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##============================================================================##
+
+  if(any(depth < 0) || any(density < 0)) {
+    cat(paste("\nNo negative values allowed for depth and density"))
+    stop(domain=NA)
+  }
+
+  if(corr.fieldChanges == TRUE) {
+    if(is.na(est.age) == TRUE) {
+      cat(paste("\nCorrection for geomagnetic field changes requires",
+                "an age estimate."), fill = FALSE)
+      stop(domain=NA)
+    }
+    if(est.age > 80) {
+      cat(paste("\nCAUTION: No geomagnetic field change correction for samples",
+                "older >80 ka possible!"), fill = FALSE)
+      corr.fieldChanges<- FALSE
+    }
+  }
+  
+  if(length(density) > length(depth)) {
+    stop("\nIf you provide more than one value for density please", 
+         " provide an equal number of values for depth.", call. = FALSE)
+  }
+
+
+  ##============================================================================##
+  ## CALCULATIONS
+  ##============================================================================##
+
+
+  # initialize parameters for the Prescott & Hutton (1994) equation
+
+  C<- 6072
+  B<- 0.00055
+  d<- 11.6
+  alpha<- 1.68
+  a<- 75
+  H<- 212
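+
+  # with these parameters the dose rate at sea level and 55 deg. geomagnetic
+  # latitude is computed in the loop below as
+  #   d0 = C / (((hgcm + d)^alpha + a) * (hgcm + H)) * exp(-B * hgcm)
+  # where hgcm is the total absorber in hg/cm^2 (1 hg = 100 g)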
+
+  #variable needed to check if cosmic dose rate is calculated for more
+  #than one sample
+
+  profile.mode<- FALSE
+
+  #calculate absorber (hgcm) of one depth and one absorber [single sample]
+  if(length(depth)==1) {
+    hgcm<- depth*density
+    if(half.depth == TRUE) {
+      hgcm<- hgcm/2
+    }
+  }
+
+  #calculate total absorber of n depths and n densities [single sample]
+  if(length(depth)==length(density)){
+
+    hgcm<- 0
+
+    for(i in 1:length(depth)) {
+      hgcm<- hgcm + depth[i]*density[i]
+    }
+    if(half.depth == TRUE) {
+      hgcm<- hgcm/2
+    }
+  }
+
+  #if there are >1 depths and only one density, calculate
+  #absorber for each sample [multi sample]
+  if(length(depth) > length(density) & length(density) == 1) {
+    profile.mode<- TRUE
+    hgcm<- 1:length(depth)
+    for(i in 1:length(depth)) {
+      hgcm[i]<- depth[i]*density
+    }
+    if(half.depth == TRUE) {
+      hgcm<- hgcm/2
+    }
+    profile.results<- data.frame(matrix(NA_real_, nrow = length(depth), ncol = 4))
+    colnames(profile.results)<- c("depth (m)", "d0 (Gy/ka)",
+                                  "dc (Gy/ka)","dc_error (Gy/ka)")
+  }
+
+
+  for(i in 1:length(hgcm)) {
+
+
+    # calculate cosmic dose rate at sea-level for geomagnetic latitude 55 degrees
+
+    if(hgcm[i]*100 >= 167) {
+
+      d0<- (C/((((hgcm[i]+d)^alpha)+a)*(hgcm[i]+H)))*exp(-B*hgcm[i])
+
+    }
+    if(hgcm[i]*100 < 167) {
+
+      temp.hgcm<- hgcm[i]*100
+      d0.ph<- (C/((((hgcm[i]+d)^alpha)+a)*(hgcm[i]+H)))*exp(-B*hgcm[i])
+
+      if(hgcm[i]*100 < 40) {
+        d0<- -6*10^-8*temp.hgcm^3+2*10^-5*temp.hgcm^2-0.0025*temp.hgcm+0.2969
+      }
+      else {
+        d0<- 2*10^-6*temp.hgcm^2-0.0008*temp.hgcm+0.2535
+      }
+      if(d0.ph > d0) {
+        d0<- d0.ph
+      }
+    }
+    # Calculate geomagnetic latitude
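+    # sin(GML) = 0.203 * cos(lat) * cos(lon - 291 deg.) + 0.979 * sin(lat),
+    # with latitude/longitude converted from degrees to radians via pi/180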
+    gml.temp<- 0.203*cos((pi/180)*latitude)*
+      cos(((pi/180)*longitude)-(291*pi/180))+0.979*
+      sin((pi/180)*latitude)
+    true.gml<- asin(gml.temp)/(pi/180)
+    gml<- abs(asin(gml.temp)/(pi/180))
+
+    # Find values for F, J and H from graph shown in Prescott & Hutton (1994)
+    # values were read from the graph and fitted with 3 degree polynomials and a
+    # linear part
+
+    if(gml < 36.5) { # Polynomial fit
+
+      F_ph<- -7*10^-7*gml^3-8*10^-5*gml^2-0.0009*gml+0.3988
+    }
+    else { # Linear fit
+
+      F_ph<- -0.0001*gml + 0.2347
+
+    }
+
+    if(gml < 34) { # Polynomial fit
+
+      J_ph<- 5*10^-6*gml^3-5*10^-5*gml^2+0.0026*gml+0.5177
+
+    }
+    else { # Linear fit
+      J_ph<- 0.0005*gml + 0.7388
+    }
+
+    if(gml < 36) { # Polynomial fit
+
+      H_ph<- -3*10^-6*gml^3-5*10^-5*gml^2-0.0031*gml+4.398
+
+    }
+    else { # Linear fit
+
+      H_ph<- 0.0002*gml + 4.0914
+
+    }
+
+    # Apply correction for geomagnetic latitude and altitude according to
+    # Prescott & Hutton (1994)
+
+    dc<- d0*(F_ph + J_ph*exp((altitude/1000)/H_ph))
+
+
+    ## Additional correction for geomagnetic field change
+
+    if(corr.fieldChanges==TRUE) {
+
+      if(gml <= 35) {
+
+        # Correction matrix for geomagnetic field changes at
+        # sea-level (Prescott & Hutton (1994), Table 1)
+
+        corr.matrix<- data.frame(rbind(1:5),1:7)
+        colnames(corr.matrix)<- c(0, 10, 20, 30, 35, ">35")
+        rownames(corr.matrix)<- c("0-5","5-10","10-15","15-20","20-35","35-50",
+                                  "50-80")
+
+        corr.matrix[1,]<- c(0.97, 0.97, 0.98, 0.98, 0.98, 1.00)
+        corr.matrix[2,]<- c(0.99, 0.99, 0.99, 0.99, 0.99, 1.00)
+        corr.matrix[3,]<- c(1.00, 1.00, 1.00, 1.00, 1.00, 1.00)
+        corr.matrix[4,]<- c(1.01, 1.01, 1.01, 1.00, 1.00, 1.00)
+        corr.matrix[5,]<- c(1.02, 1.02, 1.02, 1.01, 1.00, 1.00)
+        corr.matrix[6,]<- c(1.03, 1.03, 1.02, 1.01, 1.00, 1.00)
+        corr.matrix[7,]<- c(1.02, 1.02, 1.02, 1.01, 1.00, 1.00)
+
+        # Find corresponding correction factor for given geomagnetic latitude
+
+        # determine column
+        if(gml <= 5) { corr.c<- 1 }
+        if(5 < gml) {
+          if(gml <= 15) { corr.c<- 2 }
+        }
+        if(15 < gml){
+          if(gml <= 25) { corr.c<- 3 }
+        }
+        if(25 < gml){
+          if(gml <= 32.5) { corr.c<- 4 }
+        }
+        if(32.5 < gml){
+          if(gml <= 35) { corr.c<- 5 }
+        }
+
+        # find row
+        if(est.age <= 5) { corr.fac<- corr.matrix[1,corr.c] }
+        if(5 < est.age) {
+          if(est.age <= 10) { corr.fac<- corr.matrix[2,corr.c] }
+        }
+        if(10 < est.age){
+          if(est.age <= 15) { corr.fac<- corr.matrix[3,corr.c] }
+        }
+        if(15 < est.age){
+          if(est.age <= 20) { corr.fac<- corr.matrix[4,corr.c] }
+        }
+        if(20 < est.age){
+          if(est.age <= 35) { corr.fac<- corr.matrix[5,corr.c] }
+        }
+        if(35 < est.age){
+          if(est.age <= 50) { corr.fac<- corr.matrix[6,corr.c] }
+        }
+        if(50 < est.age){
+          if(est.age <= 80) { corr.fac<- corr.matrix[7,corr.c] }
+        }
+
+        # Find altitude factor via fitted function 2-degree polynomial
+        # This factor is only available for positive altitudes
+        if(altitude > 0) {
+
+          alt.fac<- -0.026*(altitude/1000)^2 + 0.6628*altitude/1000 + 1.0435
+
+          # Combine geomagnetic latitude correction with altitude
+          # correction (figure caption of Fig. 1 in Precott and Hutton (1994))
+
+
+          diff.one<- corr.fac - 1
+          corr.fac<- corr.fac + diff.one * alt.fac
+
+        }
+
+        # Final correction of cosmic dose rate
+
+        dc<- dc * corr.fac
+
+        print(paste("corr.fac",corr.fac,"diff.one",diff.one,"alt.fac",alt.fac))
+
+      }
+
+      else {
+        cat(paste("\n No geomagnetic field change correction necessary for
+                geomagnetic latitude >35 degrees!"))
+      }
+    }
+
+    # calculate error
+    dc.err<- dc*error/100
+
+    # save intermediate results before next sample is calculated
+    if(profile.mode==TRUE) {
+      profile.results[i,1]<- round(depth[i],2)
+      profile.results[i,2]<- round(d0,4)
+      profile.results[i,3]<- round(dc,4)
+      profile.results[i,4]<- round(dc.err,4)
+    }
+
+  }#END.OF.LOOP
+
+  call<- sys.call()
+  args<- list(depth = depth, density = density, latitude = latitude, longitude = longitude,
+              altitude = altitude, corr.fieldChanges = corr.fieldChanges, est.age = est.age,
+              half.depth = half.depth, error = error)
+
+  if(length(hgcm)==1) {
+
+    ##============================================================================##
+    ##TERMINAL OUTPUT
+    ##============================================================================##
+
+    cat("\n\n [calc_CosmicDoseRate]")
+    cat(paste("\n\n ---------------------------------------------------------"))
+    cat(paste("\n depth (m)              :", depth))
+    cat(paste("\n density (g cm^-3)      :", density))
+    cat(paste("\n latitude (N deg.)      :", latitude))
+    cat(paste("\n longitude (E deg.)     :", longitude))
+    cat(paste("\n altitude (m)           :", altitude))
+    cat(paste("\n ---------------------------------------------------------"))
+    cat(paste("\n total absorber (g cm^-2)       :", round(hgcm[i]*100,3)))
+    cat(paste("\n"))
+    cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(d0,4)))
+    cat(paste("\n  [@sea-level & 55 deg. N G.lat]"))
+    cat(paste("\n"))
+    cat(paste("\n geomagnetic latitude (deg.)    :", round(true.gml,1)))
+    cat(paste("\n"))
+    cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(dc,4),"+-",
+              round(dc.err,4)))
+    cat(paste("\n  [corrected]                 "))
+    cat(paste("\n ---------------------------------------------------------\n\n"))
+
+    ##============================================================================##
+    ##RETURN VALUES
+    ##============================================================================##
+
+    if(length(depth)==1) {
+      temp1<- data.frame(depth=depth,density=density)
+    } else {
+
+      temp1a<- data.frame(rbind(c(1:length(depth))))
+      tmpcoln1<- 1:length(depth)
+
+      for(i in 1:length(depth)) {
+        temp1a[i]<- depth[i]
+        tmpcoln1[i]<- paste("depth",i)
+      }
+
+      temp1b<- data.frame(rbind(c(1:length(density))))
+      tmpcoln2<- 1:length(density)
+
+      for(i in 1:length(density)) {
+        temp1b[i]<- density[i]
+        tmpcoln2[i]<- paste("density",i)
+      }
+
+      colnames(temp1a)<- tmpcoln1
+      colnames(temp1b)<- tmpcoln2
+      temp1<- cbind(temp1a,temp1b)
+    }
+
+    temp2<- data.frame(latitude=latitude,longitude=longitude,
+                       altitude=altitude,total_absorber.gcm2=hgcm*100,
+                       d0=d0,geom_lat=true.gml,dc=dc)
+
+    summary<- data.frame(cbind(temp1,temp2))
+
+    newRLumResults.calc_CosmicDoseRate <- set_RLum(
+      class = "RLum.Results",
+      data = list(summary=summary,
+                  args=args,
+                  call=call))
+
+    # Return values
+    invisible(newRLumResults.calc_CosmicDoseRate)
+
+  } else {
+
+    #terminal output
+    cat("\n\n [calc_CosmicDoseRate]")
+    cat(paste("\n\n Calculating cosmic dose rate for",length(depth),
+              "samples. \n\n"))
+    print(profile.results)
+
+    #return value
+    add.info<- data.frame(latitude=latitude,longitude=longitude,
+                          altitude=altitude,total_absorber.gcm2=hgcm*100,
+                          geom_lat=true.gml)
+    add.info<- add.info[rep(1, length(depth)), ]
+    colnames(profile.results)<- c("depth","d0","dc","dc_err")
+
+    summary<- data.frame(cbind(profile.results,add.info))
+
+    newRLumResults.calc_CosmicDoseRate <- set_RLum(
+      class = "RLum.Results",
+      data = list(summary=summary,
+                  args=args,
+                  call=call))
+
+    # Return values
+    invisible(newRLumResults.calc_CosmicDoseRate)
+
+  }
+}
diff --git a/R/calc_FadingCorr.R b/R/calc_FadingCorr.R
new file mode 100644
index 0000000..0bef641
--- /dev/null
+++ b/R/calc_FadingCorr.R
@@ -0,0 +1,452 @@
+#' Apply a fading correction according to Huntley & Lamothe (2001) for a given
+#' g-value and a given tc
+#'
+#' This function solves the equation used for correcting the fading affected age
+#' including the error for a given g-value according to Huntley & Lamothe (2001).
+#'
+#' As the g-value slightly depends on the time between irradiation and the prompt measurement
+#' (this is tc), a tc value always needs to be provided. If the g-value was normalised to a distinct
+#' time or evaluated with a different tc value (e.g., external irradiation), the tc value
+#' for the g-value needs to be provided as well (argument \code{tc.g_value}); the g-value is then recalculated
+#' to the tc of the measurement used for estimating the age, applying the following equation:
+#'
+#' \deqn{\kappa_{tc} = \kappa_{tc.g} / (1 - \kappa_{tc.g} * log(tc/tc.g))}
+#'
+#' where
+#'
+#' \deqn{\kappa_{tc.g} = g / 100 / log(10)}
+#'
+#' with \eqn{log} the natural logarithm.
+#'
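+#' A minimal sketch of this recalculation in plain R (illustrative values
+#' only: a g-value of 5 \%/decade normalised to 2 days, i.e.
+#' tc.g_value = 172800 s, and a measurement tc of 2592000 s):
+#'
+#' \preformatted{
+#' g <- 5
+#' tc <- 2592000; tc.g <- 172800
+#' k0 <- g / 100 / log(10)
+#' k1 <- k0 / (1 - k0 * log(tc / tc.g))
+#' 100 * k1 * log(10)  # g-value recalculated to tc
+#' }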
+#'
+#' The error of the fading-corrected age is determined using a Monte Carlo
+#' simulation approach. The equation is solved numerically using
+#' \code{\link{uniroot}}. Large values for \code{n.MC} will significantly
+#' increase the computation time.\cr
+#'
+#' \bold{\code{n.MC = 'auto'}}
+#'
+#' The error estimation is based on a stochastic process, i.e. for a small number of MC runs the calculated
+#' error varies considerably every time the function is called, even with the same input values.
+#' The argument option \code{n.MC = 'auto'} tries to find a stable value for the standard error, i.e.
+#' the standard deviation of values calculated during the MC runs (\code{age.corr.MC}),
+#' within a given precision (3 digits) by increasing the number of MC runs stepwise and
+#' calculating the corresponding error.
+#'
+#' If the determined error does not differ from the 9 values calculated previously
+#' within a precision of (here) 3 digits the calculation is stopped as it is assumed that the error
+#' is stable. Please note that (a) the duration depends on the input values as well as on
+#' the provided computation resources and it may take a while, (b) the length (size) of the output
+#' vector \code{age.corr.MC}, where all the single values produced during the MC runs are stored,
+#' equals the number of MC runs (here termed observations).
+#'
+#' To avoid an endless loop the calculation is stopped if the number of observations exceeds 10^7.
+#' This limitation can be overwritten by setting the number of MC runs manually,
+#' e.g. \code{n.MC = 10000001}. Note: For this case the function is not checking whether the calculated
+#' error is stable.\cr
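+#'
+#' For example, to let the function search for a stable error (input values
+#' are illustrative only):
+#'
+#' \preformatted{
+#' calc_FadingCorr(age.faded = c(10, 0.5),
+#'                 g_value = c(5.0, 1.0),
+#'                 tc = 2592000,
+#'                 n.MC = 'auto')
+#' }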
+#'
+#'
+#' \bold{\code{seed}}
+#'
+#' This option allows recreating previously calculated results by setting the seed
+#' for the R random number generator (see \code{\link{set.seed}} for details). This option
+#' should not be mixed up with the option \bold{\code{n.MC = 'auto'}}. The results may
+#' appear similar, but they are not comparable!\cr
+#'
+#' \bold{FAQ}\cr
+#' Q: Which tc value is expected?\cr
+#' A: tc is the time in seconds between irradiation and the prompt measurement applied during your
+#' De measurement. However, this tc might differ from the tc used for estimating the g-value. In the
+#' case of an SAR measurement tc should be similar, however, if it differs, you have to provide this
+#' tc value (the one used for estimating the g-value) using the argument \code{tc.g_value}.\cr
+#'
+#' @param age.faded \code{\link{numeric}} \code{\link{vector}} (\bold{required}): uncorrected
+#' age with error in ka (see example)
+#'
+#' @param g_value \code{\link{vector}} (\bold{required}): g-value and error obtained
+#' from separate fading measurements (see example). Alternatively an \code{\linkS4class{RLum.Results}} object
+#' can be provided produced by the function \code{analyse_FadingMeasurement}, in this case tc is set
+#' automatically
+#'
+#' @param tc \code{\link{numeric}} (\bold{required}): time in seconds between
+#' irradiation and the prompt measurement (cf. Huntley & Lamothe 2001). Argument will be ignored
+#' if \code{g_value} was an \code{RLum.Results} object
+#'
+#' @param tc.g_value \code{\link{numeric}} (with default): the time in seconds between irradiation
+#' and the prompt measurement used for estimating the g-value. If the g-value was normalised
+#' to, e.g., 2 days, this time in seconds (i.e., 172800) should be given here. If nothing is provided
+#' the time is set to tc, which is the usual case for g-values obtained using the SAR method and g-values
+#' that have not been normalised to 2 days.
+#'
+#' @param n.MC \code{\link{integer}} (with default): number of Monte Carlo
+#' simulation runs for error estimation. If \code{n.MC = 'auto'} is used the function
+#' tries to find a 'stable' error for the age. Note: This may take a while!
+#'
+#' @param seed \code{\link{integer}} (optional): sets the seed for the random number generator
+#' in R using \code{\link{set.seed}}
+#'
+#' @param txtProgressBar \link{logical} (with default): enables or disables
+#' \code{\link{txtProgressBar}}
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables terminal output
+#'
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.\cr
+#'
+#' Slot: \bold{@data}\cr
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Comment}\cr
+#'  \code{age.corr} \tab \code{data.frame} \tab Corrected age \cr
+#'  \code{age.corr.MC} \tab \code{numeric} \tab MC simulation results with all possible ages from
+#'  that simulation\cr
+#' }
+#'
+#' Slot: \bold{@info}\cr
+#'
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Comment}\cr
+#'  \code{info} \tab \code{character} \tab the original function call
+#'
+#' }
+#'
+#'
+#' @note The upper age limit is set to 500 ka! \cr
+#' Special thanks to Sebastien Huot for his support and clarification via e-mail.
+#'
+#'
+#' @section Function version: 0.4.1
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#'
+#' @seealso \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
+#' \code{\link{uniroot}}
+#'
+#'
+#' @references Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading
+#' in K-feldspars and the measurement and correction for it in optical dating.
+#' Canadian Journal of Earth Sciences, 38, 1093-1106.
+#'
+#'
+#' @keywords datagen
+#'
+#'
+#' @examples
+#'
+#' ##run the examples given in the appendix of Huntley and Lamothe, 2001
+#'
+#' ##(1) faded age: 100 a
+#' results <- calc_FadingCorr(
+#'    age.faded = c(0.1,0),
+#'    g_value = c(5.0, 1.0),
+#'    tc = 2592000,
+#'    tc.g_value = 172800,
+#'    n.MC = 100)
+#'
+#' ##(2) faded age: 1 ka
+#' results <- calc_FadingCorr(
+#'    age.faded = c(1,0),
+#'    g_value = c(5.0, 1.0),
+#'    tc = 2592000,
+#'    tc.g_value = 172800,
+#'    n.MC = 100)
+#'
+#' ##(3) faded age: 10.0 ka
+#' results <- calc_FadingCorr(
+#'    age.faded = c(10,0),
+#'    g_value = c(5.0, 1.0),
+#'    tc = 2592000,
+#'    tc.g_value = 172800,
+#'    n.MC = 100)
+#'
+#' ##access the last output
+#' get_RLum(results)
+#'
+#' @export
+calc_FadingCorr <- function(
+  age.faded,
+  g_value,
+  tc = NULL,
+  tc.g_value = tc,
+  n.MC = 10000,
+  seed = NULL,
+  txtProgressBar = TRUE,
+  verbose = TRUE
+){
+
+  ##TODO set link after the function analyse_FadingMeasurement was released
+  ## ... this option should be tested as well
+
+  # Integrity checks ---------------------------------------------------------------------------
+  stopifnot(!missing(age.faded), !missing(g_value))
+
+  ##check input
+  if(class(g_value)[1] == "RLum.Results"){
+    if(g_value@originator == "analyse_FadingMeasurement"){
+
+      tc <- get_RLum(g_value)[["TC"]]
+      g_value <- get_RLum(g_value)[,c("FIT", "SD")]
+
+
+    }else{
+      try(stop("[calc_FadingCorr()] Unknown originator for the provided RLum.Results object via 'g_value'!", call. = FALSE))
+      return(NULL)
+
+
+    }
+
+
+  }
+
+  ##check if tc is still NULL
+  if(is.null(tc)){
+    try(stop("[calc_FadingCorr()] 'tc' needs to be set!", call. = FALSE))
+    return(NULL)
+
+  }
+
+
+  ##============================================================================##
+  ##DEFINE FUNCTION
+  ##============================================================================##
+
+
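+  ## the corrected age x is the root of
+  ##   1 - kappa * (log(x/tc) - 1) - age.faded/x = 0
+  ## (cf. Huntley & Lamothe 2001), solved below with uniroot()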
+  f <- function(x, af,kappa,tc){1-kappa*(log(x/tc)-1) - (af/x)}
+
+  ##============================================================================##
+  ##CALCULATION
+  ##============================================================================##
+
+  ##recalculate the g-value for the given tc; the result is identical
+  ##if tc == tc.g_value
+  ##re-calculation thanks to the help by Sebastien Huot, e-mail: 2016-07-19
+  ##Please note that we take the vector for the g_value here
+  k0 <- g_value / 100 / log(10)
+  k1 <- k0 / (1 - k0 * log(tc[1]/tc.g_value[1]))
+  g_value <-  100 * k1 * log(10)
+
+  ##calculate kappa (equation [5] in Huntley and Lamothe, 2001)
+  kappa <- g_value / log(10) / 100
+
+  ##transform tc in ka years
+  ##duration of the year over a long term taken from http://wikipedia.org
+  tc <- tc[1] / 60 / 60 / 24 / 365.2425  / 1000
+  tc.g_value <- tc.g_value[1] / 60 / 60 / 24 / 365.2425  / 1000
+
+  ##calculate mean value
+  temp <-
+    uniroot(
+      f,
+      c(0.1, 500),
+      tol = 0.001,
+      tc = tc,
+      af = age.faded[1],
+      kappa = kappa[1],
+      check.conv = FALSE
+    )
+
+  ##--------------------------------------------------------------------------##
+  ##Monte Carlo simulation for error estimation
+  tempMC.sd.recent <- NA
+  tempMC.sd.count <- 1:10
+  counter <- 1
+
+  ##show some progression bar of the process
+  if (n.MC == 'auto') {
+    n.MC.i <- 10000
+
+    cat("\n[calc_FadingCorr()] ... trying to find stable error value ...")
+    if (txtProgressBar) {
+      cat("\n -------------------------------------------------------------\n")
+      cat(paste0("   ",paste0("(",0:9,")", collapse = "   "), "\n"))
+    }
+  }else{
+    n.MC.i <- n.MC
+
+  }
+
+
+
+  # Start loop  ---------------------------------------------------------------------------------
+
+  ##set object and preallocate memory
+  tempMC <- vector("numeric", length = 1e+07)
+  tempMC[] <- NA
+  i <- 1
+  j <- n.MC.i
+
+  while(length(unique(tempMC.sd.count)) > 1 & i < 1e+07){
+
+    ##set previous
+    if(!is.na(tempMC.sd.recent)){
+      tempMC.sd.count[counter] <- tempMC.sd.recent
+
+    }
+
+    ##set seed
+    if (!is.null(seed)) set.seed(seed)
+
+    ##pre-allocate memory
+    g_valueMC <- vector("numeric", length = n.MC.i)
+    age.fadedMC <- vector("numeric", length = n.MC.i)
+    kappaMC <- vector("numeric", length = n.MC.i)
+
+    ##set-values
+    g_valueMC <- rnorm(n.MC.i,mean = g_value[1],sd = g_value[2])
+    age.fadedMC <- rnorm(n.MC.i,mean = age.faded[1],sd = age.faded[2])
+    kappaMC <- g_valueMC / log(10) / 100
+
+
+
+    ##calculate for all values
+    tempMC[i:j] <- suppressWarnings(vapply(X = 1:length(age.fadedMC), FUN = function(x) {
+      temp <- try(uniroot(
+        f,
+        c(0.1,500),
+        tol = 0.001,
+        tc = tc,
+        af = age.fadedMC[[x]],
+        kappa = kappaMC[[x]],
+        check.conv = TRUE,
+        maxiter = 1000,
+        extendInt = "yes"
+      ), silent = TRUE)
+
+      ##otherwise the automatic error value finding
+      ##will never work
+      if(!is(temp,"try-error") && temp$root<1e8) {
+        return(temp$root)
+      } else{
+        return(NA)
+      }
+
+    }, FUN.VALUE = 1))
+
+    i <- j + 1
+    j <- j + n.MC.i
+
+    ##stop here if a fixed value is set
+    if(n.MC != 'auto'){
+      break
+    }
+
+    ##set recent
+    tempMC.sd.recent <- round(sd(tempMC, na.rm = TRUE), digits = 3)
+
+    if (counter %% 10 == 0) {
+      counter <- 1
+
+    }else{
+      counter <- counter + 1
+
+    }
+
+    ##show progress in terminal
+    if (txtProgressBar) {
+      text <- rep("CHECK",10)
+      if (counter %% 2 == 0) {
+        text[1:length(unique(tempMC.sd.count))] <- "-----"
+      }else{
+        text[1:length(unique(tempMC.sd.count))] <- " CAL "
+      }
+
+
+
+      cat(paste("\r ",paste(rev(text), collapse = " ")))
+    }
+
+  }
+
+  ##--------------------------------------------------------------------------##
+
+  ##remove all NA values from tempMC
+  tempMC <- tempMC[!is.na(tempMC)]
+
+  ##obtain corrected age
+  age.corr <- data.frame(
+    AGE = round(temp$root, digits = 4),
+    AGE.ERROR = round(sd(tempMC), digits = 4),
+    AGE_FADED = age.faded[1],
+    AGE_FADED.ERROR = age.faded[2],
+    G_VALUE = g_value[1],
+    G_VALUE.ERROR = g_value[2],
+    KAPPA = kappa[1],
+    KAPPA.ERROR = kappa[2],
+    TC = tc,
+    TC.G_VALUE = tc.g_value,
+    n.MC = n.MC,
+    OBSERVATIONS = length(tempMC),
+    SEED = ifelse(is.null(seed), NA, seed)
+  )
+
+  ##============================================================================##
+  ##OUTPUT VISUAL
+  ##============================================================================##
+  if(verbose) {
+    cat("\n\n[calc_FadingCorr()]\n")
+    cat("\n >> Fading correction according to Huntley & Lamothe (2001)")
+
+    if (tc != tc.g_value) {
+      cat("\n >> g-value re-calculated for the given tc")
+
+    }
+
+    cat(paste(
+      "\n\n .. used g-value:\t",
+      round(g_value[1], digits = 3),
+      " \u00b1 ",
+      round(g_value[2], digits = 3),
+      " %/decade",
+      sep = ""
+    ))
+    cat(paste(
+      "\n .. used tc:\t\t",
+      format(tc, digits = 4, scientific = TRUE),
+      " ka",
+      sep = ""
+    ))
+    cat(paste0(
+      "\n .. used kappa:\t\t",
+      round(kappa[1], digits = 4),
+      " \u00b1 ",
+      round(kappa[2], digits = 4)
+    ))
+    cat("\n ----------------------------------------------")
+    cat(paste0("\n seed: \t\t\t", ifelse(is.null(seed), NA, seed)))
+    cat(paste0("\n n.MC: \t\t\t", n.MC))
+    cat(paste0(
+      "\n observations: \t\t",
+      format(length(tempMC), digits = 2, scientific = TRUE),
+      sep = ""
+    ))
+    cat("\n ----------------------------------------------")
+    cat(paste0(
+      "\n Age (faded):\t\t",
+      round(age.faded[1], digits = 4),
+      " ka \u00b1 ",
+      round(age.faded[2], digits = 4),
+      " ka"
+    ))
+    cat(paste0(
+      "\n Age (corr.):\t\t",
+      round(age.corr$AGE, digits = 4),
+      " ka \u00b1 ",
+      round(age.corr$AGE.ERROR, digits = 4),
+      " ka"
+    ))
+    cat("\n ---------------------------------------------- \n")
+
+  }
+
+  ##============================================================================##
+  ##OUTPUT RLUM
+  ##============================================================================##
+  return(set_RLum(
+    class = "RLum.Results",
+    data = list(age.corr = age.corr,
+                age.corr.MC = tempMC),
+    info = list(call = sys.call())
+  ))
+
+}
diff --git a/R/calc_FastRatio.R b/R/calc_FastRatio.R
new file mode 100644
index 0000000..4445c9f
--- /dev/null
+++ b/R/calc_FastRatio.R
@@ -0,0 +1,363 @@
+#' Calculate the Fast Ratio for CW-OSL curves
+#' 
+#' Function to calculate the fast ratio of quartz CW-OSL single grain or single 
+#' aliquot curves after Durcan & Duller (2011). 
+#' 
+#' This function follows the equations of Durcan & Duller (2011). The energy
+#' required to reduce the fast and medium quartz OSL components to \code{x}
+#' and \code{x2} \% respectively is computed using eq. 3, which determines the
+#' channels L2 and L3 (start and end). The fast ratio is then calculated from:
+#' \eqn{(L1-L3)/(L2-L3)}.
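+#'
+#' A minimal sketch of the channel-time calculation under the default
+#' assumptions (stimulation with 30.6 mW/cm^2 at 470 nm, photoionisation
+#' cross-sections after Durcan & Duller 2011):
+#'
+#' \preformatted{
+#' I0   <- (30.6 / 1000) / (6.62607004e-34 * 299792458 / 470e-9)
+#' t_L2 <- log(1/100) / (-2.6e-17 * I0)    # 1 per cent fast remaining
+#' t_L3 <- log(1/100) / (-4.28e-18 * I0)   # start of L3
+#' }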
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}}, 
+#' \code{\linkS4class{RLum.Data.Curve}} or \code{\link{data.frame}} 
+#' (\bold{required}): x, y data of measured values (time and counts).
+#' 
+#' @param stimulation.power \code{\link{numeric}} (with default): Stimulation power in mW/cm^2
+#' 
+#' @param wavelength \code{\link{numeric}} (with default): Stimulation wavelength in nm
+#' 
+#' @param sigmaF \code{\link{numeric}} (with default): Photoionisation cross-section (cm^2) of the
+#' fast component. Default value after Durcan & Duller (2011).
+#' 
+#' @param sigmaM \code{\link{numeric}} (with default): Photoionisation cross-section (cm^2) of the
+#' medium component. Default value after Durcan & Duller (2011).
+#' 
+#' @param Ch_L1 \code{\link{numeric}} (with default): An integer specifying the channel for L1.
+#' 
+#' @param x \code{\link{numeric}} (with default): \% of signal remaining from the fast component.
+#' Used to define the location of L2 and L3 (start).
+#' 
+#' @param x2 \code{\link{numeric}} (with default): \% of signal remaining from the medium component.
+#' Used to define the location of L3 (end). 
+#' 
+#' @param dead.channels \code{\link{numeric}} (with default): Vector of length 2 in the form of
+#' \code{c(x, y)}. Channels that do not contain OSL data, i.e. at the start or end of
+#' measurement.
+#' 
+#' @param fitCW.sigma \code{\link{logical}} (optional): fit CW-OSL curve using \code{\link{fit_CWCurve}}
+#' to calculate \code{sigmaF} and \code{sigmaM} (experimental).
+#' 
+#' @param fitCW.curve \code{\link{logical}} (optional): fit CW-OSL curve using \code{\link{fit_CWCurve}}
+#' and derive the counts of L2 and L3 from the fitted OSL curve (experimental).
+#' 
+#' @param plot \code{\link{logical}} (with default): plot output (\code{TRUE}/\code{FALSE})
+#' 
+#' @param ... available options: \code{verbose} (\code{\link{logical}}). Further
+#' arguments passed to \code{\link{fit_CWCurve}}.
+#'
+#' @return Returns a plot (optional) and an S4 object of type \code{\linkS4class{RLum.Results}}. 
+#' The slot \code{data} contains a \code{\link{list}} with the following elements:\cr
+#'
+#' \item{summary}{\code{\link{data.frame}} summary of all relevant results}
+#' \item{data}{the original input data}
+#' \item{fit}{\code{\linkS4class{RLum.Results}} object if either \code{fitCW.sigma} or \code{fitCW.curve} is \code{TRUE}}
+#' \item{args}{\code{\link{list}} of used arguments}
+#' \item{call}{\code{\link{call}} the function call}
+#' 
+#' @section Function version: 0.1.0
+#'
+#' @author 
+#' Georgina King, University of Cologne (Germany) \cr
+#' Julie A. Durcan, University of Oxford (United Kingdom) \cr
+#' Christoph Burow, University of Cologne (Germany) \cr
+#'
+#' @references 
+#' Durcan, J.A. & Duller, G.A.T., 2011. The fast ratio: A rapid measure for testing
+#' the dominance of the fast component in the initial OSL signal from quartz.
+#' Radiation Measurements 46, 1065-1072. \cr\cr
+#' 
+#' Madsen, A.T., Duller, G.A.T., Donnelly, J.P., Roberts, H.M. & Wintle, A.G., 2009.
+#' A chronology of hurricane landfalls at Little Sippewissett Marsh, Massachusetts, USA,
+#' using optical dating. Geomorphology 109, 36-45. \cr\cr
+#'
+#' \bold{Further reading} \cr\cr
+#' 
+#' Steffen, D., Preusser, F. & Schlunegger, 2009. OSL quartz age underestimation 
+#' due to unstable signal components. Quaternary Geochronology 4, 353-362.
+#' 
+#'
+#' @seealso \code{\link{fit_CWCurve}}, \code{\link{get_RLum}}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}, \code{\linkS4class{RLum.Data.Curve}}
+#' 
+#' @examples
+#' # load example CW-OSL curve
+#' data("ExampleData.CW_OSL_Curve")
+#' 
+#' # calculate the fast ratio w/o further adjustments
+#' res <- calc_FastRatio(ExampleData.CW_OSL_Curve)
+#' 
+#' # show the summary table
+#' get_RLum(res)
+#'
+#' @export
+calc_FastRatio <- function(object, 
+                           stimulation.power = 30.6, 
+                           wavelength = 470,
+                           sigmaF = 2.6E-17,
+                           sigmaM = 4.28E-18,
+                           Ch_L1 = 1,
+                           x = 1,
+                           x2 = 0.1,
+                           dead.channels = c(0,0),
+                           fitCW.sigma = FALSE,
+                           fitCW.curve = FALSE,
+                           plot = TRUE,
+                           ...) {
+  
+  ## Input object handling -----------------------------------------------------
+  if (inherits(object, "RLum.Analysis"))
+    object <- get_RLum(object)
+  
+  if (inherits(object, "RLum.Results"))
+    object <- get_RLum(object, "data")
+  
+  if (!inherits(object, "list"))
+    object <-list(object)
+  
+  ## Settings ------------------------------------------------------------------
+  settings <- list(verbose = TRUE,
+                   n.components.max = 3,
+                   fit.method = "LM",
+                   output.terminal = FALSE,
+                   info = list(),
+                   fit = NULL)
+  
+  # override defaults with args in ...
+  settings <- modifyList(settings, list(...))
+  
+  
+  ## Calculations --------------------------------------------------------------
+  # iterate over all user provided objects and calculate the FR
+  fast.ratios <- lapply(object, function(obj) {
+    
+    if (inherits(obj, "RLum.Data.Curve"))
+      A <- get_RLum(obj)
+    else
+      A <- obj
+    
+    ## Energy calculation
+    # P = user defined stimulation power in mW
+    # lambdaLED = wavelength of stimulation source in nm
+    P <- stimulation.power 
+    lamdaLED <- wavelength
+    
+    ## Constants
+    # h = Planck's constant, c = speed of light
+    h <- 6.62607004E-34
+    c <- 299792458
+    
+    I0 <- (P / 1000) / (h * c / (lamdaLED * 10^-9))
+    Ch_width <- max(A[ ,1]) / length(A[ ,1])
+    
+    # remove dead channels
+    A <- as.data.frame(A[(dead.channels[1] + 1):(nrow(A)-dead.channels[2]), ])
+    A[ ,1] <- A[ ,1] - A[1,1]
+    
+    # estimate the photo-ionisation crossections of the fast and medium
+    # component using the fit_CWCurve function
+    if (fitCW.sigma | fitCW.curve) {
+      fitCW.res <- try(fit_CWCurve(A, n.components.max = settings$n.components.max, 
+                                   fit.method = settings$fit.method, 
+                                   LED.power = stimulation.power, 
+                                   LED.wavelength = wavelength, 
+                                   output.terminal = settings$output.terminal, 
+                                   plot = plot))
+      settings$fit <- fitCW.res
+      
+      if (fitCW.sigma) {
+        if (!inherits(fitCW.res, "try-error")) {
+          sigmaF <- get_RLum(fitCW.res, "output.table")$cs1
+          sigmaM <- get_RLum(fitCW.res, "output.table")$cs2
+          if (settings$verbose) {
+            message("\n [calc_FitCWCurve()]\n")
+            message("New value for sigmaF: ", format(sigmaF, digits = 3, nsmall = 2))
+            message("New value for sigmaM: ", format(sigmaM, digits = 3, nsmall = 2))
+          }
+        } else {
+          if (settings$verbose)
+            message("Fitting failed! Please call 'fit_CWCurve()' manually before ",
+                    "calculating the fast ratio.")
+        }
+      }
+      
+      if (fitCW.curve) {
+        if (!inherits(fitCW.res, "try-error")) {
+          nls <- get_RLum(fitCW.res, "fit")
+          A[ ,2] <- predict(nls)
+        }
+      }
+
+    }
+    
+    
+    ## The equivalent time in s of L1, L2, L3
+    # Use these values to look up the channel
+    t_L1 <- 0
+    t_L2 <- (log(x / 100)) / (-sigmaF * I0)
+    t_L3_start <- (log(x / 100)) / (-sigmaM * I0)
+    t_L3_end <- (log(x2 / 100)) / (-sigmaM * I0)
+    
+    ## Channel number(s) of L2 and L3
+    Ch_L2 <- which.min(abs(A[,1] - t_L2))
+    
+    if (Ch_L2 <= 1) {
+      msg <- sprintf("Calculated time/channel for L2 is too small (%.f, %.f). Returned NULL.", 
+                     t_L2, Ch_L2)
+      settings$info <- modifyList(settings$info, list(L2 = msg))
+      warning(msg, call. = FALSE)
+      return(NULL)
+    }
+    
+    Ch_L3st<- which.min(abs(A[,1] - t_L3_start))
+    Ch_L3end <- which.min(abs(A[,1] - t_L3_end))
+    
+    ## Counts in channels L1, L2, L3
+    # L1 ----
+    Cts_L1 <- A[Ch_L1, 2]
+    
+    # L2 ----
+    if (Ch_L2 > nrow(A)) {
+      msg <- sprintf(paste("The calculated channel for L2 (%i) is equal", 
+                           "to or larger than the number of available channels (%i).",
+                           "Returned NULL."), Ch_L2, nrow(A))
+      settings$info <- modifyList(settings$info, list(L2 = msg))
+      warning(msg, call. = FALSE)
+      return(NULL)
+    } 
+  
+    Cts_L2 <- A[Ch_L2, 2]
+    
+    # optional: predict the counts from the fitted curve
+    if (fitCW.curve) {
+      if (!inherits(fitCW.res, "try-error")) {
+        nls <- get_RLum(fitCW.res, "fit")
+        Cts_L2 <- predict(nls, list(x = t_L2))
+      }
+    }
+
+    
+    # L3 ----
+    if (Ch_L3st >= nrow(A) | Ch_L3end > nrow(A)) {
+      msg <- sprintf(paste("The calculated channels for L3 (%i, %i) are equal to or", 
+                           "larger than the number of available channels (%i).",
+                           "\nThe background has instead been estimated from the last",
+                           "5 channels."), Ch_L3st, Ch_L3end, nrow(A))
+      settings$info <- modifyList(settings$info, list(L3 = msg))
+      warning(msg, call. = FALSE)
+      Ch_L3st <- nrow(A) - 5
+      Ch_L3end <- nrow(A)
+      t_L3_start <- A[Ch_L3st,1]
+      t_L3_end <- A[Ch_L3end,1]
+    }
+    
+    Cts_L3 <- mean(A[Ch_L3st:Ch_L3end, 2])
+    
+    # optional: predict the counts from the fitted curve
+    if (fitCW.curve) {
+      if (!inherits(fitCW.res, "try-error")) {
+        nls <- get_RLum(fitCW.res, "fit")
+        Cts_L3 <- mean(predict(nls, list(x = c(t_L3_start, t_L3_end))))
+      }
+    }
+    
+    # Warn if counts are not in decreasing order
+    if (Cts_L3 >= Cts_L2)
+      warning(sprintf("L3 contains more counts (%.f) than L2 (%.f).",
+                      Cts_L3, Cts_L2), call. = FALSE)
+    
+    ## Fast Ratio
+    FR <- (Cts_L1 - Cts_L3) / (Cts_L2 - Cts_L3)
+    if (length(FR) != 1)
+      FR <- NA
+    
+    ## Fast Ratio - Error calculation
+    if (!is.na(FR)) {
+      
+      # number of channels the background was derived from
+      nBG <- abs(Ch_L3end - Ch_L3st)
+      
+      # relative standard errors
+      rse_L1 <- sqrt(Cts_L1 + Cts_L3 / nBG) / (Cts_L1 - Cts_L3)
+      rse_L2 <- sqrt(Cts_L2 + Cts_L3 / nBG) / (Cts_L2 - Cts_L3)
+      
+      # absolute standard errors
+      se_L1 <- rse_L1 * (Cts_L1 - Cts_L3)
+      se_L2 <- rse_L2 * (Cts_L2 - Cts_L3)
+      
+      # absolute standard error on fast ratio
+      FR_se <- (sqrt((se_L1 / (Cts_L1 - Cts_L3))^2 + ((se_L2 / (Cts_L2 - Cts_L3))^2) )) * FR
+      FR_rse <- FR_se / FR * 100
+      
+    } else {
+      FR_se <- NA
+      FR_rse <- NA
+    }
+    
+    ## Return values -----------------------------------------------------------
+    summary <- data.frame(fast.ratio = FR,
+                          fast.ratio.se = FR_se,
+                          fast.ratio.rse = FR_rse,
+                          channels = nrow(A),
+                          channel.width = Ch_width,
+                          dead.channels.start = as.integer(dead.channels[1]),
+                          dead.channels.end = as.integer(dead.channels[2]),
+                          sigmaF = sigmaF,
+                          sigmaM = sigmaM,
+                          I0 = I0,
+                          stimulation.power = stimulation.power,
+                          wavelength = wavelength,
+                          t_L1 = t_L1,
+                          t_L2 = t_L2,
+                          t_L3_start = t_L3_start,
+                          t_L3_end = t_L3_end,
+                          Ch_L1 = as.integer(Ch_L1),
+                          Ch_L2 = as.integer(Ch_L2),
+                          Ch_L3_start = as.integer(Ch_L3st),
+                          Ch_L3_end = as.integer(Ch_L3end),
+                          Cts_L1 = Cts_L1,
+                          Cts_L2 = Cts_L2,
+                          Cts_L3 = Cts_L3)
+    
+    fast.ratio <- set_RLum(class = "RLum.Results",
+                           originator = "calc_FastRatio",
+                           data = list(summary = summary,
+                                       data = obj,
+                                       fit = settings$fit,
+                                       args = as.list(sys.call(-2L)[-1]),
+                                       call = sys.call(-2L)),
+                           info = settings$info
+    )
+    
+    ## Console Output ----------------------------------------------------------
+    if (settings$verbose) {
+      
+      table.names <- c(
+        "Fast Ratio\t", " \U02EA Absolute error", " \U02EA Relative error (%)", "Channels\t", 
+        "Channel width (s)", "Dead channels start", "Dead channels end",
+        "Sigma Fast\t", "Sigma Medium\t", "I0\t\t", "Stim. power (mW/cm^2)", "Wavelength (nm)",
+        "-\n Time L1 (s)\t", "Time L2 (s)\t", "Time L3 start (s)", "Time L3 end (s)",
+        "-\n Channel L1\t", "Channel L2\t", "Channel L3 start", "Channel L3 end\t",
+        "-\n Counts L1\t", "Counts L2\t", "Counts L3\t")
+      
+      cat("\n[calc_FastRatio()]\n")
+      cat("\n -------------------------------")
+      for (i in 1:ncol(summary)) {
+        cat(paste0("\n ", table.names[i],"\t: ",
+                   format(summary[1, i], digits = 2, nsmall = 2)))
+      }
+      cat("\n -------------------------------\n\n")
+      
+    }
+    ## Plotting ----------------------------------------------------------------
+    if (plot) 
+      try(plot_RLum.Results(fast.ratio, ...))
+
+    # return
+    return(fast.ratio)
+  }) # End of lapply
+  
+  if (length(fast.ratios) == 1)
+    fast.ratios <- fast.ratios[[1]]
+  
+  invisible(fast.ratios)
+}
diff --git a/R/calc_FiniteMixture.R b/R/calc_FiniteMixture.R
new file mode 100644
index 0000000..17c4c26
--- /dev/null
+++ b/R/calc_FiniteMixture.R
@@ -0,0 +1,570 @@
+#' Apply the finite mixture model (FMM) after Galbraith (2005) to a given De
+#' distribution
+#'
+#' This function fits a k-component mixture to a De distribution with differing
+#' known standard errors. Parameters (doses and mixing proportions) are
+#' estimated by maximum likelihood assuming that the log dose estimates are
+#' from a mixture of normal distributions.
+#'
+#' This model uses the maximum likelihood and Bayesian Information Criterion
+#' (BIC) approaches. \cr\cr Indications of overfitting are: \cr\cr - increasing
+#' BIC \cr - repeated dose estimates \cr - covariance matrix not positive
+#' definite \cr - covariance matrix produces NaNs\cr - convergence problems
+#' \cr\cr \bold{Plot} \cr\cr If a vector (\code{c(k.min:k.max)}) is provided
+#' for \code{n.components} a plot is generated showing the k components'
+#' equivalent doses as normal distributions. By default \code{pdf.weight} is
+#' set to \code{FALSE}, so that the area under each normal distribution is
+#' always 1. If \code{TRUE}, the probability density functions are weighted by
+#' the components proportion for each iteration of k components, so the sum of
+#' areas of each component equals 1. While the density values are on the same
+#' scale when no weights are used, the y-axes are individually scaled if the
+#' probability densities are weighted by the components' proportions. \cr The
+#' standard deviation (sigma) of the normal distributions is by default
+#' determined by a common \code{sigmab} (see \code{pdf.sigma}). For
+#' \code{pdf.sigma = "se"} the standard error of each component is taken
+#' instead.\cr The stacked barplot shows the proportion of each component (in
+#' per cent) calculated by the FMM. The last plot shows the achieved BIC scores
+#' and maximum log-likelihood estimates for each iteration of k.
+#'
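+#' As implemented here (cf. the returned \code{BIC} and \code{llik} elements),
+#' the BIC score for k fitted components and n grains with maximum
+#' log-likelihood llik is calculated as
+#'
+#' \deqn{BIC = -2 llik + (2k - 1) log(n)}
+#'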
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' @param sigmab \code{\link{numeric}} (\bold{required}): spread in De values
+#' given as a fraction (e.g. 0.2). This value represents the expected
+#' overdispersion in the data should the sample be well-bleached (Cunningham &
+#' Wallinga 2012, p. 100).
+#' @param n.components \code{\link{numeric}} (\bold{required}): number of
+#' components to be fitted. If a vector is provided (e.g. \code{c(2:8)}) the
+#' finite mixtures for 2, 3 ... 8 components are calculated and a plot and a
+#' statistical evaluation of the model performance (BIC score and maximum
+#' log-likelihood) is provided.
+#' @param grain.probability \code{\link{logical}} (with default): prints the
+#' estimated probabilities of which component each grain is in
+#' @param dose.scale \code{\link{numeric}}: manually set the scaling of the
+#' y-axis of the first plot with a vector in the form of \code{c(min,max)}
+#' @param pdf.weight \code{\link{logical}} (with default): weight the
+#' probability density functions by the components proportion (applies only
+#' when a vector is provided for \code{n.components})
+#' @param pdf.sigma \code{\link{character}} (with default): if \code{"sigmab"}
+#' the components normal distributions are plotted with a common standard
+#' deviation (i.e. \code{sigmab}) as assumed by the FMM. Alternatively,
+#' \code{"se"} takes the standard error of each component for the sigma
+#' parameter of the normal distribution
+#' @param pdf.colors \code{\link{character}} (with default): color coding of
+#' the components in the plot. Possible options are "gray", "colors" and
+#' "none"
+#' @param pdf.scale \code{\link{numeric}}: manually set the max density value
+#' for proper scaling of the x-axis of the first plot
+#' @param plot.proportions \code{\link{logical}} (with default): plot barplot
+#' showing the proportions of components
+#' @param plot \code{\link{logical}} (with default): plot output
+#' @param \dots further arguments to pass.  See details for their usage.
+#' @return Returns a plot (optional) and terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following elements:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call} \item{mle}{
+#' covariance matrices of the log likelihoods} \item{BIC}{ BIC score}
+#' \item{llik}{ maximum log likelihood} \item{grain.probability}{ probabilities
+#' of a grain belonging to a component} \item{components}{\link{matrix}
+#' estimates of the de, de error and proportion for each component}
+#' \item{single.comp}{\link{data.frame} single component FMM estimate}
+#'
+#' If a vector for \code{n.components} is provided (e.g.  \code{c(2:8)}),
+#' \code{mle} and \code{grain.probability} are lists containing matrices of the
+#' results for each iteration of the model.
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @section Function version: 0.4
+#' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
+#' rewritten S script of Rex Galbraith, 2006. \cr
+#' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+#' \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+#' @references Galbraith, R.F. & Green, P.F., 1990. Estimating the component
+#' ages in a finite mixture. Nuclear Tracks and Radiation Measurements 17,
+#' 197-206. \cr\cr Galbraith, R.F. & Laslett, G.M., 1993. Statistical models
+#' for mixed fission track ages. Nuclear Tracks Radiation Measurements 4,
+#' 459-470.\cr\cr Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of
+#' equivalent dose and error calculation and display in OSL dating: An overview
+#' and some recommendations. Quaternary Geochronology 11, 1-27.\cr\cr Roberts,
+#' R.G., Galbraith, R.F., Yoshida, H., Laslett, G.M. & Olley, J.M., 2000.
+#' Distinguishing dose populations in sediment mixtures: a test of single-grain
+#' optical dating procedures using mixtures of laboratory-dosed quartz.
+#' Radiation Measurements 32, 459-465.\cr\cr Galbraith, R.F., 2005. Statistics
+#' for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton.\cr\cr
+#' \bold{Further reading}\cr\cr Arnold, L.J. & Roberts, R.G., 2009. Stochastic
+#' modelling of multi-grain equivalent dose (De) distributions: Implications
+#' for OSL dating of sediment mixtures. Quaternary Geochronology 4,
+#' 204-230.\cr\cr Cunningham, A.C. & Wallinga, J., 2012. Realizing the
+#' potential of fluvial archives using robust OSL chronologies. Quaternary
+#' Geochronology 12, 98-106.\cr\cr Rodnight, H., Duller, G.A.T., Wintle, A.G. &
+#' Tooth, S., 2006. Assessing the reproducibility and accuracy of optical
+#' dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.\cr\cr
+#' Rodnight, H. 2008. How many equivalent dose values are needed to obtain a
+#' reproducible distribution?. Ancient TL 26, 3-10.
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## (1) apply the finite mixture model
+#' ## NOTE: the data set is not suitable for the finite mixture model,
+#' ## which is why a very small sigmab is necessary
+#' calc_FiniteMixture(ExampleData.DeValues$CA1,
+#'                    sigmab = 0.2, n.components = 2,
+#'                    grain.probability = TRUE)
+#'
+#' ## (2) repeat the finite mixture model for 2, 3 and 4 maximum number of fitted
+#' ## components and save results
+#' ## NOTE: The following example is computationally intensive and may
+#' ## take a while to complete.
+#' FMM<- calc_FiniteMixture(ExampleData.DeValues$CA1,
+#'                          sigmab = 0.2, n.components = c(2:4),
+#'                          pdf.weight = TRUE, dose.scale = c(0, 100))
+#'
+#' ## show structure of the results
+#' FMM
+#'
+#' ## show the results on equivalent dose, standard error and proportion of
+#' ## fitted components
+#' get_RLum(object = FMM, data.object = "components")
+#'
+#' @export
+calc_FiniteMixture <- function(
+  data,
+  sigmab,
+  n.components,
+  grain.probability = FALSE,
+  dose.scale,
+  pdf.weight = TRUE,
+  pdf.sigma = "sigmab",
+  pdf.colors = "gray",
+  pdf.scale,
+  plot.proportions = TRUE,
+  plot=TRUE,
+  ...
+){
+
+  ##============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##============================================================================##
+
+  if(missing(data)==FALSE){
+
+    if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){
+      stop("[calc_FiniteMixture] Error: 'data' object has to be of type
+           'data.frame' or 'RLum.Results'!")
+    } else {
+      if(is(data, "RLum.Results") == TRUE){
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+  try(colnames(data)<- c("ED","ED_Error"),silent=TRUE)
+  if(colnames(data[1])!="ED"||colnames(data[2])!="ED_Error") {
+    cat(paste("Columns must be named 'ED' and 'ED_Error'"), fill = FALSE)
+    stop(domain=NA)
+  }
+  if(sigmab <0 | sigmab >1) {
+    cat(paste("sigmab needs to be given as a fraction between",
+              "0 and 1 (e.g. 0.2)"), fill = FALSE)
+    stop(domain=NA)
+  }
+  if(any(n.components<2) == TRUE) {
+    cat(paste("Atleast two components need to be fitted"), fill = FALSE)
+    stop(domain=NA)
+  }
+  if(pdf.sigma!="se" ) {
+    if(pdf.sigma!="sigmab") {
+      cat(paste("Only 'se' or 'sigmab' allowed for the pdf.sigma argument"),
+          fill = FALSE)
+      stop(domain=NA)
+    }
+  }
+
+  ##============================================================================##
+  ## ... ARGUMENTS
+  ##============================================================================##
+
+  extraArgs <- list(...)
+
+  ## console output
+  if("verbose" %in% names(extraArgs)) {
+    verbose<- extraArgs$verbose
+  } else {
+    verbose<- TRUE
+  }
+  # trace calculations
+  if("trace" %in% names(extraArgs)) {
+    trace<- extraArgs$trace
+  } else {
+    trace<- FALSE
+  }
+  # plot title
+  if("main" %in% names(extraArgs)) {
+    main<- extraArgs$main
+  } else {
+    main<- "Finite Mixture Model"
+  }
+
+  ##============================================================================##
+  ## CALCULATIONS
+  ##============================================================================##
+
+  ## create storage variables if more than one k is provided
+  if(length(n.components)>1) {
+
+    # counter needed for various purposes
+    cnt<- 1
+
+    # create summary matrix containing DE, standard error (se) and proportion
+    # for each component
+    comp.n<- matrix(data = NA, ncol = length(n.components),
+                    nrow = n.components[length(n.components)] * 3,
+                    byrow = TRUE)
+
+    # create empty vector as storage for BIC and LLIK scores
+    BIC.n<- vector(mode = "double")
+    LLIK.n<- vector(mode = "double")
+
+    # create empty vectors of type "lists" as storage for mle matrices and
+    # grain probabilities
+    vmat.n<- vector(mode = "list", length = length(n.components))
+    grain.probability.n<- vector(mode = "list", length = length(n.components))
+
+  }
+
+  ## start actual calculation (loop) for each provided maximum components to
+  ## be fitted.
+  for(i in 1:length(n.components)) {
+
+    k<- n.components[i]
+
+    # calculate yu = log(ED),  su = se(logED),  n = number of grains
+    yu<- log(data$ED)
+    su<- data$ED_Error/data$ED
+    n<- length(yu)
+
+    # compute starting values
+    fui<- matrix(0,n,k)
+    pui<- matrix(0,n,k)
+    nui<- matrix(0,n,k)
+    pii<- rep(1/k,k)
+    mu<- min(yu) + (max(yu)-min(yu))*(1:k)/(k+1)
+
+    # remove the # in the line below to get alternative starting values
+    # (useful to check that the algorithm converges to the same values)
+    #	mu<- quantile(yu,(1:k)/(k+1))
+
+    # compute maximum log likelihood estimates
+    nit<- 499L
+    wu<- 1/(sigmab^2 + su^2)
+    rwu<- sqrt(wu)
+
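+    # EM-type iteration: compute the posterior component probabilities (pui)
+    # from the current estimates, then update the component means (mu) as
+    # precision-weighted averages and the mixing proportions (pii)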
+    for(j in 1:nit){
+      for(i in 1:k)
+      {
+        fui[,i]<-  rwu*exp(-0.5*wu*(yu-mu[i])^2)
+        nui[,i]<-  pii[i]*fui[,i]
+      }
+      pui<- nui/apply(nui,1,sum)
+      mu<- apply(wu*yu*pui,2,sum)/apply(wu*pui,2,sum)
+      pii<- apply(pui,2,mean)
+    }
+
+    # calculate the log likelihood and BIC
+    llik<- sum( log( (1/sqrt(2*pi))*apply(nui,1,sum) ))
+    bic<- -2*llik + (2*k - 1)*log(n)
+
+    # calculate the covariance matrix and standard errors of the estimates
+    # i.e., the dose estimates in Gy and relative standard errors, and
+    # the mixing proportions and standard errors.
+    aui<- matrix(0,n,k)
+    bui<- matrix(0,n,k)
+    for(i in 1:k)
+    {
+      aui[,i]<- wu*(yu-mu[i])
+      bui[,i]<- -wu + (wu*(yu-mu[i]))^2
+    }
+    delta<- diag(rep(1,k))
+
+    Au<- matrix(0,k-1,k-1)
+    Bu<- matrix(0,k-1,k)
+    Cu<- matrix(0,k,k)
+
+    for(i in 1:(k-1)){ for(j in 1:(k-1)){
+      Au[i,j]<- sum( (pui[,i]/pii[i] - pui[,k]/pii[k])*(pui[,j]/pii[j] -
+                                                          pui[,k]/pii[k]) )}}
+
+    for(i in 1:(k-1)){ for(j in 1:k){
+      Bu[i,j]<- sum( pui[,j]*aui[,j]*(pui[,i]/pii[i] - pui[,k]/pii[k] -
+                                        delta[i,j]/pii[i] + delta[k,j]/pii[k] ) )}}
+
+    for(i in 1:k){ for(j in 1:k){
+      Cu[i,j]<- sum( pui[,i]*pui[,j]*aui[,i]*aui[,j] - delta[i,j]*bui[,i]*
+                       pui[,i] ) }}
+
+    invvmat<- rbind(cbind(Au,Bu),cbind(t(Bu),Cu))
+    vmat<- solve(invvmat, tol=.Machine$double.xmin)
+    rek<- sqrt(sum(vmat[1:(k-1),1:(k-1)]))
+
+
+    # calculate DE, relative standard error, standard error
+    dose<- exp(mu)
+    re<- sqrt(diag(vmat))[-c(1:(k-1))]
+    sed<- dose*re
+    estd<- rbind(dose,re,sed)
+
+    # rename proportion
+    prop<- pii
+
+    # this calculates the proportional standard error of the proportion of grains
+    # in the fitted components. However, the calculation is most likely erroneous.
+    # sep<-  c(sqrt(diag(vmat))[c(1:(k-1))],rek)
+
+    # rename proportion
+    estp<- prop
+
+    # merge results to a data frame
+    blk<- rep("    ",k)
+    comp<- rbind(blk,round(estd,4),blk,round(estp,4))
+    comp<- data.frame(comp,row.names=c("","dose (Gy)    ","rse(dose)    ",
+                                       "se(dose)(Gy)"," ","proportion   "))
+
+    # label results data frame
+    cp<- rep("comp",k)
+    cn<- c(1:k)
+    names(comp)<- paste(cp,cn,sep="")
+
+    # calculate the log likelihood and BIC for a single component -- can
+    # be useful to see if there is evidence of more than one component
+    mu0<- sum(wu*yu)/sum(wu)
+    fu0<-  rwu*exp(-0.5*wu*(yu-mu0)^2)
+    L0<- sum( log((1/sqrt(2*pi))*fu0 ) )
+    bic0<- -2*L0 + log(n)
+    comp0<- round(c(exp(mu0),sigmab,L0,bic0),4)
+
+
+    ## save results for k components in storage variables
+    if(length(n.components)>1) {
+
+      # vector of indices needed for finding the dose rows of the summary
+      # matrix - position 1,4,7...n
+      pos.n<- seq(from = 1, to = n.components[cnt]*3, by = 3)
+
+      # save results of each iteration to summary matrix
+      for(i in 1:n.components[cnt]) {
+        comp.n[pos.n[i], cnt]<- round(dose[i], 2) #De
+        comp.n[pos.n[i]+1, cnt]<- round(sed[i], 2) #SE
+        comp.n[pos.n[i]+2, cnt]<- round(estp[i], 2) #Proportion
+      }
+
+      # save BIC and llik of each iteration to corresponding vector
+      BIC.n[cnt]<- bic
+      LLIK.n[cnt]<- llik
+
+      # merge BIC and llik scores to a single data frame
+      results.n<- rbind(BIC = round(BIC.n, 3),
+                        llik = round(LLIK.n, 3))
+
+      # save mle matrix and grain probabilities to corresponding vector
+      vmat.n[[cnt]]<- vmat
+      grain.probability.n[[cnt]]<- as.data.frame(pui)
+
+      # increase counter by one for next iteration
+      cnt<- cnt+1
+    }#EndOf::save intermediate results
+  }##EndOf::calculation loop
+
+  ##============================================================================##
+  ## STATISTICAL CHECK
+  ##============================================================================##
+
+  if(length(n.components)>1) {
+
+    ## Evaluate maximum log likelihood estimates
+    LLIK.significant<- vector(mode = "logical")
+
+    # check if llik is at least three times greater when adding a further
+    # component
+    for(i in 1:(length(LLIK.n)-1)) {
+      LLIK.significant[i]<- (LLIK.n[i+1]/LLIK.n[i])>3
+    }
+
+    ## Find lowest BIC score
+    BIC.lowest<- n.components[which.min(BIC.n)]
+  }
+
+  ##============================================================================##
+  ## OUTPUT
+  ##============================================================================##
+
+  if(verbose==TRUE) {
+
+    ## HEADER (always printed)
+    cat("\n [calc_FiniteMixture]")
+
+    ##----------------------------------------------------------------------------
+    ## OUTPUT WHEN ONLY ONE VALUE FOR n.components IS PROVIDED
+
+    if(length(n.components) == 1) {
+
+      # covariance matrix
+      cat(paste("\n\n--- covariance matrix of mle's ---\n\n"))
+      print(round(vmat,6))
+
+      # general information on sample and model performance
+      cat(paste("\n----------- meta data ------------"))
+      cat(paste("\n n:                    ",n))
+      cat(paste("\n sigmab:               ",sigmab))
+      cat(paste("\n number of components: ",k))
+      cat(paste("\n llik:                 ",round(llik,4)))
+      cat(paste("\n BIC:                   ",round(bic,3)))
+
+      # fitted components
+      cat(paste("\n\n----------- components -----------\n\n"))
+      print(comp)
+
+
+      # print (to 2 decimal places) the estimated probabilities of which component
+      # each grain is in -- sometimes useful for diagnostic purposes
+      if(grain.probability==TRUE) {
+        cat(paste("\n-------- grain probability -------\n\n"))
+        print(round(pui,2))
+      }
+
+      # output for single component
+      cat(paste("\n-------- single component --------"))
+      cat(paste("\n mu:                    ", comp0[1]))
+      cat(paste("\n sigmab:                ", comp0[2]))
+      cat(paste("\n llik:                  ", comp0[3]))
+      cat(paste("\n BIC:                   ", comp0[4]))
+      cat(paste("\n----------------------------------\n\n"))
+
+    }#EndOf::Output for length(n.components) == 1
+
+    ##----------------------------------------------------------------------------
+    ## OUTPUT WHEN MORE THAN ONE VALUE FOR n.components IS PROVIDED
+    if(length(n.components) > 1) {
+
+      ## final labeling of component and BIC/llik matrices
+      # create labels
+      dose.lab<- paste("c", 1:n.components[length(n.components)],"_dose", sep="")
+      se.lab<- paste("c", 1:n.components[length(n.components)],"_se", sep="")
+      prop.lab<- paste("c", 1:n.components[length(n.components)],"_prop", sep="")
+
+      # empty vector which stores the labels in correct order (dose, se, prop)
+      n.lab<- vector(mode = "expression",
+                     n.components[length(n.components)]*3)
+
+      # loop to store the labels in correct order (dose, se, prop)
+      cnt<- 1
+      for(i in pos.n) {
+        n.lab[i]<- dose.lab[cnt]
+        n.lab[i+1]<- se.lab[cnt]
+        n.lab[i+2]<- prop.lab[cnt]
+        cnt<- cnt+1
+      }
+
+      # label columns and rows of summary matrix and BIC/LLIK data frame
+      colnames(comp.n)<- n.components[1]:n.components[length(n.components)]
+      rownames(comp.n)<- n.lab
+      colnames(results.n)<- n.components[1]:n.components[length(n.components)]
+
+      ## CONSOLE OUTPUT
+      # general information on sample and model performance
+      cat(paste("\n\n----------- meta data ------------"))
+      cat(paste("\n n:                    ",n))
+      cat(paste("\n sigmab:               ",sigmab))
+      cat(paste("\n number of components:  ",n.components[1],"-",
+                n.components[length(n.components)], sep=""))
+
+      # output for single component
+      cat(paste("\n\n-------- single component --------"))
+      cat(paste("\n mu:                    ", comp0[1]))
+      cat(paste("\n sigmab:                ", comp0[2]))
+      cat(paste("\n llik:                  ", comp0[3]))
+      cat(paste("\n BIC:                   ", comp0[4]))
+
+      # print component matrix
+      cat(paste("\n\n----------- k components -----------\n"))
+      print(comp.n, na.print="")
+
+      # print BIC scores and LLIK estimates
+      cat(paste("\n----------- statistical criteria -----------\n"))
+      print(results.n)
+
+      ## print evaluation of statistical criteria
+      # lowest BIC score
+      cat(paste("\n Lowest BIC score for k =", BIC.lowest))
+
+      # first significant increase in LLIK estimates
+      if(any(LLIK.significant)!=TRUE) {
+        cat(paste("\n No significant increase in maximum log",
+                  "likelihood estimates. \n"))
+      } else {
+        cat(paste("\n First significant increase in maximum log likelihood for",
+                  "k =", which(LLIK.significant==TRUE)[1], "\n\n"))
+      }
+
+      cat(paste("\n"))
+    }#EndOf::Output for length(n.components) > 1
+  }
+
+  ##============================================================================##
+  ## RETURN VALUES
+  ##============================================================================##
+
+  # .@data$meta
+  BIC<- data.frame(n.components=k, BIC=bic)
+  llik<- data.frame(n.components=k, llik=llik)
+
+  if(length(n.components)>1) {
+    BIC.n<- data.frame(n.components=n.components, BIC=BIC.n)
+    llik.n<- data.frame(n.components=n.components, llik=LLIK.n)
+  }
+
+  # .@data$single.comp
+  single.comp<- data.frame(mu=comp0[1],sigmab=comp0[2],
+                           llik=comp0[3],BIC=comp0[4])
+
+  # .@data$components
+  comp.re<- t(rbind(round(estd,4),round(estp,4)))
+  colnames(comp.re)<- c("de","rel_de_err","de_err","proportion")
+  comp.re<- comp.re[,-2] # remove the relative error column
+
+  # .@data$grain.probability
+  grain.probability<- round(pui, 2)
+
+  summary<- data.frame(comp.re)
+  call<- sys.call()
+  args<- list(sigmab = sigmab, n.components = n.components)
+
+  # create S4 object
+  newRLumResults.calc_FiniteMixture <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary=summary,
+      data=data,
+      args=args,
+      call=call,
+      mle=if(length(n.components)==1){vmat}else{vmat.n},
+      BIC=if(length(n.components)==1){BIC}else{BIC.n},
+      llik=if(length(n.components)==1){llik}else{llik.n},
+      grain.probability=if(length(n.components)==1){grain.probability}else{grain.probability.n},
+      components=if(length(n.components)==1){comp.re}else{comp.n},
+      single.comp=single.comp))
+
+  ##=========##
+  ## PLOTTING
+  if(plot==TRUE) {
+    try(plot_RLum.Results(newRLumResults.calc_FiniteMixture, ...))
+  }#endif::plot
+
+  # Return values
+  invisible(newRLumResults.calc_FiniteMixture)
+
+}
diff --git a/R/calc_FuchsLang2001.R b/R/calc_FuchsLang2001.R
new file mode 100644
index 0000000..d0c6b9b
--- /dev/null
+++ b/R/calc_FuchsLang2001.R
@@ -0,0 +1,231 @@
+#' Apply the model after Fuchs & Lang (2001) to a given De distribution.
+#'
+#' This function applies the method according to Fuchs & Lang (2001) for
+#' heterogeneously bleached samples with a given coefficient of variation
+#' threshold.
+#'
+#' \bold{Used values} \cr If the coefficient of variation (c[v]) of the first
+#' two values is larger than the threshold c[v_threshold], the first value is
+#' skipped.  Use the \code{startDeValue} argument to define a start value for
+#' calculation (e.g. 2nd or 3rd value).\cr
+#'
+#' \bold{Basic steps of the approach} \cr
+#'
+#' (1) Estimate natural relative variation of the sample using a dose recovery
+#' test\cr (2) Sort the input values in ascending order\cr (3) Calculate a
+#' running mean, starting with the lowermost two values, adding further
+#' values iteratively.\cr
+#' (4) Stop if the calculated c[v] exceeds the specified \code{cvThreshold}\cr
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' @param cvThreshold \link{numeric} (with default): coefficient of variation
+#' in percent, as threshold for the method, e.g. \code{cvThreshold = 3}. See
+#' details.
+#' @param startDeValue \link{numeric} (with default): number of the first
+#' aliquot that is used for the calculations
+#' @param plot \link{logical} (with default): plot output
+#' \code{TRUE}/\code{FALSE}
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot}}
+#' @return Returns a plot (optional) and terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following elements:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#' \item{usedDeValues}{\link{data.frame} containing the used values for the
+#' calculation}
+#' @note Please consider the requirements and the constraints of this method
+#' (see Fuchs & Lang, 2001)
+#' @section Function version: 0.4.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France) \cr Christoph Burow, University of Cologne (Germany)
+#' @seealso \code{\link{plot}}, \code{\link{calc_MinDose}},
+#' \code{\link{calc_FiniteMixture}}, \code{\link{calc_CentralDose}},
+#' \code{\link{calc_CommonDose}}, \code{\linkS4class{RLum.Results}}
+#' @references Fuchs, M. & Lang, A., 2001. OSL dating of coarse-grain fluvial
+#' quartz using single-aliquot protocols on sediments from NE Peloponnese,
+#' Greece. Quaternary Science Reviews 20, 783-787.
+#'
+#' Fuchs, M. & Wagner, G.A., 2003. Recognition of insufficient bleaching by
+#' small aliquots of quartz for reconstructing soil erosion in Greece.
+#' Quaternary Science Reviews 22, 1161-1167.
+#' @keywords dplot
+#' @examples
+#'
+#'
+#' ##load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ##calculate De according to Fuchs & Lang (2001)
+#' temp<- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5)
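+#'
+#' ##the same call, illustratively starting with the 2nd lowest De value
+#' ##via the startDeValue argument
+#' temp2<- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5,
+#'                            startDeValue = 2)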
+#'
+#' @export
+calc_FuchsLang2001 <- function(
+  data,
+  cvThreshold=5,
+  startDeValue=1,
+  plot=TRUE,
+  ...
+){
+
+  # Integrity Tests ---------------------------------------------------------
+
+  if(missing(data)==FALSE){
+    if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){
+      stop("[calc_FuchsLang2001] 'data' has to be of type 'data.frame' or 'RLum.Results'!")
+    } else {
+      if(is(data, "RLum.Results") == TRUE){
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+
+  # Deal with extra arguments -----------------------------------------------
+  ##deal with addition arguments
+  extraArgs <- list(...)
+
+  verbose <- if("verbose" %in% names(extraArgs)) {extraArgs$verbose} else {TRUE}
+
+
+  ##============================================================================##
+  ##PREPARE DATA
+  ##============================================================================##
+
+  ##1. order values in ascending order and write the used D[e] values into a data.frame
+  o <- order(data[1]) # o is only an order parameter
+  data_ordered <- data[o,] # sort values after o and write them into a new variable
+
+  ##2. estimate D[e]
+
+  # set variables
+  usedDeValues<-data.frame(De=NA,De_Error=NA,cv=NA)
+  endDeValue<-startDeValue
+
+  # if the first D[e] values are not used, write this information into the data.frame
+  if (startDeValue!=1) {
+
+    n <- abs(1-startDeValue)
+
+    #  write used D[e] values in data.frame
+    usedDeValues[1:n,1]<-data_ordered[1:n,1]
+    usedDeValues[1:n,2]<-data_ordered[1:n,2]
+    usedDeValues[1:n,3]<-"skipped"
+  }
+
+  ##=================================================================================================##
+  ##LOOP FOR MODEL
+  ##=================================================================================================##
+
+  # repeat loop (run at least one time)
+  repeat {
+
+    #calculate mean, sd and cv
+    mean<-round(mean(data_ordered[startDeValue:endDeValue,1]),digits=2) #calculate mean from ordered D[e] values
+    sd<-round(sd(data_ordered[startDeValue:endDeValue,1]),digits=2)		#calculate sd from ordered D[e] values
+    cv<-round(sd/mean*100, digits=2) #calculate coefficient of variation
+
+
+    # break if cv > cvThreshold
+    if (cv>cvThreshold & endDeValue>startDeValue){
+
+      # if the first two D[e] values give a cv > cvThreshold, then skip the first D[e] value
+      if (endDeValue-startDeValue<2) {
+
+        #  write used D[e] values in data.frame
+        usedDeValues[endDeValue,1]<-data_ordered[endDeValue,1]
+        usedDeValues[endDeValue,2]<-data_ordered[endDeValue,2]
+        usedDeValues[endDeValue-1,3]<-"not used"
+
+        # go to the next D[e] value
+        startDeValue<-startDeValue+1
+
+      } else {
+
+        usedDeValues[endDeValue,1]<-data_ordered[endDeValue,1]
+        usedDeValues[endDeValue,2]<-data_ordered[endDeValue,2]
+        usedDeValues[endDeValue,3]<-paste("# ",cv," %",sep="")
+
+        break #break loop
+      }
+
+    }#EndIf
+    else {
+
+      # write used D[e] values in data.frame
+      usedDeValues[endDeValue,1]<-data_ordered[endDeValue,1]
+      usedDeValues[endDeValue,2]<-data_ordered[endDeValue,2]
+
+      # the first cv value always contains NA; check it to ensure that "NA %" is not printed
+      if(is.na(cv)==TRUE) {
+        usedDeValues[endDeValue,3]<-cv
+      } else {
+        usedDeValues[endDeValue,3]<-paste(cv," %",sep="")
+      }
+    }#EndElse
+
+    # go to the next D[e] value until the maximum number is reached
+    if (endDeValue<length(data_ordered[,1])) {
+      endDeValue<-endDeValue+1
+    } else {break}
+
+  }#EndRepeat
+
+  ##=================================================================================================##
+  ##ADDITIONAL CALCULATIONS and TERMINAL OUTPUT
+  ##=================================================================================================##
+
+  # additionally calculate the weighted mean
+  w<-1/(data_ordered[startDeValue:endDeValue,2])^2 #weights for weighted mean
+  weighted_mean <- round(weighted.mean(data_ordered[startDeValue:endDeValue,1], w), digits=2)
+  weighted_sd<-round(sqrt(1/sum(w)),digits=2)
+  n.usedDeValues<-endDeValue-startDeValue+1
+
+  # standard error
+  se<- round(sd/sqrt(endDeValue-startDeValue+1), digits=2)
+
+  if(verbose==TRUE){
+    cat("\n [calc_FuchsLang2001]")
+    cat(paste("\n\n----------- meta data --------------"))
+    cat(paste("\n cvThreshold:            ",cvThreshold,"%"))
+    cat(paste("\n used values:            ",n.usedDeValues))
+    cat(paste("\n----------- dose estimate ----------"))
+    cat(paste("\n mean:                   ",mean))
+    cat(paste("\n sd:                     ",sd))
+    cat(paste("\n weighted mean:          ",weighted_mean))
+    cat(paste("\n weighted sd:            ",weighted_sd))
+    cat(paste("\n------------------------------------\n\n"))
+  }
+
+  ##=================================================================================================#
+  ##RETURN  VALUES
+  ##=================================================================================================##
+
+  summary<- data.frame(de=mean,
+                       de_err=sd,
+                       de_weighted=weighted_mean,
+                       de_weighted_err=weighted_sd,
+                       n.usedDeValues=n.usedDeValues)
+
+  call<- sys.call()
+  args<- list(cvThreshold = cvThreshold, startDeValue = startDeValue)
+
+  newRLumResults.calc_FuchsLang2001<- set_RLum(
+    class = "RLum.Results",
+    data = list(summary = summary,
+                data = data,
+                args = args,
+                call = call,
+                usedDeValues=usedDeValues))
+
+  ##=========##
+  ## PLOTTING
+  if(plot==TRUE) {
+    try(plot_RLum.Results(newRLumResults.calc_FuchsLang2001, ...))
+  }#endif::plot
+
+  invisible(newRLumResults.calc_FuchsLang2001)
+
+}
diff --git a/R/calc_HomogeneityTest.R b/R/calc_HomogeneityTest.R
new file mode 100644
index 0000000..cd24ebf
--- /dev/null
+++ b/R/calc_HomogeneityTest.R
@@ -0,0 +1,128 @@
+#' Apply a simple homogeneity test after Galbraith (2003)
+#'
+#' A simple homogeneity test for De estimates
+#'
+#' For details see Galbraith (2003).
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' @param log \code{\link{logical}} (with default): perform the homogeneity test
+#' with (un-)logged data
+#' @param \dots further arguments (for internal compatibility only).
+#' @return Returns a terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @section Function version: 0.2
+#' @author Christoph Burow, University of Cologne (Germany)
+#' @seealso \code{\link{pchisq}}
+#' @references Galbraith, R.F., 2003. A simple homogeneity test for estimates
+#' of dose obtained using OSL. Ancient TL 21, 75-77.
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## apply the homogeneity test
+#' calc_HomogeneityTest(ExampleData.DeValues$BT998)
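+#'
+#' ## the results can be retrieved with get_RLum(), e.g. the summary table
+#' result <- calc_HomogeneityTest(ExampleData.DeValues$BT998)
+#' get_RLum(result, "summary")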
+#'
+#' @export
+calc_HomogeneityTest <- function(
+  data,
+  log=TRUE,
+  ...
+){
+
+  ##============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##============================================================================##
+
+  if(missing(data)==FALSE){
+    if(is(data, "data.frame") == FALSE & is(data, "RLum.Results") == FALSE){
+      stop("[calc_FiniteMixture] Error: 'data' object has to be of type
+           'data.frame' or 'RLum.Results'!")
+    } else {
+      if(is(data, "RLum.Results") == TRUE){
+        data <- get_RLum(data, "data")
+
+      }
+    }
+  }
+
+  ##==========================================================================##
+  ## ... ARGUMENTS
+  ##==========================================================================##
+
+  extraArgs <- list(...)
+
+  ## console output
+  if("verbose" %in% names(extraArgs)) {
+    verbose<- extraArgs$verbose
+  } else {
+    verbose<- TRUE
+  }
+
+  ##============================================================================##
+  ## CALCULATIONS
+  ##============================================================================##
+
+  if(log==TRUE){
+    dat<- log(data)
+  } else {
+    dat<- data
+  }
+
+  wi<- 1/dat[2]^2
+  wizi<- wi*dat[1]
+  mu<- sum(wizi)/sum(wi)
+  gi<- wi*(dat[1]-mu)^2
+
+  G<- sum(gi)
+  df<- length(wi)-1
+  n<- length(wi)
+  P<- pchisq(G, df, lower.tail = FALSE)
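+  # (G is the weighted sum of squared deviations of the (logged) De values
+  # from their weighted mean mu; under the null hypothesis of homogeneity it
+  # is approximately chi-squared distributed with n - 1 degrees of freedom,
+  # which yields the P-value above)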
+
+  ##============================================================================##
+  ## OUTPUT
+  ##============================================================================##
+
+  if(verbose == TRUE) {
+    cat("\n [calc_HomogeneityTest]")
+    cat(paste("\n\n ---------------------------------"))
+    cat(paste("\n n:                 ", n))
+    cat(paste("\n ---------------------------------"))
+    cat(paste("\n mu:                ", round(mu,4)))
+    cat(paste("\n G-value:           ", round(G,4)))
+    cat(paste("\n Degrees of freedom:", df))
+    cat(paste("\n P-value:           ", round(P,4)))
+    cat(paste("\n ---------------------------------\n\n"))
+  }
+
+  ##============================================================================##
+  ## RETURN VALUES
+  ##============================================================================##
+
+  summary<- data.frame(n=n,g.value=G,df=df,P.value=P)
+
+  call<- sys.call()
+  args<- list(log=log)
+
+  newRLumResults.calc_HomogeneityTest <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary=summary,
+      data=data,
+      args=args,
+      call=call
+    ))
+
+  invisible(newRLumResults.calc_HomogeneityTest)
+
+}
diff --git a/R/calc_IEU.R b/R/calc_IEU.R
new file mode 100644
index 0000000..b3655f3
--- /dev/null
+++ b/R/calc_IEU.R
@@ -0,0 +1,448 @@
+#' Apply the internal-external-uncertainty (IEU) model after Thomsen et al.
+#' (2007) to a given De distribution
+#'
+#' Function to calculate the IEU De for a De data set.
+#'
+#' This function uses the equations of Thomsen et al. (2007).  The parameters a
+#' and b are estimated from dose-recovery experiments.
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#'
+#' @param a \code{\link{numeric}}: slope
+#'
+#' @param b \code{\link{numeric}}: intercept
+#'
+#' @param interval \code{\link{numeric}}: fixed interval (e.g. 5 Gy) used for
+#' the iteration of Dbar, from the mean down to Lowest.De; used to create
+#' Graph.IEU [Dbar.Fixed vs Z]
+#'
+#' @param decimal.point \code{\link{numeric}} (with default): number of decimal
+#' points for rounding calculations (e.g. 2)
+#'
+#' @param plot \code{\link{logical}} (with default): plot output
+#'
+#' @param \dots further arguments (\code{trace, verbose}).
+#'
+#' @return Returns a plot (optional) and terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following element:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#' \item{tables}{\link{list} a list of data frames containing all calculation
+#' tables}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}.
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Rachel Smedley, Geography & Earth Sciences, Aberystwyth University
+#' (United Kingdom) \cr Based on an excel spreadsheet and accompanying macro
+#' written by Kristina Thomsen.
+#'
+#' @seealso \code{\link{plot}}, \code{\link{calc_CommonDose}},
+#' \code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
+#' \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+#'
+#' @references Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
+#' Ancient TL 33, 16-21.
+#'
+#' Thomsen, K.J., Murray, A.S., Boetter-Jensen, L. & Kinahan, J.,
+#' 2007. Determination of burial dose in incompletely bleached fluvial samples
+#' using single grains of quartz. Radiation Measurements 42, 370-379.
+#'
+#' @examples
+#'
+#' ## load data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## apply the IEU model
+#' ieu <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1)
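+#'
+#' ## the results can then be retrieved with get_RLum(), e.g.:
+#' get_RLum(ieu, "summary")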
+#'
+#' @export
+calc_IEU <- function(
+  data,
+  a,
+  b,
+  interval,
+  decimal.point = 2,
+  plot = TRUE,
+  ...
+) {
+
+  ##==========================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##==========================================================================##
+  if(missing(data)==FALSE){
+    if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){
+      stop("[calc_IEU] Error: 'data' object has to be of type
+           'data.frame' or 'RLum.Results'!")
+    }else{
+      if(is(data, "RLum.Results") == TRUE){
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+
+  ##==========================================================================##
+  ## ... ARGUMENTS
+  ##==========================================================================##
+  extraArgs <- list(...)
+  ## console output
+  if ("verbose" %in% names(extraArgs)) {
+    verbose <- extraArgs$verbose
+  } else {
+    verbose <- TRUE
+  }
+  # trace calculations
+  if ("trace" %in% names(extraArgs)) {
+    trace <- extraArgs$trace
+  } else {
+    trace <- FALSE
+  }
+  # TODO: main, xlab, ylab, xlim, ylim, pch, col
+
+
+  ##============================================================================##
+  ## CALCULATIONS
+  ##============================================================================##
+  empty <- NULL
+  Table.Fixed.Iteration <- data.frame(matrix(nrow = 0, ncol = 9))
+  colnames(data) <- c("De", "De.Error")
+  data <- data[order(data$De), ]
+  Mean <- mean(data$De)
+  Dbar <- round(Mean, decimal.point)
+  Lowest.De <- round(data$De[1], decimal.point)
+
+  # (a) Calculate IEU at fixed intervals of Dbar starting from the Mean and
+  # subtracting the interval until Dbar is < Lowest.De; this creates a plot
+  N <- nrow(data)
+  Rank.number <- t(c(1:N))
+  De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2))
+  Table.Calculations <- data.frame(Rank.number = c(Rank.number),
+                                   De = c(data$De),
+                                   De.Total.Error = c(De.Total.Error))
+  Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2))
+  Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2))
+  Z <- Z.top/Z.bottom
+  Table.Calculations["Z"] <- Z
+
+  temp <- NULL
+  for (j in 1:N) {
+    for (i in j) {
+      Z <- Table.Calculations$Z[j]
+      x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2)
+      y <- (sum(x))
+      temp <- rbind(temp, data.frame(y))
+    }
+  }
+
+  EXT.top <- temp
+  EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom
+  EXT <- EXT.top/EXT.bottom
+  INT <- 1/Z.bottom
+  R <- sqrt(INT/EXT)
+  R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5)
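+    # (Z is the running weighted mean of the lowest i De values; INT and EXT
+    # estimate the internal and external uncertainty of that mean, and the
+    # ratio R = sqrt(INT/EXT) falls below unity once the external scatter
+    # exceeds the internal uncertainty; the IEU De is interpolated at R = 1)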
+
+  Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De,
+                          Table.Calculations$De.Total.Error, Table.Calculations$Z,
+                          EXT.top, EXT, INT, R, R.Error)
+
+  colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top",
+                           "EXT", "INT", "R", "R.Error")
+
+  Unity <- Table.IEU[R >= 1, ]
+  Max <- max(Unity$Rank.number, na.rm = TRUE)
+  Above.Z <- Table.IEU[Max, 4]
+  Above.Error <- Table.IEU[Max, 6]
+  Below.Z <- Table.IEU[Max + 1, 4]
+  Below.Error <- Table.IEU[Max + 1, 6]
+  Above.R <- Table.IEU[Max, 8]
+  Below.R <- Table.IEU[Max + 1, 8]
+  Slope <- (Above.R - Below.R)/(Above.Z - Below.Z)
+  Intercept <- Above.R - (Slope * Above.Z)
+  IEU.De <- round(((1 - Intercept)/Slope), decimal.point)
+  IEU.Error <- max(sqrt(Above.Error), sqrt(Below.Error))
+  IEU.Error <- round(IEU.Error, decimal.point)
+  n <- Max + 1
+
+  Dbar.Fixed <- Dbar - interval
+  Dbar.Mean <- c(1, Dbar, Dbar.Fixed, IEU.De, IEU.Error, n, Below.R, a, b)
+
+  repeat {
+    if (Dbar.Fixed < Lowest.De) {
+      break
+    } else {
+      Dbar <- Dbar.Fixed
+    }
+    De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2))
+    Table.Calculations <- data.frame(Rank.number = c(Rank.number),
+                                     De = c(data$De),
+                                     De.Total.Error = c(De.Total.Error))
+    Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2))
+    Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2))
+    Z <- Z.top/Z.bottom
+    Table.Calculations["Z"] <- Z
+
+    temp <- NULL
+    for (j in 1:N) {
+      for (i in j) {
+        Z <- Table.Calculations$Z[j]
+        x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2)
+        y <- (sum(x))
+        temp <- rbind(temp, data.frame(y))
+      }
+    }
+
+    EXT.top <- temp
+    EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom
+    EXT <- EXT.top/EXT.bottom
+    INT <- 1/Z.bottom
+    R <- sqrt(INT/EXT)
+    R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5)
+
+    Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De,
+                            Table.Calculations$De.Total.Error, Table.Calculations$Z,
+                            EXT.top, EXT, INT, R, R.Error)
+
+    colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top",
+                             "EXT", "INT", "R", "R.Error")
+
+    Unity <- Table.IEU[R >= 1, ]
+    Max <- max(Unity$Rank.number, na.rm = TRUE)
+    Above.Z <- Table.IEU[Max, 4]
+    Above.Error <- Table.IEU[Max, 6]
+    Below.Z <- Table.IEU[Max + 1, 4]
+    Below.Error <- Table.IEU[Max + 1, 6]
+    Above.R <- Table.IEU[Max, 8]
+    Below.R <- Table.IEU[Max + 1, 8]
+    Slope <- (Above.R - Below.R)/(Above.Z - Below.Z)
+    Intercept <- Above.R - (Slope * Above.Z)
+    Zbar <- round(((1 - Intercept)/Slope), decimal.point)
+    Zbar.Error <- max(sqrt(Above.Error), sqrt(Below.Error))
+    Zbar.Error <- round(Zbar.Error, decimal.point)
+    n <- Max + 1
+    Dbar.Fixed <- Dbar - interval
+    Table.Fixed.Iteration <- rbind(Table.Fixed.Iteration,
+                                   cbind(1, Dbar, Dbar.Fixed, Zbar, Zbar.Error,
+                                         n, Below.R, a, b))
+  }
+
+  Table.Fixed.Iteration <- rbind(Dbar.Mean, Table.Fixed.Iteration)
+  colnames(Table.Fixed.Iteration) <- c(FALSE, "Dbar", "Dbar.Fixed", "Zbar",
+                                       "Zbar.Error", "n", "Below.R", "a", "b")
+
+  if (plot) {
+    plot(Table.Fixed.Iteration$Dbar,
+         Table.Fixed.Iteration$Zbar,
+         type = "b",
+         ylab = "Zbar, weighted mean  (Gy)",
+         xlab = "Dbar (Gy)",
+         asp = 1/1)
+
+    arrows(Table.Fixed.Iteration$Dbar, Table.Fixed.Iteration$Zbar + Table.Fixed.Iteration$Zbar.Error,
+           Table.Fixed.Iteration$Dbar, Table.Fixed.Iteration$Zbar - Table.Fixed.Iteration$Zbar.Error,
+           col = 1, angle = 90, length = 0.05, code = 3)
+
+    abline(0, 1, untf = FALSE, lty = 3)
+  }
+
+  # (b) Calculate Dbar by iteration from [Dbar = Lowest.De] until [IEU.De = Dbar];
+  # this calculates the IEU De
+  Dbar <- Lowest.De
+  N <- nrow(data)
+  Rank.number <- t(c(1:N))
+  De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2))
+  Table.Calculations <- data.frame(Rank.number = c(Rank.number),
+                                   De = c(data$De),
+                                   De.Total.Error = c(De.Total.Error))
+  Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2))
+  Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2))
+  Z <- Z.top/Z.bottom
+  Table.Calculations["Z"] <- Z
+
+  temp <- NULL
+  for (j in 1:N) {
+    for (i in j) {
+      Z <- Table.Calculations$Z[j]
+      x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2)
+      y <- (sum(x))
+      temp <- rbind(temp, data.frame(y))
+    }
+  }
+
+  EXT.top <- temp
+  EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom
+  EXT <- EXT.top/EXT.bottom
+  INT <- 1/Z.bottom
+  R <- sqrt(INT/EXT)
+  R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5)
+
+  Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De,
+                          Table.Calculations$De.Total.Error, Table.Calculations$Z,
+                          EXT.top, EXT, INT, R, R.Error)
+
+  colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z",
+                           "EXT.top", "EXT", "INT", "R", "R.Error")
+
+  Unity <- Table.IEU[R >= 1, ]
+  Max <- max(Unity$Rank.number, na.rm = TRUE)
+  Above.Z <- Table.IEU[Max, 4]
+  Above.Error <- Table.IEU[Max, 6]
+  Below.Z <- Table.IEU[Max + 1, 4]
+  Below.Error <- Table.IEU[Max + 1, 6]
+  Above.R <- Table.IEU[Max, 8]
+  Below.R <- Table.IEU[Max + 1, 8]
+  Slope <- (Above.R - Below.R)/(Above.Z - Below.Z)
+  Intercept <- Above.R - (Slope * Above.Z)
+  IEU.De <- round(((1 - Intercept)/Slope), decimal.point)
+  IEU.Error <- max(sqrt(Above.Error), sqrt(Below.Error))
+  IEU.Error <- round(IEU.Error, decimal.point)
+  n <- Max + 1
+
+  repeat {
+    if (IEU.De <= Dbar) {
+      break
+    } else {
+      Dbar <- IEU.De
+    }
+    De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2))
+    Table.Calculations <- data.frame(Rank.number = c(Rank.number),
+                                     De = c(data$De),
+                                     De.Total.Error = c(De.Total.Error))
+    Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2))
+    Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2))
+    Z <- round((Z.top/Z.bottom), decimal.point)
+    Table.Calculations["Z"] <- Z
+
+    temp <- NULL
+    for (j in 1:N) {
+      for (i in j) {
+        Z <- Table.Calculations$Z[j]
+        x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2)
+        y <- (sum(x))
+        temp <- rbind(temp, data.frame(y))
+      }
+    }
+
+    EXT.top <- temp
+    EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom
+    EXT <- EXT.top/EXT.bottom
+    INT <- 1/Z.bottom
+    R <- sqrt(INT/EXT)
+    R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5)
+
+    Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De,
+                            Table.Calculations$De.Total.Error, Table.Calculations$Z,
+                            EXT.top, EXT, INT, R, R.Error)
+
+    colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top",
+                             "EXT", "INT", "R", "R.Error")
+
+    # to reduce the number of plots and increase performance
+    # intermediate calculations are only plotted when trace = TRUE
+    if (plot && trace) {
+      ymin <- min(Table.IEU$R[2:nrow(Table.IEU)] - Table.IEU$R.Error[2:nrow(Table.IEU)])
+      ymax <- max(Table.IEU$R[2:nrow(Table.IEU)] + Table.IEU$R.Error[2:nrow(Table.IEU)])
+      ylim <- c(ifelse(ymin > 0, 0, ymin), ymax)
+
+      plot(Table.IEU$Z, Table.IEU$R,
+           type = "b",
+           ylab = expression(paste("R = [", alpha["in"], "/", alpha["ex"],"]")),
+           xlab = "Z [Gy]",
+           ylim = ylim)
+
+      arrows(Table.IEU$Z, Table.IEU$R + Table.IEU$R.Error,
+             Table.IEU$Z, Table.IEU$R - Table.IEU$R.Error,
+             col = 1, angle = 90,
+             length = 0.05, code = 3)
+
+      abline(1, 0, untf = FALSE, lty = 3)
+    }
+
+    Unity <- Table.IEU[R >= 1, ]
+    Max <- max(Unity$Rank.number, na.rm = TRUE)
+    Above.Z <- Table.IEU[Max, 4]
+    Above.Error <- Table.IEU[Max, 6]
+    Below.Z <- Table.IEU[Max + 1, 4]
+    Below.Error <- Table.IEU[Max + 1, 6]
+    Above.R <- Table.IEU[Max, 8]
+    Below.R <- Table.IEU[Max + 1, 8]
+    Slope <- (Above.R - Below.R)/(Above.Z - Below.Z)
+    Intercept <- Above.R - (Slope * Above.Z)
+    IEU.De <- round(((1 - Intercept)/Slope), decimal.point)
+    IEU.Error <- max(sqrt(Above.Error), sqrt(Below.Error))
+    IEU.Error <- round(IEU.Error, decimal.point)
+    n <- Max + 1
+
+    if (trace) {
+      message(sprintf("[Iteration of Dbar] \n Dbar: %.4f \n IEU.De: %.4f \n IEU.Error: %.4f \n n: %i \n R: %.4f \n",
+                      Dbar, IEU.De, IEU.Error, n, Below.R))
+    }
+
+  }
+
+  # final plot
+  if (plot) {
+    ymin <- min(Table.IEU$R[2:nrow(Table.IEU)] - Table.IEU$R.Error[2:nrow(Table.IEU)])
+    ymax <- max(Table.IEU$R[2:nrow(Table.IEU)] + Table.IEU$R.Error[2:nrow(Table.IEU)])
+    ylim <- c(ifelse(ymin > 0, 0, ymin), ymax)
+
+    plot(Table.IEU$Z, Table.IEU$R,
+         type = "b",
+         ylab = expression(paste("R = [", alpha["in"], "/", alpha["ex"],"]")),
+         xlab = "Z [Gy]",
+         ylim = ylim)
+
+    arrows(Table.IEU$Z, Table.IEU$R + Table.IEU$R.Error,
+           Table.IEU$Z, Table.IEU$R - Table.IEU$R.Error,
+           col = 1, angle = 90,
+           length = 0.05, code = 3)
+
+    abline(1, 0, untf = FALSE, lty = 3)
+  }
+
+
+  Table.Results <- data.frame(Dbar, IEU.De, IEU.Error, n, a, b)
+  colnames(Table.Results) <- c("Dbar", "IEU.De (Gy)", "IEU.Error (Gy)",
+                               "Number of De", "a", "b")
+
+  ##==========================================================================##
+  ## TERMINAL OUTPUT
+  ##==========================================================================##
+  if (verbose) {
+    message(sprintf(
+      "\n [calc_IEU] \n\n Dbar: %.2f \n IEU.De (Gy): %.2f \n IEU.Error (Gy): %.2f Number of De: %.0f \n a: %.4f \n b: %.4f",
+      Table.Results[1], Table.Results[2], Table.Results[3],
+      Table.Results[4], Table.Results[5], Table.Results[6]))
+  }
+
+  ##==========================================================================##
+  ## RETURN VALUES
+  ##==========================================================================##
+  summary <- Table.Results[ ,c(-1, -5, -6)]
+  colnames(summary) <- c("de", "de_err", "n")
+
+  call <- sys.call()
+  args <- list(a = a, b = b, interval = interval,
+               decimal.point = decimal.point, plot = plot)
+
+  newRLumResults.calc_IEU <- set_RLum(
+    class = "RLum.Results",
+    data = list(summary = summary,
+                data = data,
+                args = args,
+                call = call,
+                tables = list(
+                  Table.IEUCalculations = Table.IEU,
+                  Table.Fixed.Iteration = Table.Fixed.Iteration,
+                  Table.IEUResults = Table.Results
+                )))
+
+  invisible(newRLumResults.calc_IEU)
+
+}
diff --git a/R/calc_MaxDose.R b/R/calc_MaxDose.R
new file mode 100644
index 0000000..9afee5d
--- /dev/null
+++ b/R/calc_MaxDose.R
@@ -0,0 +1,99 @@
+#' Apply the maximum age model to a given De distribution
+#'
+#' Function to fit the maximum age model to De data. This is a wrapper function
+#' that calls calc_MinDose() and applies a similar approach to that described in
+#' Olley et al. (2006).
+#'
+#' \bold{Data transformation} \cr\cr To estimate the maximum dose population
+#' and its standard error, the three parameter minimum age model of Galbraith
+#' et al. (1999) is adapted. The measured De values are transformed as follows:
+#' \cr\cr 1. convert De values to natural logs \cr 2. multiply the logged data
+#' to creat a mirror image of the De distribution\cr 3. shift De values along
+#' x-axis by the smallest x-value found to obtain only positive values \cr 4.
+#' combine in quadrature the measurement error associated with each De value
+#' with a relative error specified by sigmab \cr 5. apply the MAM to these data
+#' \cr\cr When all calculations are done the results are then converted as
+#' follows\cr\cr 1. subtract the x-offset \cr 2. multiply the natural logs by
+#' -1 \cr 3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
+#' \bold{Further documentation} \cr\cr Please see \code{\link{calc_MinDose}}.
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#' @param sigmab \code{\link{numeric}} (\bold{required}): spread in De values
+#' given as a fraction (e.g. 0.2). This value represents the expected
+#' overdispersion in the data should the sample be well-bleached (Cunningham &
+#' Wallinga 2012, p. 100).
+#' @param log \code{\link{logical}} (with default): fit the (un-)logged three
+#' parameter minimum dose model to De data
+#' @param par \code{\link{numeric}} (with default): apply the 3- or
+#' 4-parametric minimum age model (\code{par=3} or \code{par=4}).
+#' @param bootstrap \code{\link{logical}} (with default): apply the recycled
+#' bootstrap approach of Cunningham & Wallinga (2012).
+#' @param init.values \code{\link{numeric}} (with default): starting values for
+#' gamma, sigma, p0 and mu. Custom values need to be provided as a named list,
+#' e.g. \code{list(gamma = 100, sigma = 1.5, p0 = 0.1, mu = 100)} (see
+#' \code{\link{calc_MinDose}}).
+#' @param plot \code{\link{logical}} (with default): plot output
+#' (\code{TRUE}/\code{FALSE})
+#' @param \dots further arguments for bootstrapping (\code{bs.M, bs.N, bs.h,
+#' sigmab.sd}).  See details for their usage.
+#' @return Please see \code{\link{calc_MinDose}}.
+#' @section Function version: 0.3
+#' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
+#' rewritten S script of Rex Galbraith, 2010 \cr
+#' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+#' \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+#' \code{\link{calc_MinDose}}
+#' @references Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
+#' 2009. A revised burial dose estimation procedure for optical dating of young
+#' and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+#' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+#' track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+#' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
+#' 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
+#' northern Australia. Part I: experimental design and statistical models.
+#' Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
+#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
+#' R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+#' calculation and display in OSL dating: An overview and some recommendations.
+#' Quaternary Geochronology 11, 1-27. \cr\cr Olley, J.M., Roberts, R.G.,
+#' Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+#' associated with human burials at Lake Mungo, Australia. Quaternary Science
+#' Reviews 25, 2469-2474.\cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
+#' Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+#' (De) distributions: Implications for OSL dating of sediment mixtures.
+#' Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
+#' 2006. Statistical modelling of single grain quartz De distributions and an
+#' assessment of procedures for estimating burial dose. Quaternary Science
+#' Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+#' Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
+#' \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
+#' obtain a reproducible distribution? Ancient TL 26, 3-10. \cr\cr
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' # apply the maximum dose model
+#' calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3)
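+#'
+#' # a rough, purely illustrative sketch of the data transformation described
+#' # in the details section (not the internal implementation)
+#' x <- log(ExampleData.DeValues$CA1[ ,1]) # 1. natural logs
+#' x <- -x                                 # 2. mirror image
+#' x <- x - min(x)                         # 3. shift by the smallest value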
+#'
+#' @export
+calc_MaxDose<- function(
+  data,
+  sigmab,
+  log=TRUE,
+  par=3,
+  bootstrap=FALSE,
+  init.values,
+  plot=TRUE,
+  ...
+){
+  res<- calc_MinDose(data, sigmab, log, par, bootstrap, init.values, plot=FALSE, invert=TRUE, ...)
+  res@originator<- "calc_MaxDose"
+  if (plot) try(plot_RLum.Results(res, ...))
+
+  invisible(res)
+}
diff --git a/R/calc_MinDose.R b/R/calc_MinDose.R
new file mode 100644
index 0000000..c494926
--- /dev/null
+++ b/R/calc_MinDose.R
@@ -0,0 +1,852 @@
+#' Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999)
+#' to a given De distribution
+#'
+#' Function to fit the (un-)logged three or four parameter minimum dose model
+#' (MAM-3/4) to De data.
+#'
+#' \bold{Parameters} \cr\cr This model has four parameters: \cr\cr
+#' \tabular{rl}{ \code{gamma}: \tab minimum dose on the log scale \cr
+#' \code{mu}: \tab mean of the non-truncated normal distribution \cr
+#' \code{sigma}: \tab spread in ages above the minimum \cr \code{p0}: \tab
+#' proportion of grains at gamma \cr } If \code{par=3} (default) the
+#' 3-parametric minimum age model is applied, where \code{gamma=mu}. For
+#' \code{par=4} the 4-parametric model is applied instead.\cr\cr
+#' \bold{(Un-)logged model} \cr\cr In the original version of the
+#' three-parameter minimum dose model, the basic data are the natural
+#' logarithms of the De estimates and relative standard errors of the De
+#' estimates. This model will be applied if \code{log=TRUE}. \cr\cr If
+#' \code{log=FALSE}, the modified un-logged model will be applied instead. This
+#' has essentially the same form as the original version.  \code{gamma} and
+#' \code{sigma} are in Gy and \code{gamma} becomes the minimum true dose in the
+#' population. \cr\cr While the original (logged) version of the minimum dose
+#' model may be appropriate for most samples (i.e. De distributions), the
+#' modified (un-logged) version is specially designed for modern-age and young
+#' samples containing negative, zero or near-zero De estimates (Arnold et al.
+#' 2009, p. 323). \cr\cr \bold{Initial values & boundaries} \cr\cr The log
+#' likelihood calculations use the \link{nlminb} function for box-constrained
+#' optimisation using PORT routines.  Accordingly, initial values for the four
+#' parameters can be specified via \code{init.values}. If no values are
+#' provided for \code{init.values} reasonable starting values are estimated
+#' from the input data.  If the final estimates of \emph{gamma}, \emph{mu},
+#' \emph{sigma} and \emph{p0} are totally off target, consider providing custom
+#' starting values via \code{init.values}. \cr In contrast to previous versions
+#' of this function the boundaries for the individual model parameters are no
+#' longer required to be explicitly specified. If you want to override the default
+#' boundary values use the arguments \code{gamma.lower}, \code{gamma.upper},
+#' \code{sigma.lower}, \code{sigma.upper}, \code{p0.lower}, \code{p0.upper},
+#' \code{mu.lower} and \code{mu.upper}.  \cr\cr
+#' \bold{Bootstrap} \cr\cr When
+#' \code{bootstrap=TRUE} the function applies the bootstrapping method as
+#' described in Wallinga & Cunningham (2012). By default, the minimum age model
+#' produces 1000 first level and 3000 second level bootstrap replicates
+#' (actually, the number of second level bootstrap replicates is three times
+#' the number of first level replicates unless specified otherwise).  The
+#' uncertainty on sigmab is 0.04 by default. These values can be changed by
+#' using the arguments \code{bs.M} (first level replicates), \code{bs.N}
+#' (second level replicates) and \code{sigmab.sd} (error on sigmab). With
+#' \code{bs.h} the bandwidth of the kernel density estimate can be specified.
+#' By default, \code{h} is calculated as \cr \deqn{h =
+#' (2*\sigma_{DE})/\sqrt{n}} \cr \bold{Multicore support} \cr\cr This function
+#' supports parallel computing and can be activated by \code{multicore=TRUE}.
+#' By default, the number of available logical CPU cores is determined
+#' automatically, but can be changed with \code{cores}. The multicore support
+#' is only available when \code{bootstrap=TRUE} and spawns \code{n} R instances
+#' for each core to get MAM estimates for each of the N and M bootstrap
+#' replicates. Note that this option is highly experimental and may or may not
+#' work for your machine. Also the performance gain increases for larger number
+#' of bootstrap replicates. Also note that with each additional core and hence
+#' R instance and depending on the number of bootstrap replicates the memory
+#' usage can significantly increase. Make sure that memory is always available,
+#' otherwise there will be a massive performance hit.
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De \code{(data[
+#' ,1])} and De error \code{(data[ ,2])}
+#' @param sigmab \code{\link{numeric}} (\bold{required}): spread in De values
+#' given as a fraction (e.g. 0.2). This value represents the expected
+#' overdispersion in the data should the sample be well-bleached (Cunningham &
+#' Walling 2012, p. 100).
+#' @param log \code{\link{logical}} (with default): fit the (un-)logged minimum
+#' dose model to De data
+#' @param par \code{\link{numeric}} (with default): apply the 3- or
+#' 4-parametric minimum age model (\code{par=3} or \code{par=4}). The MAM-3 is
+#' used by default.
+#' @param bootstrap \code{\link{logical}} (with default): apply the recycled
+#' bootstrap approach of Cunningham & Wallinga (2012).
+#' @param init.values \code{\link{numeric}} (optional): a named list with
+#' starting values for gamma, sigma, p0 and mu (e.g. \code{list(gamma=100,
+#' sigma=1.5, p0=0.1, mu=100)}). If no values are provided, the function
+#' attempts to estimate reasonable starting values from the data.
+#' @param level \code{\link{numeric}} (with default): the confidence level
+#' required (defaults to 0.95).
+#' @param plot \code{\link{logical}} (with default): plot output
+#' (\code{TRUE}/\code{FALSE})
+#' @param multicore \code{\link{logical}} (with default): enable parallel
+#' computation of the bootstrap by creating a multicore SNOW cluster. Depending
+#' on the number of available logical CPU cores this will drastically reduce
+#' the computation time. Note that this option is highly experimental and may
+#' not work for all machines. (\code{TRUE}/\code{FALSE})
+#' @param \dots (optional) further arguments for bootstrapping (\code{bs.M,
+#' bs.N, bs.h, sigmab.sd}).  See details for their usage. Further arguments are
+#' \code{verbose} to de-/activate console output (logical), \code{debug} for
+#' extended console output (logical) and \code{cores} (integer) to manually
+#' specify the number of cores to be used when \code{multicore=TRUE}.
+#' @return Returns a plot (optional) and terminal output. In addition an
+#' \code{\linkS4class{RLum.Results}} object is returned containing the
+#' following elements:
+#'
+#' \item{summary}{\link{data.frame} summary of all relevant model results.}
+#' \item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+#' used arguments} \item{call}{\link{call} the function call}
+#' \item{mle}{\link{mle2} object containing the maximum log likelihood functions
+#' for all parameters} \item{BIC}{\link{numeric} BIC score}
+#' \item{confint}{\link{data.frame} confidence intervals for all parameters}
+#' \item{profile}{\link{profile.mle2} the log likelihood profiles}
+#' \item{bootstrap}{\link{list} bootstrap results}
+#'
+#' The output should be accessed using the function
+#' \code{\link{get_RLum}}
+#' @note The default starting values for \emph{gamma}, \emph{mu}, \emph{sigma}
+#' and \emph{p0} may only be appropriate for some De data sets and may need to
+#' be changed for other data. This is especially true when the un-logged
+#' version is applied. \cr Also note that all R warning messages are suppressed
+#' when running this function. If the results seem odd consider re-running the
+#' model with \code{debug=TRUE} which provides extended console output and
+#' forwards all internal warning messages.
+#' @section Function version: 0.4.3
+#' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
+#' rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is
+#' based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair
+#' Cunningham is thanked for his help in implementing and cross-checking the
+#' code.
+#' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+#' \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+#' \code{\link{calc_MaxDose}}
+#' @references Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
+#' 2009. A revised burial dose estimation procedure for optical dating of young
+#' and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+#' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+#' track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+#' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
+#' 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
+#' northern Australia. Part I: experimental design and statistical models.
+#' Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
+#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
+#' R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+#' calculation and display in OSL dating: An overview and some recommendations.
+#' Quaternary Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr
+#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain
+#' equivalent dose (De) distributions: Implications for OSL dating of sediment
+#' mixtures. Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold,
+#' L.J., 2006. Statistical modelling of single grain quartz De distributions
+#' and an assessment of procedures for estimating burial dose. Quaternary
+#' Science Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+#' Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
+#' \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
+#' obtain a reproducible distribution? Ancient TL 26, 3-10. \cr\cr
+#' @examples
+#'
+#'
+#' ## Load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' # (1) Apply the minimum age model with minimum required parameters.
+#' # By default, this will apply the un-logged 3-parametric MAM.
+#' calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1)
+#'
+#' # (2) Re-run the model, but save results to a variable and turn
+#' # plotting of the log-likelihood profiles off.
+#' mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
+#'                     sigmab = 0.1,
+#'                     plot = FALSE)
+#'
+#' # Show structure of the RLum.Results object
+#' mam
+#'
+#' # Show summary table that contains the most relevant results
+#' res <- get_RLum(mam, "summary")
+#' res
+#'
+#' # Plot the log likelihood profiles retroactively, because before
+#' # we set plot = FALSE
+#' plot_RLum(mam)
+#'
+#' # Plot the dose distribution in an abanico plot and draw a line
+#' # at the minimum dose estimate
+#' plot_AbanicoPlot(data = ExampleData.DeValues$CA1,
+#'                  main = "3-parameter Minimum Age Model",
+#'                  line = mam,polygon.col = "none",
+#'                  hist = TRUE,
+#'                  rug = TRUE,
+#'                  summary = c("n", "mean", "mean.weighted", "median", "in.ci"),
+#'                  centrality = res$de,
+#'                  line.col = "red",
+#'                  grid.col = "none",
+#'                  line.label = paste0(round(res$de, 1), "\U00B1",
+#'                                      round(res$de_err, 1), " Gy"),
+#'                  bw = 0.1,
+#'                  ylim = c(-25, 18),
+#'                  summary.pos = "topleft",
+#'                  mtext = bquote("Parameters: " ~
+#'                                   sigma[b] == .(get_RLum(mam, "args")$sigmab) ~ ", " ~
+#'                                   gamma == .(round(log(res$de), 1)) ~ ", " ~
+#'                                   sigma == .(round(res$sig, 1)) ~ ", " ~
+#'                                   rho == .(round(res$p0, 2))))
+#'
+#'
+#' \dontrun{
+#' # (3) Run the minimum age model with bootstrap
+#' # NOTE: Bootstrapping is computationally intensive
+#' # (3.1) run the minimum age model with default values for bootstrapping
+#' calc_MinDose(data = ExampleData.DeValues$CA1,
+#'              sigmab = 0.15,
+#'              bootstrap = TRUE)
+#'
+#' # (3.2) Bootstrap control parameters
+#' mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
+#'                     sigmab = 0.15,
+#'                     bootstrap = TRUE,
+#'                     bs.M = 300,
+#'                     bs.N = 500,
+#'                     bs.h = 4,
+#'                     sigmab.sd = 0.06,
+#'                     plot = FALSE)
+#'
+#' # Plot the results
+#' plot_RLum(mam)
+#'
+#' # save bootstrap results in a separate variable
+#' bs <- get_RLum(mam, "bootstrap")
+#'
+#' # show structure of the bootstrap results
+#' str(bs, max.level = 2, give.attr = FALSE)
+#'
+#' # print summary of minimum dose and likelihood pairs
+#' summary(bs$pairs$gamma)
+#'
+#' # Show polynomial fits of the bootstrap pairs
+#' bs$poly.fits$poly.three
+#'
+#' # Plot various statistics of the fit using the generic plot() function
+#' par(mfcol=c(2,2))
+#' plot(bs$poly.fits$poly.three, ask = FALSE)
+#'
+#' # Show the fitted values of the polynomials
+#' summary(bs$poly.fits$poly.three$fitted.values)
+#' }
+#'
+#' @export
+calc_MinDose <- function(
+  data,
+  sigmab,
+  log = TRUE,
+  par = 3,
+  bootstrap = FALSE,
+  init.values,
+  level = 0.95,
+  plot = TRUE,
+  multicore = FALSE,
+  ...
+){
+
+  ## ============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ## ============================================================================##
+  if (!missing(data)) {
+    if (!is(data, "data.frame") & !is(data, "RLum.Results")) {
+      stop("[calc_MinDose] Error: 'data' object has to be of type\n
+           'data.frame' or 'RLum.Results'!")
+    } else {
+      if (is(data, "RLum.Results")) {
+        data <- get_RLum(data, "data")
+      }
+    }
+  }
+
+
+  ##============================================================================##
+  ## ... ARGUMENTS
+  ##============================================================================##
+
+  extraArgs <- list(...)
+
+  ## check if this function is called by calc_MaxDose()
+  if ("invert" %in% names(extraArgs)) {
+    invert <- extraArgs$invert
+    if (!log) {
+      log <- TRUE # overwrite user choice as max dose model currently only supports the logged version
+      cat(paste("\n[WARNING] The maximum dose model only supports the logged version.",
+                "'log' was automatically changed to TRUE.\n\n"))
+    }
+  } else {
+    invert <- FALSE
+  }
+
+  ## console output
+  if ("verbose" %in% names(extraArgs)) {
+    verbose <- extraArgs$verbose
+  } else {
+    verbose <- TRUE
+  }
+
+  ## bootstrap replications
+  # first level bootstrap
+  if ("bs.M" %in% names(extraArgs)) {
+    M <- as.integer(extraArgs$bs.M)
+  } else {
+    M <- 1000
+  }
+
+  # second level bootstrap
+  if ("bs.N" %in% names(extraArgs)) {
+    N <- as.integer(extraArgs$bs.N)
+  } else {
+    N <- 3*M
+  }
+
+  # KDE bandwidth
+  if ("bs.h" %in% names(extraArgs)) {
+    h <- extraArgs$bs.h
+  } else {
+    h <- (sd(data[ ,1])/sqrt(length(data[ ,1])))*2
+  }
+
+  # standard deviation of sigmab
+  if ("sigmab.sd" %in% names(extraArgs)) {
+    sigmab.sd <- extraArgs$sigmab.sd
+  } else {
+    sigmab.sd <- 0.04
+  }
+
+  if ("debug" %in% names(extraArgs)) {
+    debug <- extraArgs$debug
+  } else {
+    debug <- FALSE
+  }
+
+  if ("cores" %in% names(extraArgs)) {
+    cores <- extraArgs$cores
+  } else {
+    cores <- parallel::detectCores()
+    if (multicore)
+      message(paste("Logical CPU cores detected:", cores))
+  }
+
+  ## WARNINGS ----
+  if (!debug)
+    options(warn = -1)
+
+  ##============================================================================##
+  ## START VALUES
+  ##============================================================================##
+
+  if (missing(init.values)) {
+    start <- list(gamma = ifelse(log, log(quantile(data[ ,1], probs = 0.25)),
+                                 quantile(data[ ,1], probs = 0.25)),
+                  sigma = 1.2,
+                  p0 = 0.01,
+                  mu = ifelse(log, log(quantile(data[ ,1], probs = 0.25)),
+                              mean(data[ ,1])))
+  } else {
+    start <- list(gamma = init.values$gamma,
+                  sigma = init.values$sigma,
+                  p0 = init.values$p0,
+                  mu = init.values$mu)
+  }
+
+  ##============================================================================##
+  ## ESTIMATE BOUNDARY PARAMETERS
+  ##============================================================================##
+
+  boundaries <- list(
+    # gamma.lower = min(data[ ,1]/10),
+    # gamma.upper = max(data[ ,1]*1.1),
+    # sigma.lower = 0,
+    # sigma.upper = 5,
+    # mu.lower = min(data[ ,1])/10,
+    # mu.upper = max(data[ ,1]*1.1)
+    gamma.lower = -Inf,
+    gamma.upper = Inf,
+
+    sigma.lower = 0,
+    sigma.upper = Inf,
+
+    p0.lower = 0,
+    p0.upper = 1,
+
+    mu.lower = -Inf,
+    mu.upper = Inf
+  )
+
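+  # allow user-supplied boundary values passed via ... to override the
+  # defaults; modifyList() also absorbs unrelated ... arguments, which is
+  # harmless here as the list is only ever read by name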
+  boundaries <- modifyList(boundaries, list(...))
+
+  # combine lower and upper boundary values to vectors
+  if (log) {
+    xlb <- c(log(boundaries$gamma.lower), boundaries$sigma.lower, boundaries$p0.lower)
+    xub <- c(log(boundaries$gamma.upper), boundaries$sigma.upper, boundaries$p0.upper)
+  } else {
+    xlb <- c(boundaries$gamma.lower, boundaries$sigma.lower, boundaries$p0.lower)
+    xub <- c(boundaries$gamma.upper, exp(boundaries$sigma.upper), boundaries$p0.upper)
+  }
+  if (par==4) {
+    xlb <- c(xlb, ifelse(log, log(boundaries$mu.lower), boundaries$mu.lower))
+    xub <- c(xub, ifelse(log, log(boundaries$mu.upper), boundaries$mu.upper))
+  }
+
+  ##============================================================================##
+  ## AUXILIARY FUNCTIONS
+  ##============================================================================##
+
+  # THIS FUNCTION CALCULATES THE NEGATIVE LOG LIKELIHOOD OF THE DATA
+  Neglik_f <- function(gamma, sigma, p0, mu, data) {
+    # this calculates the negative of the log likelihood of the
+    # data (data) for a given set of parameters (gamma, sigma, p0)
+    # data is an n x 2 matrix of data: De, rel_error (including sigma_b)
+
+    # recover the data
+    zi <- data[ ,1]
+    si <- data[ ,2]
+    n <- length(zi)
+
+    # in the MAM-3 gamma and mu are assumed to be equal
+    if (par == 3)
+      mu <- gamma
+
+    # calculate sigma^2 + si^2, mu0 and sigma0
+    s2 <- sigma^2 + si^2
+    sigma0 <- 1/sqrt(1/sigma^2 + 1/si^2)
+    mu0 <- (mu/sigma^2 + zi/si^2)/(1/sigma^2 + 1/si^2)
+
+    # calculate the log-likelihood
+    logsqrt2pi <- 0.5*log(2*pi)
+    res0 <- (gamma - mu0)/sigma0
+    res1 <- (gamma - mu)/sigma
+    lf1i <- log(p0) - log(si) - 0.5*((zi-gamma)/si)^2   - logsqrt2pi
+    lf2i <- log(1-p0) - 0.5*log(s2) - 0.5*(zi-mu)^2/s2  - logsqrt2pi
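+    # the following CDF terms correct the second mixture component for the
+    # truncation of the normal distribution below gamma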
+    lf2i <- lf2i + log(1-pnorm(res0)) - log(1-pnorm(res1))
+    llik <- log( exp(lf1i) + exp(lf2i) )
+    negll <- -sum(llik)
+
+    return(negll)
+  }
+
+  # THIS MINIMIZES THE Neglik_f FUNCTION (I.E., MAXIMIZES THE LIKELIHOOD) AND RETURNS AN MLE OBJECT
+  Get_mle <- function(data) {
+    # TODO: PROPER ERROR HANDLING
+    tryCatch({
+      mle <- bbmle::mle2(data = list(data = data),
+                         optimizer = "nlminb",
+                         lower=c(gamma = boundaries$gamma.lower,
+                                 sigma = boundaries$sigma.lower,
+                                 p0 = boundaries$p0.lower,
+                                 mu = boundaries$mu.lower),
+                         upper=c(gamma = boundaries$gamma.upper,
+                                 sigma = boundaries$sigma.upper,
+                                 p0 = boundaries$p0.upper,
+                                 mu = boundaries$mu.upper),
+                         minuslogl = Neglik_f,
+                         control = list(iter.max = 1000L),
+                         start = start)
+    }, error = function(e) {
+      stop(paste("Sorry, seems like I encountered an error...:", e), call. = FALSE)
+    })
+    return(mle)
+  }
+
+  ##============================================================================##
+  ## MAIN PROGRAM
+  ##============================================================================##
+
+  # combine errors
+  if (log) {
+    if (invert) {
+      lcd <- log(data[ ,1])*-1
+      x.offset <- abs(min(lcd))
+      lcd <- lcd+x.offset
+    } else {
+      lcd <- log(data[ ,1])
+    }
+    lse <- sqrt((data[ ,2]/data[ ,1])^2 + sigmab^2)
+  } else {
+    lcd <- data[ ,1]
+    lse <- sqrt(data[ ,2]^2 + sigmab^2)
+  }
+
+  # create new data frame with DE and combined relative error
+  dat <- cbind(lcd, lse)
+
+  # get the maximum likelihood estimate
+  ests <- Get_mle(dat)
+
+  # check if any standard errors are NA or NaN
+  coef_err <- t(as.data.frame(bbmle::summary(ests)@coef[ ,2]))
+
+  if (debug)
+    print(bbmle::summary(ests))
+
+  if (any(is.nan(coef_err)))
+    coef_err[which(is.nan(coef_err))] <- t(as.data.frame(ests@coef))/100
+  if (any(is.na(coef_err)))
+    coef_err[which(is.na(coef_err))] <- t(as.data.frame(ests@coef))/100
+
+  if (par == 3)
+    which <- c("gamma", "sigma", "p0")
+  if (par == 4)
+    which <- c("gamma", "sigma", "p0", "mu")
+
+  # calculate profile log likelihoods
+  prof <- bbmle::profile(ests,
+                         which = which,
+                         std.err = as.vector(coef_err),
+                         #try_harder = TRUE,
+                         quietly = TRUE,
+                         tol.newmin = Inf,
+                         skiperrs = TRUE,
+                         prof.lower=c(gamma = -Inf,
+                                      sigma = 0,
+                                      p0 = 0,
+                                      mu = -Inf),
+                         prof.upper=c(gamma = Inf,
+                                      sigma = Inf,
+                                      p0 = 1,
+                                      mu = Inf)
+  )
+  # Fallback when profile() returns a 'better' fit
+  maxsteps <- 100
+  cnt <- 1
+  while (!inherits(prof, "profile.mle2")) {
+    message(paste0("## Trying to find a better fit (", cnt, "/10) ##"))
+    if (maxsteps == 0L)
+      stop(paste("Sorry, but I can't find a converging fit for the profile log-likelihood."),
+           call. = FALSE)
+
+    prof <- bbmle::profile(ests,
+                           which = which,
+                           std.err = as.vector(coef_err),
+                           try_harder = TRUE,
+                           quietly = TRUE,
+                           maxsteps = maxsteps,
+                           tol.newmin = Inf,
+                           skiperrs = TRUE,
+                           prof.lower=c(gamma = -Inf,
+                                        sigma = 0,
+                                        p0 = 0,
+                                        mu = -Inf),
+                           prof.upper=c(gamma = Inf,
+                                        sigma = Inf,
+                                        p0 = 1,
+                                        mu = Inf)
+    )
+    maxsteps <- maxsteps - 10
+    cnt <- cnt + 1
+  }
+
+  ## TODO: reduce the redundant code
+  ## DELETE rows where z = -Inf/Inf
+  prof@profile$gamma <- prof@profile$gamma[which(prof@profile$gamma["z"] != Inf), ]
+  prof@profile$gamma <- prof@profile$gamma[which(prof@profile$gamma["z"] != -Inf), ]
+  prof@profile$sigma <- prof@profile$sigma[which(prof@profile$sigma["z"] != Inf), ]
+  prof@profile$sigma <- prof@profile$sigma[which(prof@profile$sigma["z"] != -Inf), ]
+  prof@profile$p0 <- prof@profile$p0[which(prof@profile$p0["z"] != Inf), ]
+  prof@profile$p0 <- prof@profile$p0[which(prof@profile$p0["z"] != -Inf), ]
+
+  if (par == 4) {
+    prof@profile$mu <- prof@profile$mu[which(prof@profile$mu["z"] != Inf), ]
+    prof@profile$mu <- prof@profile$mu[which(prof@profile$mu["z"] != -Inf), ]
+  }
+
+  # calculate Bayesian Information Criterion (BIC)
+  BIC <- BIC(ests)
+
+  # retrieve results from mle2-object
+  pal <- if (log) {
+    if (invert) {
+      exp((bbmle::coef(ests)[["gamma"]]-x.offset)*-1)
+    } else {
+      exp(bbmle::coef(ests)[["gamma"]])
+    }
+  } else {
+    bbmle::coef(ests)[["gamma"]]
+  }
+  sig <- bbmle::coef(ests)[["sigma"]]
+  p0end <- bbmle::coef(ests)[["p0"]]
+
+  if (par == 4) {
+    muend <- ifelse(log, exp(bbmle::coef(ests)[["mu"]]), bbmle::coef(ests)[["mu"]])
+  } else {
+    muend <- NA
+  }
+
+  ##============================================================================##
+  ## ERROR CALCULATION
+
+  #### METHOD 1: follow the instructions of Galbraith & Roberts (2012) ####
+  # "If the likelihood profile is symmetrical about the parameter, an approximate standard error
+  #  can be calculated by dividing the length of this interval by 3.92"
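+  # (the divisor 3.92 = 2 * 1.96, i.e. the width of a symmetric 95 % confidence
+  #  interval in standard error units; strictly speaking this only holds for
+  #  level = 0.95)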
+  conf <- as.data.frame(bbmle::confint(prof, tol.newmin = Inf, quietly = TRUE, level = level))
+  class(conf[,1]) <- class(conf[,2]) <- "numeric"
+
+  if (invert) {
+    conf[1, ] <- (conf[1, ]-x.offset)*-1
+    tmp <- conf[1,1]
+    conf[1,1] <- conf[1,2]
+    conf[1,2] <- tmp
+  }
+  gamma_err <- if (log) {
+    (exp(conf["gamma",2])-exp(conf["gamma",1]))/3.92
+  } else {
+    (conf["gamma",2]-conf["gamma",1])/3.92
+  }
+
+  ##============================================================================##
+  ## AGGREGATE RESULTS
+  summary <- data.frame(de=pal,
+                        de_err=gamma_err,
+                        ci_level = level,
+                        "ci_lower"=ifelse(log, exp(conf["gamma",1]), conf["gamma",1]),
+                        "ci_upper"=ifelse(log, exp(conf["gamma",2]), conf["gamma",2]),
+                        par=par,
+                        sig=sig,
+                        p0=p0end,
+                        mu=muend,
+                        Lmax=-ests@min,
+                        BIC=BIC)
+  call <- sys.call()
+  args <- list(log=log, sigmab=sigmab, bootstrap=bootstrap,
+               init.values=start,
+               bs.M=M, bs.N=N, bs.h=h, sigmab.sd=sigmab.sd)
+
+  ##============================================================================##
+  ## BOOTSTRAP
+  ##============================================================================##
+  if (bootstrap) {
+
+    ## BOOTSTRAP FUNCTIONS ----
+    # Function that draws N+M sets of integer values from 1:n and returns
+    # both the indices and frequencies
+    draw_Freq <- function() {
+      f <- R <- matrix(0L, N+M, n)
+      for (i in seq_len(N+M)) {
+        R[i, ] <- sample(x = n, size = n, replace = TRUE)
+        f[i, ] <- tabulate(R[i, ], n)  # frequencies of the i-th resample only
+      }
+      return(list(R = R, freq = f))
+    }
+
+    # Function that adds the additional error sigmab to each individual DE error
+    combine_Errors <- function(d, e) {
+      if (log) {
+        d[ ,2] <- sqrt((d[ ,2]/d[ ,1])^2 + e^2)
+        d[ ,1] <- log(d[ ,1])
+      } else {
+        d[ ,2] <- sqrt(d[ ,2]^2 + e^2)
+      }
+      return(d)
+    }
+
+    # Function that produces N+M replicates from the original data set using
+    # randomly sampled indices with replacement and adding a randomly drawn
+    # sigmab error
+    create_Replicates <- function(f, s) {
+      d <- apply(f$R, 1, function(x) data[x, ])
+      r <- mapply(function(x, y) combine_Errors(x, y), d, s, SIMPLIFY = FALSE)
+      return(r)
+    }
+
+    # Function to extract the estimate of gamma from mle2 objects and convert
+    # it back to the 'normal' scale
+    save_Gamma <- function(d) {
+      if (log) {
+        if (invert) {
+          m <- exp((bbmle::coef(d)[["gamma"]]-x.offset)*-1)
+        } else {
+          m <- exp(bbmle::coef(d)[["gamma"]])
+        }
+      } else {
+        m <- bbmle::coef(d)[["gamma"]]
+      }
+      return(m)
+    }
+
+    # Function that takes each of the N replicates and produces a kernel density
+    # estimate of length n. The normalised values are then returned as a matrix
+    # with dimensions [N, n]
+    get_KDE <- function(d) {
+      f <- approx(density(x=d[ ,1], kernel="gaussian", bw = h), xout = d[ ,1])
+      pStarTheta <- as.vector(f$y / sum(f$y))
+      x <- matrix(t(pStarTheta/(1/n)), N, n, byrow = TRUE)
+      return(x)
+    }
+
+    # Function that calculates the product term of the recycled bootstrap
+    get_ProductTerm <- function(Pmat, b2Pmatrix) {
+      prodterm <- apply(Pmat^b2Pmatrix$freq[1:N, ], 1, prod)
+      return(prodterm)
+    }
+
+    # Function that calculates the pseudo likelihoods for M replicates and
+    # returns the dose-likelihood pairs
+    make_Pairs <- function(theta, b2mamvec, prodterm) {
+      pairs <- matrix(0, M, 2)
+      for (i in seq_len(M)) {
+        thetavec <- matrix(theta[i], N, 1)
+        kdthis <- (thetavec-b2mamvec)/h
+        kd1 <- dnorm(kdthis)
+
+        kd2 <- kd1*prodterm[[i]]
+        kd <- sum(kd2)
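+        # Gaussian kernel density estimate of the pseudo-likelihood at
+        # theta[i], weighted by the recycled-bootstrap product term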
+        likelihood <- (1/(N*h))*kd
+        pairs[i, ] <- c(theta[i], likelihood)
+      }
+      return(pairs)
+    }
+
+    ## START BOOTSTRAP ----
+    msg <- sprintf(paste("\n [calc_MinDose] \n\nRecycled Bootstrap",
+                         "\n\nParameters:",
+                         "\n M = %d",
+                         "\n N = %d",
+                         "\n sigmab = %.2f \U00B1 %.2f",
+                         "\n h = %.2f",
+                         "\n\n Creating %d bootstrap replicates..."),
+                   M, N, sigmab, sigmab.sd, h, N+M)
+    message(msg)
+
+    n <- length(data[ ,1])
+    # Draw N+M samples of a normally distributed sigmab
+    sigmab <- rnorm(N + M, sigmab, sigmab.sd)
+    # Draw N+M random indices and their frequencies
+    b2Pmatrix <- draw_Freq()
+    # Finally draw N+M bootstrap replicates
+    replicates <- create_Replicates(b2Pmatrix, sigmab)
+
+    # MULTICORE: The call to 'Get_mle' is the bottleneck of the function.
+    # Using multiple CPU cores can reduce the computation cost, but may
+    # not work for all machines.
+    if (multicore) {
+      message(paste("\n Spawning", cores, "instances of R for parallel computation. This may take a few seconds..."))
+      cl <- parallel::makeCluster(cores)
+      message("\n Done! Applying the model to all replicates. This may take a while...")
+      mle <- parallel::parLapply(cl, replicates, Get_mle)
+      parallel::stopCluster(cl)
+    } else {
+      message("\n Applying the model to all replicates. This may take a while...")
+      mle <- lapply(replicates, Get_mle)
+    }
+
+    # Final bootstrap calculations
+    message("\n Calculating the likelihoods...")
+    # Save 2nd- and 1st-level bootstrap results (i.e. estimates of gamma)
+    b2mamvec <- as.matrix(sapply(mle[1:N], save_Gamma, simplify = TRUE))
+    theta <- sapply(mle[(N+1):(N+M)], save_Gamma)
+    # Calculate the probability/pseudo-likelihood
+    Pmat <- lapply(replicates[(N+1):(N+M)], get_KDE)
+    prodterm <- lapply(Pmat, get_ProductTerm, b2Pmatrix)
+    # Save the bootstrap results as dose-likelihood pairs
+    pairs <- make_Pairs(theta, b2mamvec, prodterm)
+
+    ## --------- FIT POLYNOMIALS -------------- ##
+    message("\n Fit curves to dose-likelihood pairs...")
+    # polynomial fits of increasing degrees
+    poly.three <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 3, raw = TRUE))
+    poly.four <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 4, raw = TRUE))
+    poly.five <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 5, raw = TRUE))
+    poly.six <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 6, raw = TRUE))
+
+    ## --------- FIT LOESS -------------- ##
+    # Polynomials are probably not reasonable and often suffer badly from
+    # overfitting, especially towards the margins of the fitted data. In this
+    # particular use case polynomials may suggest a multimodal likelihood
+    # distribution where none actually exists. The non-parametric
+    # LOESS (LOcal regrESSion) often yields better results than
+    # standard polynomials.
+    loess <- loess(pairs[ ,2] ~ pairs[ ,1])
+
+  }#EndOf::Bootstrap
+
+  ##============================================================================##
+  ## CONSOLE PRINT
+  ##============================================================================##
+  if (verbose) {
+    if (!bootstrap) {
+      cat("\n----------- meta data -----------\n")
+      print(data.frame(n=length(data[ ,1]),
+                       par=par,
+                       sigmab=sigmab,
+                       logged=log,
+                       Lmax=-ests@min,
+                       BIC=BIC,
+                       row.names = ""))
+
+      cat("\n--- final parameter estimates ---\n")
+      print(round(data.frame(gamma=ifelse(!invert, bbmle::coef(ests)[["gamma"]], (bbmle::coef(ests)[["gamma"]]-x.offset)*-1),
+                             sigma=bbmle::coef(ests)[["sigma"]],
+                             p0=bbmle::coef(ests)[["p0"]],
+                             mu=ifelse(par==4, ifelse(log,log(muend),muend),0),
+                             row.names=""), 2))
+
+      cat("\n------ confidence intervals -----\n")
+      print(round(conf, 2))
+
+      cat("\n------ De (asymmetric error) -----\n")
+      print(round(data.frame(De=pal,
+                             "lower"=ifelse(log, ifelse(!invert, exp(conf["gamma",1]), exp((conf["gamma",2]-x.offset)*-1)), conf["gamma",1]),
+                             "upper"=ifelse(log, ifelse(!invert, exp(conf["gamma",2]), exp((conf["gamma",1]-x.offset)*-1)), conf["gamma",2]),
+                             row.names=""), 2))
+
+      cat("\n------ De (symmetric error) -----\n")
+      print(round(data.frame(De=pal,
+                             error=gamma_err,
+                             row.names=""), 2))
+
+    } else if (bootstrap) {
+      message("\n Finished!")
+    }
+  }
+
+  ##============================================================================##
+  ## RETURN VALUES
+  ##============================================================================##
+
+  if (!bootstrap)
+    pairs <- poly.three <- poly.four <- poly.five <- poly.six <- loess <- NULL
+
+  newRLumResults.calc_MinDose <- set_RLum(
+    class = "RLum.Results",
+    data = list(summary = summary,
+                data = data,
+                args = args,
+                call = call,
+                mle = ests,
+                BIC = BIC,
+                confint = conf,
+                profile = prof,
+                bootstrap = list(
+                  pairs = list(gamma=pairs),
+                  poly.fits = list(poly.three = poly.three,
+                                   poly.four = poly.four,
+                                   poly.five = poly.five,
+                                   poly.six = poly.six),
+                  loess.fit = loess)))
+
+  ##=========##
+  ## PLOTTING
+  if (plot)
+    try(plot_RLum.Results(newRLumResults.calc_MinDose, ...))
+
+
+  if (!debug)
+    options(warn = 0)
+
+  if (!is.na(summary$mu) && !is.na(summary$de)) {
+    if (log(summary$de) > summary$mu)
+      warning("Gamma is larger than mu. Consider re-running the model",
+              " with new boundary values (see details '?calc_MinDose').", call. = FALSE)
+  }
+
+  invisible(newRLumResults.calc_MinDose)
+
+}
diff --git a/R/calc_OSLLxTxRatio.R b/R/calc_OSLLxTxRatio.R
new file mode 100644
index 0000000..495e9b1
--- /dev/null
+++ b/R/calc_OSLLxTxRatio.R
@@ -0,0 +1,491 @@
+#' Calculate Lx/Tx ratio for CW-OSL curves
+#'
+#' Calculate Lx/Tx ratios from a given set of CW-OSL curves assuming late light background subtraction.
+#'
+#' The integrity of the chosen values for the signal and background integral is
+#' checked by the function; the signal integral limits have to be lower than
+#' the background integral limits. If a \link{vector} is given as input instead
+#' of a \link{data.frame}, an artificial \code{data.frame} is produced. The
+#' error calculation is done according to Galbraith (2002).\cr
+#'
+#' \bold{Please note:} In cases where the calculation results in \code{NaN} values (for
+#' example due to zero-signal, and therefore a division of 0 by 0), these \code{NaN} values are replaced
+#' by 0.
+#'
+#' \bold{sigmab}\cr
+#'
+#' The default value of \code{sigmab} is calculated assuming the background is
+#' constant and \bold{would not be} applicable when the background varies, as
+#' observed, e.g., for the early background subtraction method.\cr
+#'
+#' \bold{sig0}\cr
+#'
+#' This argument allows adding an extra component of error to the final Lx/Tx
+#' error value. The input is treated as a factor that is multiplied with the
+#' already calculated LxTx, and the result is added in quadrature:
+#'
+#' \deqn{se(LxTx) = \sqrt{se(LxTx)^2 + (LxTx * sig0)^2}}
+#'
+#'
+#' \bold{background.count.distribution}\cr
+#'
+#' This argument allows selecting the distribution assumption that is used for
+#' the error calculation. According to Galbraith (2002, 2014) the background
+#' counts may be overdispersed (i.e. they do not follow a Poisson distribution,
+#' which is assumed for the photomultiplier counts). In that case (which might
+#' be the normal case) the overdispersion has to be accounted for by estimating
+#' \eqn{\sigma^2} (i.e. the overdispersion value). Therefore the relative
+#' standard error is calculated as:\cr\cr (a) \code{poisson}\cr
+#' \deqn{rse(\mu_{S}) \approx \sqrt{Y_{0} + Y_{1}/k^2}/(Y_{0} - Y_{1}/k)} (b)
+#' \code{non-poisson}\cr \deqn{rse(\mu_{S}) \approx \sqrt{Y_{0} + Y_{1}/k^2 +
+#' \sigma^2(1+1/k)}/(Y_{0} - Y_{1}/k)}
+#'
+#' \bold{Please note} that when using the early background subtraction method in
+#' combination with the 'non-poisson' distribution argument, the corresponding Lx/Tx error
+#' may considerably increase due to a high sigmab value.
+#' Please check whether this is valid for your data set and, if necessary,
+#' consider providing your own sigmab value using the corresponding argument \code{sigmab}.
+#'
+#' @param Lx.data \code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+#' (\bold{required}): requires a CW-OSL shine down curve (x = time, y = counts)
+#'
+#' @param Tx.data \code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+#' (optional): requires a CW-OSL shine down curve (x = time, y = counts). If no
+#' input is given the Tx.data will be treated as \code{NA} and no Lx/Tx ratio
+#' is calculated.
+#'
+#' @param signal.integral \code{\link{vector}} (\bold{required}): vector with the
+#' limits for the signal integral.
+#'
+#' @param signal.integral.Tx \code{\link{vector}} (optional): vector with the
+#' limits for the signal integral for the Tx curve. If nothing is provided the
+#' value from \code{signal.integral} is used.
+#'
+#' @param background.integral \code{\link{vector}} (\bold{required}): vector with the
+#' bounds for the background integral.
+#'
+#' @param background.integral.Tx \code{\link{vector}} (optional): vector with the
+#' limits for the background integral for the Tx curve. If nothing is provided the
+#' value from \code{background.integral} is used.
+#'
+#' @param background.count.distribution \code{\link{character}} (with default): sets
+#' the count distribution assumed for the error calculation. Possible arguments
+#' \code{poisson} or \code{non-poisson}. See details for further information
+#'
+#' @param sigmab \code{\link{numeric}} (optional): option to set a manual value for
+#' the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
+#' calculation. The value should be provided as absolute squared count values,
+#' e.g. \code{sigmab = c(300,300)}. Note: If only one value is provided this
+#' value is taken for both (LnTx and TnTx) signals.
+#'
+#' @param sig0 \code{\link{numeric}} (with default): allow adding an extra component of error
+#' to the final Lx/Tx error value (e.g., instrumental error, see details).
+#'
+#' @param digits \code{\link{integer}} (with default): round numbers to the specified digits. If
+#' digits is set to \code{NULL} nothing is rounded.
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+#'
+#' Slot \code{data} contains a \code{\link{list}} with the following structure:\cr
+#' $LxTx.table (data.frame) \cr
+#' .. $ LnLx \cr
+#' .. $ LnLx.BG \cr
+#' .. $ TnTx \cr
+#' .. $ TnTx.BG \cr
+#' .. $ Net_LnLx \cr
+#' .. $ Net_LnLx.Error\cr
+#' .. $ Net_TnTx \cr
+#' .. $ Net_TnTx.Error\cr
+#' .. $ LxTx\cr
+#' .. $ LxTx.Error \cr
+#' $ calc.parameters (list) \cr
+#' .. $ sigmab.LnTx\cr
+#' .. $ sigmab.TnTx\cr
+#' .. $ k \cr
+#' $ call (original function call)\cr
+#'
+#' @note The results of this function have been cross-checked with the Analyst
+#' (vers. 3.24b). Access to the results object via  \code{\link{get_RLum}}.\cr
+#'
+#' \bold{Caution:} If you are using the early background subtraction (EBG) method, please either provide your
+#' own \code{sigmab} value or use \code{background.count.distribution = "poisson"}.
+#'
+#'
+#' @section Function version: 0.6.3
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\link{Analyse_SAR.OSLdata}}, \code{\link{plot_GrowthCurve}},
+#' \code{\link{analyse_SAR.CWOSL}}
+#'
+#' @references Duller, G., 2007. Analyst.
+#' \url{http://www.nutech.dtu.dk/english/~/media/Andre_Universitetsenheder/Nutech/Produkter\%20og\%20services/Dosimetri/radiation_measurement_instruments/tl_osl_reader/Manuals/analyst_manual_v3_22b.ashx}\cr
+#'
+#' Galbraith, R.F., 2002. A note on the variance of a background-corrected OSL
+#' count. Ancient TL, 20 (2), 49-51.
+#'
+#' Galbraith, R.F., 2014. A further note on the variance of a
+#' background-corrected OSL count. Ancient TL, 31 (2), 1-3.
+#'
+#' @keywords datagen
+#'
+#' @examples
+#'
+#' ##load data
+#' data(ExampleData.LxTxOSLData, envir = environment())
+#'
+#' ##calculate Lx/Tx ratio
+#' results <- calc_OSLLxTxRatio(Lx.data, Tx.data, signal.integral = c(1:2),
+#'                              background.integral = c(85:100))
+#'
+#' ##get results object
+#' get_RLum(results)
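+#'
+#' ##illustrative sketch: add an extra 2 % error component via 'sig0' and
+#' ##assume a Poisson distributed background; the chosen values are arbitrary
+#' results.sig0 <- calc_OSLLxTxRatio(Lx.data, Tx.data,
+#'                                   signal.integral = c(1:2),
+#'                                   background.integral = c(85:100),
+#'                                   background.count.distribution = "poisson",
+#'                                   sig0 = 0.02)
+#' get_RLum(results.sig0)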
+#'
+#' @export
+calc_OSLLxTxRatio <- function(
+  Lx.data,
+  Tx.data,
+  signal.integral,
+  signal.integral.Tx = NULL,
+  background.integral,
+  background.integral.Tx = NULL,
+  background.count.distribution = "non-poisson",
+  sigmab = NULL,
+  sig0 = 0,
+  digits = NULL
+){
+
+  ##--------------------------------------------------------------------------##
+  ##(1) - integrity checks
+
+
+  if(missing(Tx.data) == FALSE){
+
+    ##(a) - check data type
+    if(is(Lx.data)[1]!=is(Tx.data)[1]){
+      stop("[calc_OSLLxTxRatio()] Data type of Lx and Tx data differs!")
+    }
+
+    ##(b) - test if data.type is valid in general
+    if(is(Lx.data)[1] == "RLum.Data.Curve"){
+
+      Lx.data <- as(Lx.data, "data.frame")
+      Tx.data <- as(Tx.data, "data.frame")
+
+
+    }else{
+
+      ##go further
+      if((is(Lx.data)[1] != "data.frame" &
+          is(Lx.data)[1] != "numeric") &
+         is(Lx.data)[1] != "matrix"){
+        stop("[calc_OSLLxTxRatio()] Data type error! Required types are data.frame or numeric vector.")
+      }
+    }
+
+    ##(c) - convert vector to data.frame if necessary
+    if(is(Lx.data)[1] != "data.frame" &
+       is(Lx.data)[1] != "matrix"){
+      Lx.data <- data.frame(x=1:length(Lx.data),y=Lx.data)
+      Tx.data <- data.frame(x=1:length(Tx.data),y=Tx.data)
+    }
+
+    ##(d) - check if Lx and Tx curves have the same channel length
+    if(length(Lx.data[,2]) != length(Tx.data[,2])){
+      stop("[calc_OSLLxTxRatio()] Channel number of Lx and Tx data differs!")}
+
+  }else{
+
+    Tx.data <- data.frame(x = NA,y = NA)
+
+    ##support RLum.objects
+    if(is(Lx.data)[1] == "RLum.Data.Curve"){
+      Lx.data <- as(Lx.data, "data.frame")
+
+    }
+
+    ##check for matrix
+    if(is(Lx.data)[1] == "matrix"){
+      Lx.data <- as.data.frame(Lx.data)
+
+    }
+
+    ##now it should be a data.frame; if not, try to produce one
+    if(is(Lx.data)[1]!="data.frame") {
+      Lx.data <- data.frame(x = 1:length(Lx.data),y = Lx.data)
+    }
+
+  }#endif::missing Tx.data
+
+  ##(e) - check if signal integral is valid
+  if(min(signal.integral) < 1 | max(signal.integral) > length(Lx.data[,2])){
+    stop("[calc_OSLLxTxRatio()] signal.integral is not valid!")}
+
+  ##(f) - check if background integral is valid
+  if(min(background.integral) < 1 | max(background.integral) > length(Lx.data[,2])){
+    stop(paste("[calc_OSLLxTxRatio()] background.integral is not valid! Max: ",length(Lx.data[,2]),sep=""))}
+
+  ##(g) - check that signal and background integral do not overlap
+  if(min(background.integral) <= max(signal.integral)){
+    stop("[calc_OSLLxTxRatio()] Overlapping of 'signal.integral' and 'background.integral' is not permitted!")}
+
+  ##(h) - similar procedure for the Tx limits
+  if(all(c(!is.null(signal.integral.Tx),!is.null(background.integral.Tx)))){
+
+    if(min(signal.integral.Tx) < 1 | max(signal.integral.Tx) > length(Tx.data[,2])){
+      stop("[calc_OSLLxTxRatio()] signal.integral.Tx is not valid!")}
+
+    if(min(background.integral.Tx) < 1 | max(background.integral.Tx) > length(Tx.data[,2])){
+      stop(paste("[calc_OSLLxTxRatio()] background.integral.Tx is not valid! Max: ",length(Tx.data[,2]),sep=""))}
+
+    if(min(background.integral.Tx) <= max(signal.integral.Tx)){
+      stop("[calc_OSLLxTxRatio()] Overlapping of 'signal.integral.Tx' and 'background.integral.Tx' is not permitted!")}
+
+  } else if(!all(c(is.null(signal.integral.Tx),is.null(background.integral.Tx)))){
+    stop("[calc_OSLLxTxRatio()] You have to provide both: signal.integral.Tx and background.integral.Tx!")
+
+  }else{
+    signal.integral.Tx <- signal.integral
+    background.integral.Tx <- background.integral
+
+  }
+
+
+
+  ##check sigmab
+  if (!is.null(sigmab)) {
+      if (!is(sigmab, "numeric")) {
+        stop("[calc_OSLLxTxRatio()] 'sigmab' has to be of type numeric.")
+      }
+
+      if (length(sigmab) > 2) {
+        stop("[calc_OSLLxTxRatio()] Maximum allowed vector length for 'sigmab' is 2.")
+      }
+  }
+
+
+
+  ##--------------------------------------------------------------------------##
+  ##(2) - read data and produce background subtracted values
+
+  ## calculate k value - expresses the background integral as a multiple of
+  ## the number of signal integral channels; note that k can also be < 1
+  n <- length(signal.integral)
+  m <- length(background.integral)
+  k <- m/n
+
+  n.Tx <- length(signal.integral.Tx)
+  m.Tx <- length(background.integral.Tx)
+  k.Tx <- m.Tx/n.Tx
+
+
+  ##LnLx (comments give the corresponding variables in Galbraith, 2002)
+  Lx.curve <- Lx.data[,2]
+  Lx.signal <- sum(Lx.curve[signal.integral])                #Y.0
+  Lx.background <- sum(Lx.curve[background.integral])        #Y.1
+  Lx.background <- Lx.background*1/k                         #mu.B
+  LnLx <- Lx.signal - Lx.background
+
+  ##TnTx
+  Tx.curve <- ifelse(is.na(Tx.data[,1])==FALSE, Tx.data[,2], NA)
+  Tx.signal <- sum(Tx.curve[signal.integral.Tx])
+  Tx.background <- sum(Tx.curve[background.integral.Tx])*1/k.Tx
+  TnTx <- (Tx.signal-Tx.background)
+
+  ##--------------------------------------------------------------------------##
+  ##(3)
+  ## calculate Lx/Tx Errors according Galbraith (2002) and the personal
+  ## communication of Galbraith (2014) via e-mail
+  ## Nomenclature as stated in the articles
+
+  ##(a)
+  ## set Y.0 (sum OSL signal including the background) and
+  ## Y.1 (total counts over m later channels)
+  Y.0 <- Lx.signal
+  Y.0_TnTx <- Tx.signal
+  Y.1 <- sum(Lx.curve[background.integral])
+  Y.1_TnTx <- sum(Tx.curve[background.integral.Tx])
+
+
+  ##(b) estimate overdispersion (here called sigmab), see equation (4) in
+  ## Galbraith (2002), Galbraith (2014)
+  ## If else condition for the case that k < 2
+
+  if(round(k,digits = 1) >= 2 & ((min(background.integral) + length(signal.integral)*(2+1)) <= length(Lx.curve))){
+
+    ##(b)(1)(1)
+    ## note that m = n*k = multiple of background.integral from signal.integral
+    Y.i <- sapply(0:round(k,digits=0), function(i){
+      sum(Lx.curve[
+        (min(background.integral)+length(signal.integral)*i):
+          (min(background.integral)+length(signal.integral)+length(signal.integral)*i)])
+    })
+
+    Y.i <- na.exclude(Y.i)
+    sigmab.LnLx <- abs(var(Y.i) - mean(Y.i))  ##sigmab is denoted as sigma^2 = s.Y^2-Y.mean
+    ##therefore here absolute values are given
+
+
+  }else{
+
+    ## provide warning if m is < 25, as suggested by Rex Galbraith
+    ## (low number of degrees of freedom)
+    if (m < 25) {
+      warning("[calc_OSLLxTxRatio()] Number of background channels for Lx < 25; error estimation might not be reliable!", call. = FALSE)
+
+    }
+
+    sigmab.LnLx <- abs((var(Lx.curve[background.integral]) -
+                          mean(Lx.curve[background.integral])) * n)
+
+  }
+
+  if (round(k.Tx, digits = 1) >= 2 &
+      ((
+        min(background.integral.Tx) + length(signal.integral.Tx) * (2 + 1)
+      ) <= length(Tx.curve))) {
+    ##(b)(1)(1)
+    ## note that m.Tx = n.Tx*k.Tx = multiple of background.integral.Tx from signal.integral.Tx
+    ## also for the TnTx signal
+    Y.i_TnTx <- sapply(0:round(k.Tx, digits = 0), function(i) {
+      sum(Tx.curve[(min(background.integral.Tx) + length(signal.integral.Tx) *
+                      i):(
+                        min(background.integral.Tx) + length(signal.integral.Tx) + length(signal.integral.Tx) *
+                          i
+                      )])
+    })
+
+    Y.i_TnTx <- na.exclude(Y.i_TnTx)
+    sigmab.TnTx <- abs(var(Y.i_TnTx) - mean(Y.i_TnTx))
+
+  } else{
+    ## provide warning if m is < 25, as suggested by Rex Galbraith
+    ## (low number of degrees of freedom)
+    if (m.Tx < 25) {
+      warning("[calc_OSLLxTxRatio()] Number of background channels for Tx < 25; error estimation might not be reliable!", call. = FALSE)
+
+    }
+
+    sigmab.TnTx <- abs((var(Tx.curve[background.integral.Tx]) -
+                          mean(Tx.curve[background.integral.Tx])) * n.Tx)
+  }
+
+
+  ##account for a manually set sigmab value
+  if (!is.null(sigmab)) {
+      if (length(sigmab) == 2) {
+        sigmab.LnLx <- sigmab[1]
+        sigmab.TnTx <- sigmab[2]
+
+      }else{
+        sigmab.LnLx <- sigmab[1]
+        sigmab.TnTx <- sigmab[1]
+
+      }
+  }
+
+  ##(c)
+  ## Calculate relative error of the background subtracted signal
+  ## according to Galbraith (2002), equation (6), with changes
+  ## from Galbraith (2014), equation 6
+  ## Discussion with Rex Galbraith via e-mail (2014-02-27):
+  ## Equation 6 is appropriate to be implemented as standard
+
+  if(background.count.distribution == "poisson"){
+
+    ##(c.1) estimate relative standard error for assuming a poisson distribution
+    LnLx.relError <- sqrt((Y.0 + Y.1/k^2))/(Y.0-Y.1/k)        ##  rse(mu.s)
+    TnTx.relError <- sqrt((Y.0_TnTx + Y.1_TnTx/k^2))/(Y.0_TnTx-Y.1_TnTx/k)
+
+  }else{
+
+    ##(c.2) estimate relative standard error for a non-poisson distribution
+    if(background.count.distribution != "non-poisson"){
+      warning("Unknown method for background.count.distribution. A non-poisson distribution is assumed!")}
+
+    LnLx.relError <- sqrt(Y.0 + Y.1/k^2 + sigmab.LnLx*(1+1/k))/
+      (Y.0 - Y.1/k)
+    TnTx.relError <- sqrt(Y.0_TnTx + Y.1_TnTx/k^2 + sigmab.TnTx*(1+1/k))/
+      (Y.0_TnTx - Y.1_TnTx/k)
+
+  }
+
+  ##(d)
+  ##calculate absolute standard error
+  LnLx.Error <- abs(LnLx*LnLx.relError)
+  TnTx.Error <- abs(TnTx*TnTx.relError)
+
+    ##we do not want to have NaN values, as they are mathematically correct, but make
+    ##no sense and would result in aliquots that become rejected later
+    if(is.nan(LnLx.Error)) LnLx.Error <- 0
+    if(is.nan(TnTx.Error)) TnTx.Error <- 0
+
+  ##combine results
+  LnLxTnTx <- cbind(
+    Lx.signal,
+    Lx.background,
+    Tx.signal,
+    Tx.background,
+    LnLx,
+    LnLx.Error,
+    TnTx,
+    TnTx.Error
+  )
+
+  ##--------------------------------------------------------------------------##
+  ##(4) Calculate LxTx error according Galbraith (2014)
+
+  #transform results into a data.frame
+  LnLxTnTx <- as.data.frame(LnLxTnTx)
+
+  #add col names
+  colnames(LnLxTnTx)<-c("LnLx", "LnLx.BG",
+                        "TnTx", "TnTx.BG",
+                        "Net_LnLx", "Net_LnLx.Error",
+                        "Net_TnTx", "Net_TnTx.Error")
+
+  ##calculate Lx/Tx
+  LxTx <- LnLxTnTx$Net_LnLx/LnLxTnTx$Net_TnTx
+
+    ##set NaN
+    if(is.nan(LxTx)) LxTx <- 0
+
+  ##calculate Lx/Tx error
+  LxTx.relError <- sqrt(LnLx.relError^2 + TnTx.relError^2)
+  LxTx.Error <- abs(LxTx * LxTx.relError)
+
+    ##set NaN
+    if(is.nan(LxTx.Error)) LxTx.Error <- 0
+
+    ##add an extra component of error
+    LxTx.Error <- sqrt(LxTx.Error^2 + (sig0 * LxTx)^2)
+
+  ##return combined values
+  temp <- cbind(LnLxTnTx,LxTx,LxTx.Error)
+
+
+  ##apply digits if wanted
+  if(!is.null(digits)){
+    temp[1,] <- round(temp[1,], digits = digits)
+
+  }
+
+  calc.parameters <- list(sigmab.LnLx = sigmab.LnLx,
+                          sigmab.TnTx = sigmab.TnTx,
+                          k = k)
+
+  ##set results object
+  temp.return <-
+    set_RLum(
+      class = "RLum.Results",
+      data = list(
+        LxTx.table = temp,
+        calc.parameters = calc.parameters,
+        call = sys.call())
+    )
+
+  invisible(temp.return)
+
+}
diff --git a/R/calc_SourceDoseRate.R b/R/calc_SourceDoseRate.R
new file mode 100644
index 0000000..b6af7ed
--- /dev/null
+++ b/R/calc_SourceDoseRate.R
@@ -0,0 +1,229 @@
+#' Calculation of the source dose rate via the date of measurement
+#'
+#' Calculating the dose rate of the irradiation source via the date of
+#' measurement based on: source calibration date, source dose rate, dose rate
+#' error. The function returns a data.frame (wrapped in an \code{\linkS4class{RLum.Results}}
+#' object) that provides the input argument \code{dose_rate} for the function \code{\link{Second2Gray}}.
+#'
+#' Calculation of the source dose rate based on the time elapsed since the last
+#' calibration of the irradiation source. Decay parameters assume a Sr-90 beta
+#' source. \deqn{dose.rate = D0 * exp(-(log(2) / T.1/2) * t)} \cr
+#' with: D0 = calibration dose rate, T.1/2 = half-life of the source nuclide
+#' (here in days), t = time since source calibration (in days);
+#' log(2) / T.1/2 equals the decay constant lambda.
+#'
+#' Information on the date of measurements may be taken from the data's
+#' original .BIN file (using e.g., BINfile <- readBIN2R() and the slot
+#' BINfile@@METADATA$DATE)
+#'
+#' \bold{Allowed source types and related values}
+#'
+#' \tabular{rllll}{ \bold{#} \tab \bold{Source type} \tab \bold{T.1/2} \tab
+#' \bold{Reference} \cr [1] \tab Sr-90 \tab 28.90 y \tab NNDC, Brookhaven
+#' National Laboratory \cr [2] \tab Am-241 \tab 432.6 y \tab NNDC, Brookhaven
+#' National Laboratory \cr [3] \tab Co-60 \tab 5.274 y \tab NNDC, Brookhaven
+#' National Laboratory }
+#'
+#' @param measurement.date \code{\link{character}} or \code{\link{Date}} (\bold{required}): date of
+#' measurement in "YYYY-MM-DD". If no value is provided, the date is set to today.
+#' The argument can be provided as a vector.
+#'
+#' @param calib.date \code{\link{character}} or \code{\link{Date}} (\bold{required}): date of source
+#' calibration in "YYYY-MM-DD"
+#'
+#' @param calib.dose.rate \code{\link{numeric}} (\bold{required}): dose rate at
+#' date of calibration in Gy/s or Gy/min
+#'
+#' @param calib.error \code{\link{numeric}} (\bold{required}): error of dose
+#' rate at date of calibration Gy/s or Gy/min
+#'
+#' @param source.type \code{\link{character}} (with default): specify the
+#' irradiation source (\code{Sr-90}, \code{Co-60} or \code{Am-241}), see
+#' details for further information
+#'
+#' @param dose.rate.unit \code{\link{character}} (with default): specify dose
+#' rate unit for input (\code{Gy/min} or \code{Gy/s}), the output is given in
+#' Gy/s as valid for the function \code{\link{Second2Gray}}
+#'
+#' @param predict \code{\link{integer}} (with default): option to predict the source dose
+#' rate over time, in days, set by the provided value. The starting date is the value set
+#' with \code{measurement.date}, e.g., \code{calc_SourceDoseRate(...,predict = 100)} calculates
+#' the source dose rate for the next 100 days.
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+#' Slot \code{data} contains a \code{\link{list}} with the following
+#' structure:\cr
+#' $ dose.rate (data.frame)\cr
+#' .. $ dose.rate \cr
+#' .. $ dose.rate.error \cr
+#' .. $ date (corresponding measurement date)\cr
+#' $ parameters (list) \cr
+#' .. $ source.type\cr
+#' .. $ halflife\cr
+#' .. $ dose.rate.unit\cr
+#' $ call (the original function call)\cr
+#'
+#' The output should be accessed using the function \code{\link{get_RLum}}.\cr
+#' A plot method of the output is provided via \code{\link{plot_RLum}}
+#'
+#' @note Please be careful when using the option \code{predict}, especially when multiple values
+#' for \code{measurement.date} and \code{calib.date} are provided. For the source dose rate
+#' prediction the function takes the last value of \code{measurement.date} and predicts from
+#' that the source dose rate for the number of days requested, i.e. the (multiple) original
+#' input will be replaced. However, the function does not change entries for the calibration
+#' dates, but mixes them up. Therefore, it is not recommended to use this option when multiple
+#' calibration dates (\code{calib.date}) are provided.
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany),
+#' \cr Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#'
+#' @seealso \code{\link{Second2Gray}}, \code{\link{get_RLum}}, \code{\link{plot_RLum}}
+#'
+#' @references NNDC, Brookhaven National Laboratory
+#' (\code{http://www.nndc.bnl.gov/})
+#'
+#' @keywords manip
+#'
+#' @examples
+#'
+#'
+#' ##(1) Simple function usage
+#' ##Basic calculation of the dose rate for a specific date
+#' dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+#'                                   calib.date = "2014-12-19",
+#'                                   calib.dose.rate = 0.0438,
+#'                                   calib.error = 0.0019)
+#'
+#' ##show results
+#' get_RLum(dose.rate)
+#'
+#' ##(2) Usage in combination with another function (e.g., Second2Gray() )
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## use the calculated variable dose.rate as input argument
+#' ## to convert De(s) to De(Gy)
+#' Second2Gray(ExampleData.DeValues$BT998, dose.rate)
+#'
+#' ##(3) source rate prediction and plotting
+#' dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+#'                                   calib.date = "2014-12-19",
+#'                                   calib.dose.rate = 0.0438,
+#'                                   calib.error = 0.0019,
+#'                                   predict = 1000)
+#' plot_RLum(dose.rate)
+#'
+#'
+#'##(4) export output to a LaTeX table (example using the package 'xtable')
+#'\dontrun{
+#' xtable::xtable(get_RLum(dose.rate))
+#'
+#'}
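+#'
+#'##(5) hand calculation of the underlying decay formula; an illustrative
+#'##sketch re-using the values from example (1)
+#' days <- as.numeric(as.Date("2012-01-27") - as.Date("2014-12-19"))
+#' 0.0438 * exp(-(log(2) / (28.90 * 365)) * days)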
+#'
+#'
+#' @export
+calc_SourceDoseRate <- function(
+  measurement.date,
+  calib.date,
+  calib.dose.rate,
+  calib.error,
+  source.type = "Sr-90",
+  dose.rate.unit = "Gy/s",
+  predict = NULL
+){
+
+
+  # -- transform input as far as necessary
+  ## measurement.date
+    if (missing(measurement.date)) {
+      measurement.date <- Sys.Date()
+
+      warning("Argument 'measurement.date' missing, automatically set to today.")
+
+    }else{
+      if (is(measurement.date, "character")) {
+        measurement.date <- as.Date(measurement.date)
+      }
+
+    }
+
+  ##calibration date
+  if(is(calib.date, "character")) {
+    calib.date <- as.Date(calib.date)
+  }
+
+  # --- if predict is set, expand the last measurement date into a daily sequence
+  if(!is.null(predict) && predict > 1){
+    measurement.date <- seq(tail(measurement.date, n = 1), by = 1, length.out = predict)
+
+  }
+
+  # -- calc days since source calibration
+  decay.days <- measurement.date - calib.date
+
+
+  # -- calc dose rate of source at date of measurement, considering the chosen source-type
+
+  ##set halflife
+  halflife.years  <- switch(
+    source.type,
+    "Sr-90" = 28.90,
+    "Am-241" = 432.6,
+    "Co-60" = 5.274)
+
+  if(is.null(halflife.years)){
+
+    stop("[calc_SourceDoseRate()] Source type unknown or currently not supported!")
+
+  }
+
+
+  halflife.days  <- halflife.years * 365
+
+  # N(t) = N(0) * exp(-lambda * t), with lambda = log(2)/T.1/2
+  measurement.dose.rate <- (calib.dose.rate) *
+    exp((-log(2) / halflife.days) * as.numeric(decay.days))
+  measurement.dose.rate.error <- (calib.error) *
+    exp((-log(2) / halflife.days) * as.numeric(decay.days))
+
+
+
+  # -- convert the input unit to [Gy/s]
+  if(dose.rate.unit == "Gy/min"){
+    source.dose.rate <- measurement.dose.rate / 60
+    source.dose.rate.error <- source.dose.rate *
+      (measurement.dose.rate.error / measurement.dose.rate)
+
+  }else if(dose.rate.unit == "Gy/s"){
+    source.dose.rate <- measurement.dose.rate
+    source.dose.rate.error <- measurement.dose.rate.error
+
+  }
+
+
+  # Output --------------------------------------------------------------------------------------
+
+  dose_rate <- data.frame(
+    dose.rate = source.dose.rate,
+    dose.rate.error = source.dose.rate.error,
+    date = measurement.date,
+    stringsAsFactors = TRUE
+  )
+
+  temp.return <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      dose.rate = dose_rate,
+      parameters = list(source.type = source.type,
+                        halflife = halflife.years,
+                        dose.rate.unit = dose.rate.unit),
+      call = sys.call()
+    ))
+
+  return(temp.return)
+
+}
diff --git a/R/calc_Statistics.R b/R/calc_Statistics.R
new file mode 100644
index 0000000..c7823fe
--- /dev/null
+++ b/R/calc_Statistics.R
@@ -0,0 +1,256 @@
+#' Function to calculate statistic measures
+#'
+#' This function calculates a number of descriptive statistics for De-data,
+#' most fundamentally using error-weighted approaches.
+#'
+#' The option to use Monte Carlo Methods (\code{n.MCM > 0}) allows calculating
+#' all descriptive statistics based on random values. The distribution of these
+#' random values is based on the Normal distribution with \code{De} values as
+#' means and \code{De_error} values as one standard deviation. Increasing the
+#' number of MCM-samples linearly increases computation time. On a Lenovo X230
+#' machine, evaluation of 25 aliquots with n.MCM = 1000 takes 0.01 s, with
+#' n.MCM = 100000 ca. 1.65 s. It might be useful to work with logarithms of these
+#' values. See Dietze et al. (2016, Quaternary Geochronology) and the function
+#' \code{\link{plot_AbanicoPlot}} for details.
+#'
+#' @param data \code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+#' object (required): for \code{data.frame} two columns: De (\code{data[,1]})
+#' and De error (\code{data[,2]}).
+#'
+#' @param weight.calc \code{\link{character}}: type of weight calculation. One
+#' out of \code{"reciprocal"} (weight is 1/error), \code{"square"} (weight is
+#' 1/error^2). Default is \code{"square"}.
+#'
+#' @param digits \code{\link{integer}} (with default): round numbers to the
+#' specified digits. If digits is set to \code{NULL} nothing is rounded.
+#'
+#' @param n.MCM \code{\link{numeric}} (with default): number of samples drawn
+#' for Monte Carlo-based statistics. Set to zero to disable this option.
+#'
+#' @param na.rm \code{\link{logical}} (with default): indicating whether NA
+#' values should be stripped before the computation proceeds.
+#'
+#' @return Returns a list with weighted and unweighted statistic measures.
+#'
+#' @section Function version: 0.1.6
+#'
+#' @keywords datagen
+#'
+#' @author Michael Dietze, GFZ Potsdam (Germany)
+#'
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## show a rough plot of the data to illustrate the non-normal distribution
+#' plot_KDE(ExampleData.DeValues$BT998)
+#'
+#' ## calculate statistics and show output
+#' str(calc_Statistics(ExampleData.DeValues$BT998))
+#'
+#' \dontrun{
+#' ## now the same for 10000 normal distributed random numbers with equal errors
+#' x <- as.data.frame(cbind(rnorm(n = 10^5, mean = 0, sd = 1),
+#'                          rep(0.001, 10^5)))
+#'
+#' ## note the congruent results for weighted and unweighted measures
+#' str(calc_Statistics(x))
+#' }
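+#'
+#' ## illustrative sketch of the Monte Carlo approach used internally:
+#' ## each MCM sample is drawn from N(mean = De, sd = De error)
+#' De <- ExampleData.DeValues$BT998
+#' mcm <- matrix(rnorm(n = 1000 * nrow(De), mean = De[, 1], sd = De[, 2]),
+#'               ncol = 1000)
+#' mean(mcm)  ## compare with calc_Statistics(De)$MCM$mean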
+#'
+#' @export
+calc_Statistics <- function(
+  data,
+  weight.calc = "square",
+  digits = NULL,
+  n.MCM = 1000,
+  na.rm = TRUE
+) {
+
+  ## Check input data
+  if(is(data, "RLum.Results") == FALSE &
+       is(data, "data.frame") == FALSE) {
+    stop(paste("[calc_Statistics()] Input data format is neither",
+               "'data.frame' nor 'RLum.Results'"))
+  } else {
+    if(is(data, "RLum.Results") == TRUE) {
+      data <- get_RLum(data, "data")[,1:2]
+    }
+  }
+
+  ##strip na values
+  if(na.rm){
+    data <- na.exclude(data)
+  }
+
+  ## handle error-free data sets
+  if(ncol(data) == 1) {
+    data <- cbind(data, rep(NA, nrow(data)))
+  }
+
+  ## replace NA values in the error column by 0
+  data[is.na(data[,2]),2] <- 0
+
+  if(sum(data[,2]) == 0) {
+    warning("All errors are NA or zero! Automatically set to 10^-9!")
+    data[,2] <- rep(x = 10^-9, length(data[,2]))
+  }
+
+  if(weight.calc == "reciprocal") {
+    S.weights <- 1 / data[,2]
+  } else if(weight.calc == "square") {
+    S.weights <- 1 / data[,2]^2
+  } else {
+    stop ("[calc_Statistics()] Weight calculation type not supported!")
+  }
+
+  S.weights <- S.weights / sum(S.weights)
+
+  ## create MCM data
+  if (n.MCM == 0) {
+    data.MCM <- cbind(data[, 1])
+  } else {
+    data.MCM <-
+      matrix(data = rnorm(
+        n = n.MCM * nrow(data),
+        mean = data[, 1],
+        sd = data[, 2]
+      ),
+      ncol = n.MCM)
+
+  }
+
+  ## calculate n
+  S.n <- nrow(data)
+
+  ## calculate mean
+  S.mean <- mean(x = data[,1],
+                 na.rm = na.rm)
+
+  S.wg.mean <- weighted.mean(x = data[,1],
+                             w = S.weights,
+                             na.rm = na.rm)
+
+  S.m.mean <- mean(x = data.MCM,
+                   na.rm = na.rm)
+
+
+  ## calculate median
+  S.median <- median(x = data[,1],
+                     na.rm = na.rm)
+
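+  ## note: no error-weighted median is implemented; the unweighted median is
+  ## reused for the weighted output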
+  S.wg.median <- S.median
+
+  S.m.median <- median(x = data.MCM,
+                       na.rm = na.rm)
+
+  ## calculate absolute standard deviation
+  S.sd.abs <- sd(x = data[,1],
+                 na.rm = na.rm)
+
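+  ## error-weighted standard deviation; with the normalised weights the
+  ## denominator reduces to (n - 1)/n, so the estimator equals sd() for
+  ## equal weights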
+  S.wg.sd.abs <- sqrt(sum(S.weights * (data[,1] - S.wg.mean)^2) /
+                        (((S.n - 1) * sum(S.weights)) / S.n))
+
+  S.m.sd.abs <- sd(x = data.MCM,
+                   na.rm = na.rm)
+
+
+  ## calculate relative standard deviation
+  S.sd.rel <- S.sd.abs / S.mean * 100
+
+  S.wg.sd.rel <- S.wg.sd.abs / S.wg.mean * 100
+
+  S.m.sd.rel <- S.m.sd.abs / S.m.mean * 100
+
+  ## calculate absolute standard error of the mean
+  S.se.abs <- S.sd.abs / sqrt(S.n)
+
+  S.wg.se.abs <- S.wg.sd.abs / sqrt(S.n)
+
+  S.m.se.abs <- S.m.sd.abs / sqrt(S.n)
+
+  ## calculate relative standard error of the mean
+  S.se.rel <- S.se.abs / S.mean * 100
+
+  S.wg.se.rel <- S.wg.se.abs / S.wg.mean * 100
+
+  S.m.se.rel <- S.m.se.abs / S.m.mean * 100
+
+  ## calculate skewness
+  S.skewness <- 1 / S.n * sum(((data[,1] - S.mean) / S.sd.abs)^3)
+
+  S.m.skewness <- 1 / S.n * sum(((data.MCM - S.m.mean) / S.m.sd.abs)^3)
+
+  ## calculate kurtosis
+  S.kurtosis <- 1 / S.n * sum(((data[,1] - S.mean) / S.sd.abs)^4)
+
+  S.m.kurtosis <- 1 / S.n * sum(((data.MCM - S.m.mean) / S.m.sd.abs)^4)
+
+  ## create list objects of calculation output
+  S.weighted <- list(n = S.n,
+                     mean = S.wg.mean,
+                     median = S.wg.median,
+                     sd.abs = S.wg.sd.abs,
+                     sd.rel = S.wg.sd.rel,
+                     se.abs = S.wg.se.abs,
+                     se.rel = S.wg.se.rel,
+                     skewness = S.skewness,
+                     kurtosis = S.kurtosis)
+
+
+  if(!is.null(digits)) {
+
+     S.weighted <- sapply(names(S.weighted),
+                          simplify = FALSE,
+                          USE.NAMES = TRUE,
+                          function(x) {
+                            round(S.weighted[[x]],
+                                  digits = digits)})
+  }
+
+  S.unweighted <- list(n = S.n,
+                       mean = S.mean,
+                       median = S.median,
+                       sd.abs = S.sd.abs,
+                       sd.rel = S.sd.rel,
+                       se.abs = S.se.abs,
+                       se.rel = S.se.rel,
+                       skewness = S.skewness,
+                       kurtosis = S.kurtosis)
+
+  if(!is.null(digits)){
+
+    S.unweighted  <- sapply(names(S.unweighted),
+                            simplify = FALSE,
+                            USE.NAMES = TRUE,
+                            function(x) {
+                              round(S.unweighted [[x]],
+                                    digits = digits)})
+  }
+
+  S.MCM <- list(n = S.n,
+                mean = S.m.mean,
+                median = S.m.median,
+                sd.abs = S.m.sd.abs,
+                sd.rel = S.m.sd.rel,
+                se.abs = S.m.se.abs,
+                se.rel = S.m.se.rel,
+                skewness = S.m.skewness,
+                kurtosis = S.m.kurtosis)
+
+  if(!is.null(digits)){
+
+    S.MCM  <- sapply(names(S.MCM),
+                     simplify = FALSE,
+                     USE.NAMES = TRUE,
+                     function(x) {
+                       round(S.MCM [[x]],
+                             digits = digits)})
+  }
+
+  list(weighted = S.weighted,
+       unweighted = S.unweighted,
+       MCM = S.MCM)
+
+}
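
The weighted mean and weighted standard deviation used above can be cross-checked
in a few lines of plain R; a minimal sketch with hypothetical De values and errors
(the weights follow the 1/x^2 branch shown at the top of this hunk and are
normalised to sum to one):

    x <- c(102, 98, 105)                    # hypothetical De values
    e <- c(4, 5, 6)                         # their absolute errors
    w <- (1 / e^2) / sum(1 / e^2)           # normalised inverse-variance weights
    n <- length(x)
    wg.mean <- weighted.mean(x = x, w = w)
    wg.sd   <- sqrt(sum(w * (x - wg.mean)^2) / (((n - 1) * sum(w)) / n))
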
diff --git a/R/calc_TLLxTxRatio.R b/R/calc_TLLxTxRatio.R
new file mode 100644
index 0000000..ca0f067
--- /dev/null
+++ b/R/calc_TLLxTxRatio.R
@@ -0,0 +1,247 @@
+#' Calculate the Lx/Tx ratio for a given set of TL curves [beta version]
+#'
+#' Calculate Lx/Tx ratio for a given set of TL curves.
+#'
+#' -
+#'
+#' @param Lx.data.signal \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}): TL data (x =
+#' temperature, y = counts) (TL signal)
+#'
+#' @param Lx.data.background \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (optional): TL data (x =
+#' temperature, y = counts). If no data are provided no background subtraction
+#' is performed.
+#'
+#' @param Tx.data.signal \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (\bold{required}): TL data (x =
+#' temperature, y = counts) (TL test signal)
+#'
+#' @param Tx.data.background \code{\linkS4class{RLum.Data.Curve}} or
+#' \code{\link{data.frame}} (optional): TL data (x =
+#' temperature, y = counts). If no data are provided no background subtraction
+#' is performed.
+#'
+#' @param signal.integral.min \code{\link{integer}} (\bold{required}): channel number
+#' for the lower signal integral bound (e.g. \code{signal.integral.min = 100})
+#'
+#' @param signal.integral.max \code{\link{integer}} (\bold{required}): channel number
+#' for the upper signal integral bound (e.g. \code{signal.integral.max = 200})
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+#' Slot \code{data} contains a \link{list} with the following structure:\cr\cr
+#' $ LxTx.table \cr .. $ LnLx \cr .. $ LnLx.BG \cr .. $ TnTx \cr .. $ TnTx.BG
+#' \cr .. $ Net_LnLx \cr .. $ Net_LnLx.Error\cr
+#'
+#' @note \bold{This function still has BETA status!}
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France), Christoph Schmidt, University of Bayreuth (Germany)
+#'
+#' @seealso \code{\linkS4class{RLum.Results}}, \code{\link{analyse_SAR.TL}}
+#'
+#' @references -
+#'
+#' @keywords datagen
+#'
+#' @examples
+#'
+#'
+#' ##load package example data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##convert Risoe.BINfileData into a curve object
+#' temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3)
+#'
+#'
+#' Lx.data.signal <- get_RLum(temp, record.id=1)
+#' Lx.data.background <- get_RLum(temp, record.id=2)
+#' Tx.data.signal <- get_RLum(temp, record.id=3)
+#' Tx.data.background <- get_RLum(temp, record.id=4)
+#' signal.integral.min <- 210
+#' signal.integral.max <- 230
+#'
+#' output <- calc_TLLxTxRatio(Lx.data.signal,
+#'                            Lx.data.background,
+#'                            Tx.data.signal, Tx.data.background,
+#'                            signal.integral.min, signal.integral.max)
+#' get_RLum(output)
+#'
+#' @export
+calc_TLLxTxRatio <- function(
+  Lx.data.signal,
+  Lx.data.background,
+  Tx.data.signal,
+  Tx.data.background,
+  signal.integral.min,
+  signal.integral.max
+){
+
+
+  ##--------------------------------------------------------------------------##
+  ##(1) - a few integrity checks
+
+     ##check for MISSING objects
+     if(missing(Lx.data.signal) == TRUE | missing(Tx.data.signal) == TRUE |
+        missing(signal.integral.min) == TRUE |  missing(signal.integral.max) == TRUE){
+
+       temp.missing <- paste(
+                       c(if(missing(Lx.data.signal)){"Lx.data.signal"},
+                         if(missing(Tx.data.signal)){"Tx.data.signal"},
+                         if(missing(signal.integral.min)){"signal.integral.min"},
+                         if(missing(signal.integral.max)){"signal.integral.max"}),
+                       collapse = ", ")
+
+          stop(paste("[calc_TLLxTxRatio()] Arguments are missing: ",temp.missing, ".", sep=""))
+
+     }
+
+
+     ##check DATA TYPE differences
+     if(is(Lx.data.signal)[1]!=is(Tx.data.signal)[1]){
+       stop("[calc_TLLxTxRatio()] Data type of Lx and Tx data differs!")}
+
+     ##check for allowed data.types
+     if(!is(Lx.data.signal, "data.frame") &
+        !is(Lx.data.signal, "RLum.Data.Curve")){
+
+       stop("[calc_TLLxTxRatio()] Input data type is not allowed. Allowed are 'RLum.Data.Curve' and 'data.frame'")
+
+     }
+
+  ##--------------------------------------------------------------------------##
+  ## Type conversion (assuming that all input variables are of the same type)
+
+  if(is(Lx.data.signal, "RLum.Data.Curve")){
+
+    Lx.data.signal <- as(Lx.data.signal, "matrix")
+    Tx.data.signal <- as(Tx.data.signal, "matrix")
+
+    if(missing(Lx.data.background) == FALSE && is.null(Lx.data.background) == FALSE){
+
+      Lx.data.background <- as(Lx.data.background, "matrix")
+
+    }
+
+    if(missing(Tx.data.background) == FALSE && is.null(Tx.data.background) == FALSE){
+
+      Tx.data.background <- as(Tx.data.background, "matrix")
+
+    }
+
+  }
+
+  ##(d) - check if Lx and Tx curves have the same channel length
+     if(length(Lx.data.signal[,2])!=length(Tx.data.signal[,2])){
+
+       stop("[calc_TLLxTxRatio()] Channel number of Lx and Tx data differs!")}
+
+
+   ##(e) - check if signal integral is valid
+   if(signal.integral.min < 1 | signal.integral.max > length(Lx.data.signal[,2])){
+     stop("[calc_TLLxTxRatio()] Signal.integral is not valid!")}
+
+
+
+
+#  Background Consideration --------------------------------------------------
+
+
+   ##Lx.data
+   if(missing(Lx.data.background)==FALSE){
+
+     LnLx.BG <- sum(Lx.data.background[signal.integral.min:signal.integral.max,2])
+
+    }else{
+
+     LnLx.BG <- NA
+
+    }
+
+   ##Tx.data
+      if(missing(Tx.data.background)==FALSE){
+
+        TnTx.BG <- sum(Tx.data.background[signal.integral.min:signal.integral.max,2])
+
+      }else{
+
+        TnTx.BG <- NA
+
+      }
+
+# Calculate Lx/Tx values --------------------------------------------------
+
+    LnLx <- sum(Lx.data.signal[signal.integral.min:signal.integral.max,2])
+    TnTx <- sum(Tx.data.signal[signal.integral.min:signal.integral.max,2])
+
+
+     ##calculate the standard deviation of the two background sums
+     ##(remains NA if not both backgrounds are provided)
+     BG.Error <- NA
+
+     if(is.na(LnLx.BG) == FALSE & is.na(TnTx.BG) == FALSE){
+
+       BG.Error <- sd(c(LnLx.BG, TnTx.BG))
+     }
+
+
+    if(is.na(LnLx.BG) == FALSE){
+
+      net_LnLx <-  LnLx - LnLx.BG
+      net_LnLx.Error <- abs(net_LnLx * BG.Error/LnLx.BG)
+
+    }else{
+
+      net_LnLx <- NA
+      net_LnLx.Error <- NA
+
+    }
+
+    if(is.na(TnTx.BG) == FALSE){
+
+         net_TnTx <-  TnTx - TnTx.BG
+         net_TnTx.Error <- abs(net_TnTx * BG.Error/TnTx.BG)
+
+    }else{
+
+      net_TnTx <- NA
+      net_TnTx.Error  <- NA
+
+    }
+
+
+    if(is.na(net_TnTx) == TRUE){
+
+      LxTx <- LnLx/TnTx
+      LxTx.Error <- NA
+
+    }else{
+
+      LxTx <- net_LnLx/net_TnTx
+      LxTx.Error <- LxTx*((net_LnLx.Error/net_LnLx) + (net_TnTx.Error/net_TnTx))
+
+
+    }
+
+
+
+    ##COMBINE to a data.frame
+    temp.results <- data.frame(LnLx,
+                               LnLx.BG,
+                               TnTx,
+                               TnTx.BG,
+                               net_LnLx,
+                               net_LnLx.Error,
+                               net_TnTx,
+                               net_TnTx.Error,
+                               LxTx,
+                               LxTx.Error)
+
+# Return values -----------------------------------------------------------
+
+   newRLumResults.calc_TLLxTxRatio <- set_RLum(
+     class = "RLum.Results",
+     data=list(LxTx.table = temp.results))
+
+   return(newRLumResults.calc_TLLxTxRatio)
+
+}
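
For orientation, the background subtraction and error propagation implemented above
reduce to a few lines; a short numeric sketch with hypothetical integrated counts
(the relative errors of the net signals are added linearly):

    LnLx <- 5000; LnLx.BG <- 250            # hypothetical signal and background sums
    TnTx <- 4000; TnTx.BG <- 210
    BG.Error <- sd(c(LnLx.BG, TnTx.BG))     # spread of the two background sums
    net_LnLx <- LnLx - LnLx.BG
    net_LnLx.Error <- abs(net_LnLx * BG.Error / LnLx.BG)
    net_TnTx <- TnTx - TnTx.BG
    net_TnTx.Error <- abs(net_TnTx * BG.Error / TnTx.BG)
    LxTx <- net_LnLx / net_TnTx
    LxTx.Error <- LxTx * ((net_LnLx.Error / net_LnLx) + (net_TnTx.Error / net_TnTx))
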
diff --git a/R/calc_ThermalLifetime.R b/R/calc_ThermalLifetime.R
new file mode 100644
index 0000000..47b7082
--- /dev/null
+++ b/R/calc_ThermalLifetime.R
@@ -0,0 +1,400 @@
+#' Calculates the Thermal Lifetime using the Arrhenius equation
+#'
+#' The function calculates the thermal lifetime of charges for given E (in eV), s (in 1/s) and
+#' T (in deg. C.) parameters. The function can be used in two operational modes:\cr
+#'
+#' \bold{Mode 1 \code{(profiling = FALSE)}}
+#'
+#' An arbitrary set of input parameters (E, s, T) can be provided and the
+#' function calculates the thermal lifetimes using the Arrhenius equation for
+#' all possible combinations of these input parameters. A 3-dimensional array
+#' is returned that can be used for further analyses or graphical output (see example 1).
+#'
+#' \bold{Mode 2 \code{(profiling = TRUE)}}
+#'
+#' This mode tries to profile the variation of the thermal lifetime for a chosen
+#' temperature by accounting for the provided E and s parameters and their corresponding
+#' standard errors, e.g., \code{E = c(1.600, 0.001)}.
+#' The calculation is based on a Monte Carlo simulation, where values for E and s are
+#' sampled from a normal distribution.\cr
+#'
+#' \bold{Used equation (Arrhenius equation)}\cr
+#'
+#' \deqn{\tau = 1/s exp(E/kT)}
+#' where: \eqn{\tau} is the mean time (in s) an electron spends in the trap for a given \eqn{T},
+#' \eqn{E} the trap depth in eV, \eqn{s} the frequency factor in 1/s, \eqn{T} the temperature in K and \eqn{k} the Boltzmann constant in eV/K (cf. Furetta, 2010).
+#'
+#' @param E \code{\link{numeric}} (\bold{required}): vector of trap depths in eV,
+#' if \code{profiling = TRUE}
+#' only the first two elements are considered
+#'
+#' @param s \code{\link{numeric}} (\bold{required}): vector of frequency factor in 1/s,
+#' if \code{profiling = TRUE} only the first two elements are considered
+#'
+#' @param T \code{\link{numeric}} (with default): temperature in deg. C for which the lifetime(s)
+#' will be calculated. A vector can be provided.
+#'
+#' @param output_unit \code{\link{character}} (with default):
+#' output unit of the calculated lifetimes, accepted
+#' entries are: \code{"Ma"}, \code{"ka"}, \code{"a"}, \code{"d"}, \code{"h"}, \code{"min"}, \code{"s"}
+#'
+#' @param profiling \code{\link{logical}} (with default):
+#' this option allows to estimate uncertainties based on
+#' given E and s parameters and their corresponding standard error (cf. details and examples section)
+#'
+#' @param profiling_config \code{\link{list}} (optional): allows to set configuration parameters
+#' used for the profiling (they only have an effect there). Supported parameters are:
+#' \code{n} (number of MC runs), \code{E.distribution} (distribution used for the resampling for E) and
+#' \code{s.distribution} (distribution used for the resampling for s). Currently only the normal
+#' distribution is supported (e.g., \code{profiling_config = list(E.distribution = "norm")})
+#'
+#' @param verbose \code{\link{logical}}: enables/disables verbose mode
+#'
+#' @param plot \code{\link{logical}}: enables/disables the output plot, currently only in combination
+#' with \code{profiling = TRUE}.
+#'
+#' @param \dots further arguments that can be passed in combination with the plot output. Standard
+#' plot parameters are supported (\code{\link{plot.default}})
+#'
+#' @return A \code{\linkS4class{RLum.Results}} object is returned along with a plot (for
+#' \code{profiling = TRUE}). The output object contains the following slots:
+#'
+#' \bold{\code{@data}}\cr
+#' \tabular{lll}{
+#'  \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+#'  \code{lifetimes} \tab \code{\link{array}} or \code{\link{numeric}} \tab calculated lifetimes \cr
+#'  \code{profiling_matrix} \tab \code{\link{matrix}} \tab profiling matrix used for the MC runs
+#'
+#' }
+#'
+#' \bold{\code{@info}}\cr
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+#' \code{call} \tab \code{call} \tab the original function call
+#' }
+#'
+#' @note The profiling is currently based on resampling from a normal distribution; this
+#' distribution assumption might, however, not be valid for the given E and s parameters.
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\link[graphics]{matplot}}, \code{\link[stats]{rnorm}}, \code{\link{get_RLum}},
+#'
+#' @references Furetta, C., 2010. Handbook of Thermoluminescence, Second Edition. ed.
+#' World Scientific.
+#'
+#' @keywords datagen
+#'
+#' @examples
+#'
+#' ##EXAMPLE 1
+#' ##calculation for two trap-depths with similar frequency factor for different temperatures
+#' E <- c(1.66, 1.70)
+#' s <- 1e+13
+#' T <- 10:20
+#' temp <- calc_ThermalLifetime(
+#'   E = E,
+#'   s = s,
+#'   T = T,
+#'   output_unit = "Ma"
+#' )
+#' contour(x = E, y = T, z = temp$lifetimes[1,,],
+#'         ylab = "Temperature [\u00B0C]",
+#'         xlab = "Trap depth [eV]",
+#'         main = "Thermal Lifetime Contour Plot"
+#' )
+#' mtext(side = 3, "(values quoted in Ma)")
+#'
+#' ##EXAMPLE 2
+#' ##profiling of thermal life time for E and s and their standard error
+#' E <- c(1.600, 0.003)
+#' s <- c(1e+13,1e+011)
+#' T <- 20
+#' calc_ThermalLifetime(
+#'   E = E,
+#'   s = s,
+#'   T = T,
+#'   profiling = TRUE,
+#'   output_unit = "Ma"
+#')
+#'
+#' @export
+calc_ThermalLifetime <- function(
+  E,
+  s,
+  T = 20,
+  output_unit = "Ma",
+  profiling = FALSE,
+  profiling_config = NULL,
+  verbose = TRUE,
+  plot = TRUE,
+  ...
+
+){
+
+
+
+
+# Integrity -----------------------------------------------------------------------------------
+
+    if(missing(E) | missing(s)){
+      stop("[calc_ThermalLifetime()] 'E' or 's' or both are missing, but required.")
+
+    }
+
+
+# Set variables -------------------------------------------------------------------------------
+
+  ##Boltzmann constant
+  k <- 8.6173324e-05 #eV/K
+
+  ##recalculate temperature
+  T.K <- T + 273.15 #K
+
+
+  ##SETTINGS FOR PROFILING
+  ##profiling settings
+  profiling_settings <- list(
+    n = 1000,
+    E.distribution = "norm",
+    s.distribution = "norm"
+
+  )
+
+  ##replace if set
+  if(!is.null(profiling_config)){
+    profiling_settings <- modifyList(profiling_settings, profiling_config)
+
+  }
+
+  ##check for odd input values
+  if (profiling_settings$n < 1000){
+    profiling_settings$n <- 1000
+
+    warning("[calc_ThermalLifetime()] minimum MC runs are 1000, parameter 'n' in profiling_config reset to 1000.")
+
+  }
+
+# Calculation ---------------------------------------------------------------------------------
+
+ ##set function for the calculation
+ f <- function(E, s, T.K) {
+    1 / s * exp(E / (k * T.K))
+ }
+
+ ##PROFILING
+  if(profiling) {
+    ##set profiling matrix
+    profiling_matrix <-
+      matrix(NA, ncol = 4, nrow = profiling_settings$n)
+
+    ##fill matrix
+
+    ##E
+    profiling_matrix[, 1] <-
+      if( profiling_settings$E.distribution == "norm"){
+        rnorm(profiling_settings$n, mean = E[1], sd = E[2])
+
+      }else{
+        stop("[calc_ThermalLifetime()] unknown distribution setting for E profiling")
+
+      }
+
+
+    ##s
+    profiling_matrix[, 2] <-
+      if (profiling_settings$s.distribution == "norm") {
+        rnorm(profiling_settings$n, mean = s[1], sd = s[2])
+
+      } else{
+        stop("[calc_ThermalLifetime()] unknown distribution setting for s profiling")
+
+      }
+
+    ##T
+    profiling_matrix[, 3] <-
+      rep(T.K[1], each = profiling_settings$n)
+
+
+    ##calculate lifetimes
+    profiling_matrix[, 4] <-
+      f(profiling_matrix[, 1], profiling_matrix[, 2], profiling_matrix[, 3])
+
+    ##reduce the T vector to its first entry
+    T <- T[1]
+
+    ##set lifetimes
+    lifetimes <- profiling_matrix[, 4]
+
+  } else{
+
+    ##set empty profiling matrix
+    profiling_matrix <- matrix()
+
+    ##calculate lifetimes
+    lifetimes <- vapply(
+      X = T.K,
+      FUN = function(i) {
+        vapply(
+          X = E,
+          FUN = function(j) {
+            f(E = j, s = s, T.K = i)
+
+          },
+          FUN.VALUE = vector(mode = "numeric", length = length(s))
+        )
+
+      },
+      FUN.VALUE = matrix(numeric(), ncol = length(E), nrow = length(s))
+    )
+
+
+
+    ##transform to an array in either case to have the same output
+    if (!is(lifetimes, "array")) {
+      lifetimes <-
+        array(lifetimes, dim = c(length(s), length(E), length(T)))
+
+    }
+
+    ##set dimnames to make reading more clear
+    dimnames(lifetimes) <- list(s, E, paste0("T = ", T, " \u00B0C"))
+
+  }
+
+ ##re-calculate lifetimes according to the chosen output unit
+ temp.lifetimes <- switch (
+    output_unit,
+    "s" = lifetimes,
+    "min" = lifetimes / 60,
+    "h" = lifetimes / 60 / 60,
+    "d" = lifetimes / 60 / 60 / 24,
+    "a" = lifetimes / 60 / 60 / 24 / 365,
+    "ka" = lifetimes / 60 / 60 / 24 / 365 / 1000,
+    "Ma" = lifetimes / 60 / 60 / 24 / 365 / 1000 / 1000
+  )
+
+  ##check for invalid values
+  if(is.null(temp.lifetimes)){
+    output_unit <- "s"
+    warning("[calc_ThermalLifetime()] 'output_unit' unknown, reset to 's'")
+
+
+  }else{
+    lifetimes <- temp.lifetimes
+    rm(temp.lifetimes)
+
+  }
+
+
+  # Terminal output -----------------------------------------------------------------------------
+
+  if(verbose){
+    cat("\n[calc_ThermalLifetime()]\n\n")
+
+    if(profiling){
+
+
+    cat("\tprofiling = TRUE")
+    cat("\n\t--------------------------\n")
+    }
+    cat(paste("\tmean:\t", format(mean(lifetimes), scientific = TRUE), output_unit))
+    cat(paste("\n\tsd:\t", format(sd(lifetimes), scientific = TRUE), output_unit))
+    cat(paste("\n\tmin:\t", format(min(lifetimes), scientific = TRUE), output_unit))
+
+    if(!profiling){
+      cat(paste0(" (@",T[which(lifetimes == min(lifetimes), arr.ind = TRUE)[3]], " \u00B0C)"))
+    }
+
+    cat(paste("\n\tmax:\t", format(max(lifetimes), scientific = TRUE), output_unit))
+
+    if(!profiling){
+      cat(paste0(" (@",T[which(lifetimes == max(lifetimes), arr.ind = TRUE)[3]], " \u00B0C)"))
+    }
+
+    cat("\n\t--------------------------")
+    cat(paste0("\n\t(", length(lifetimes), " lifetimes calculated in total)"))
+
+
+  }
+
+
+  # Plotting ------------------------------------------------------------------------------------
+  if(plot & profiling){
+
+    ##plot settings
+    plot.settings <- list(
+      main = "Thermal Lifetime Density Plot",
+      xlab = paste0("Thermal lifetime [",output_unit,"]"),
+      ylab = "Density",
+      xlim = NULL,
+      ylim = NULL,
+      log = "",
+      lwd = 1,
+      lty = 1,
+      col = rgb(0, 0, 0, 0.25)
+
+    )
+
+    ##modify on request
+    plot.settings <-   modifyList(plot.settings, list(...))
+
+    ##split data and calculate density
+    ##set seq
+    id_seq <- seq(
+      from = 1,
+      to = length(lifetimes),
+      length.out = 200)
+
+    ##calculate the density for successive chunks of the lifetimes
+    lifetimes_density <-
+      lapply(1:(length(id_seq) - 1),
+             function(x) {
+               density(lifetimes[id_seq[x]:id_seq[x+1]])
+
+             })
+
+    ##get x values
+    lifetimes_density.x <- matrix(unlist(lapply(1:length(lifetimes_density), function(i){
+      lifetimes_density[[i]]$x
+
+
+    })), nrow = length(lifetimes_density[[1]]$x))
+
+    ##get y values
+    lifetimes_density.y <- matrix(unlist(lapply(1:length(lifetimes_density), function(i){
+      lifetimes_density[[i]]$y
+
+
+    })), nrow = length(lifetimes_density[[1]]$y))
+
+
+    ##plot density curves
+    graphics::matplot(
+      lifetimes_density.x,
+      lifetimes_density.y,
+      type = "l",
+      lwd = plot.settings$lwd,
+      lty = plot.settings$lty,
+      col = plot.settings$col,
+      main = plot.settings$main,
+      xlab = plot.settings$xlab,
+      ylab = plot.settings$ylab,
+      xlim = plot.settings$xlim,
+      ylim = plot.settings$ylim,
+      log = plot.settings$log,
+    )
+
+  }
+
+  # Return values -------------------------------------------------------------------------------
+ return(set_RLum(
+   class = "RLum.Results",
+   data = list(lifetimes = lifetimes,
+               profiling_matrix = profiling_matrix),
+   info = list(call = sys.call())
+ ))
+
+}
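
As a quick cross-check of the Arrhenius equation and unit conversion used above,
a standalone evaluation for one parameter set (values borrowed from example 1 of
the documentation):

    k   <- 8.6173324e-05                    # Boltzmann constant [eV/K]
    E   <- 1.66                             # trap depth [eV]
    s   <- 1e+13                            # frequency factor [1/s]
    T.K <- 20 + 273.15                      # temperature [K]
    tau_s  <- 1 / s * exp(E / (k * T.K))    # lifetime [s]
    tau_Ma <- tau_s / 60 / 60 / 24 / 365 / 1000 / 1000  # as in the switch() above
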
diff --git a/R/calc_gSGC.R b/R/calc_gSGC.R
new file mode 100644
index 0000000..1a7f10a
--- /dev/null
+++ b/R/calc_gSGC.R
@@ -0,0 +1,448 @@
+#' Calculate De value based on the gSGC by Li et al., 2015
+#'
+#' Function returns De value and De value error using the global standardised growth
+#' curve (gSGC) assumption proposed by Li et al., 2015 for OSL dating of sedimentary quartz
+#'
+#' The error of the De value is determined using a Monte Carlo simulation approach.
+#' Solving of the equation is realised using \code{\link{uniroot}}.
+#' Large values for \code{n.MC} will significantly increase the computation time.
+#'
+#'
+#' @param data \code{\link{data.frame}} (\bold{required}): input data providing the following
+#' columns: 'LnTn', 'LnTn.error', 'Lr1Tr1', 'Lr1Tr1.error', 'Dr1'.
+#' Note: column names are not required. The function expects the input data in the given order
+#'
+#' @param gSGC.type \code{\link{character}} (with default): define the function parameters that
+#' should be used for the iteration procedure: Li et al., 2015 (Table 2)
+#' presented function parameters for two dose ranges: \code{"0-450"} and \code{"0-250"}
+#'
+#' @param gSGC.parameters \code{\link{list}} (optional): option to provide own function
+#' parameters used for fitting as a named list.
+#' Nomenclature follows Li et al., 2015, i.e.
+#' \code{list(A,A.error,D0,D0.error,c,c.error,Y0,Y0.error,range)}, range requires a vector for
+#' the range the function is considered as valid, e.g. \code{range = c(0,250)}\cr
+#' Using this option overwrites the default parameter list of the gSGC, meaning the argument
+#' \code{gSGC.type} will be without effect
+#'
+#' @param n.MC \code{\link{integer}} (with default): number of Monte Carlo simulation runs for
+#' error estimation, s. details.
+#'
+#' @param verbose \code{\link{logical}}: enable or disable terminal output
+#'
+#' @param plot \code{\link{logical}}: enable or disable graphical feedback as plot
+#'
+#' @param ... parameters will be passed to the plot output
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.\cr
+#'
+#' \bold{@data}\cr
+#' $ De.value (data.frame) \cr
+#'  .. $ De  \cr
+#'  .. $ De.error \cr
+#'  .. $ Eta \cr
+#' $ De.MC (list) contains the matrices from the error estimation.\cr
+#' $ uniroot (list) contains the uniroot outputs of the De estimations\cr
+#'
+#' \bold{@info}\cr
+#' $ call (call) the original function call
+#'
+#'
+#' @section Function version: 0.1.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
+#'
+#' @seealso \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}}, \code{\link{uniroot}}
+#'
+#' @references  Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., 2015. Potential of establishing
+#' a 'global standardised growth curve' (gSGC) for optical dating of quartz from sediments.
+#' Quaternary Geochronology 27, 94-104. doi:10.1016/j.quageo.2015.02.011
+#'
+#' @keywords datagen
+#'
+#' @examples
+#' results <- calc_gSGC(data = data.frame(
+#' LnTn =  2.361, LnTn.error = 0.087,
+#' Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091,
+#' Dr1 = 34.4))
+#'
+#' get_RLum(results, data.object = "De")
+#'
+#' @export
+calc_gSGC<- function(
+  data,
+  gSGC.type = "0-250",
+  gSGC.parameters,
+  n.MC = 100,
+  verbose = TRUE,
+  plot = TRUE,
+  ...
+){
+
+##============================================================================##
+##CHECK INPUT DATA
+##============================================================================##
+
+  if(!is(data, "data.frame")){stop("'data' needs to be of type data.frame.")}
+  if(!is(gSGC.type, "character")){stop("'gSGC.type' needs to be of type character.")}
+
+  ##check length of input data
+  if(ncol(data) != 5){stop("Structure of 'data' does not fit the expectations.")}
+
+  ##rename columns for consistency reasons
+  colnames(data) <- c('LnTn', 'LnTn.error', 'Lr1Tr1', 'Lr1Tr1.error', 'Dr1')
+
+
+##============================================================================##
+##DEFINE FUNCTION
+##============================================================================##
+
+    ##define the function to be solved; nomenclature according to the publication
+    f <- function(x,A,D0,c,Y0,Dr1,Lr1Tr1,LnTn) {
+      (((A * (1 - exp( - Dr1 / D0))) + c * Dr1 + Y0)/Lr1Tr1) -
+      (((A * (1 - exp( - x/D0))) + c * x + Y0)/LnTn)
+  }
+
+    ##set general parameters
+    if (!missing(gSGC.parameters)) {
+      A <- gSGC.parameters$A
+      A.error <- gSGC.parameters$A.error
+      D0 <- gSGC.parameters$D0
+      D0.error <- gSGC.parameters$D0.error
+      c <- gSGC.parameters$c
+      c.error <- gSGC.parameters$c.error
+      Y0 <- gSGC.parameters$Y0
+      Y0.error <- gSGC.parameters$Y0.error
+      range <- gSGC.parameters$range
+
+    }else{
+      if (gSGC.type == "0-450") {
+        A <- 0.723
+        A.error <- 0.014
+        D0 <- 65.1
+        D0.error <- 0.9
+        c <- 0.001784
+        c.error <- 0.000016
+        Y0 <- 0.009159
+        Y0.error <- 0.004795
+
+        range <- c(0.1,250)
+
+      }else if (gSGC.type == "0-250") {
+        A <- 0.787
+        A.error <- 0.051
+        D0 <- 73.9
+        D0.error <- 2.2
+        c <- 0.001539
+        c.error <- 0.000068
+        Y0 <- 0.01791
+        Y0.error <- 0.00490
+
+        range <- c(0.1,250)
+
+      }else{
+        stop("Unknown input for 'gSGC.type'")
+
+      }
+
+    }
+
+    ##Define size of output objects
+    output.data <- data.table(
+      DE = numeric(length = nrow(data)),
+      DE.ERROR =  numeric(length = nrow(data)),
+      ETA =  numeric(length = nrow(data))
+    )
+
+    ##set list for De.MC
+    output.De.MC <- vector("list",  nrow(data))
+
+    ##set list for uniroot
+    output.uniroot <-  vector("list",  nrow(data))
+
+
+##============================================================================##
+##CALCULATION
+##============================================================================##
+
+
+ for(i in 1:nrow(data)){
+
+    Lr1Tr1 <-data[i,"Lr1Tr1"]
+    Lr1Tr1.error <- data[i,"Lr1Tr1.error"]
+    Dr1 <- data[i,"Dr1"]
+
+    LnTn <- data[i,"LnTn"]
+    LnTn.error <- data[i,"LnTn.error"]
+
+  ##calculate mean value
+    temp <- try(uniroot(
+      f,
+      interval = c(0.1,450),
+      tol = 0.001,
+      A = A,
+      D0 = D0,
+      c = c,
+      Y0 = Y0,
+      Dr1 = Dr1,
+      Lr1Tr1 = Lr1Tr1,
+      LnTn = LnTn,
+      extendInt = 'yes',
+      check.conv = TRUE,
+      maxiter = 1000
+    ), silent = TRUE)
+
+  if(!inherits(temp, "try-error")){
+
+    ##get De
+    De <- temp$root
+
+    ##calculate Eta, which is the normalisation factor
+    Eta <- ((A * (1 - exp( - Dr1 / D0))) + c * Dr1 + Y0)/Lr1Tr1
+
+    ##--------------------------------------------------------------------------##
+    ##Monte Carlo simulation for error estimation
+
+    ##set matrix
+    temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8)
+
+    ##fill matrix
+    temp.MC.matrix[,1:6] <- matrix(rnorm(
+      n.MC * 6,
+      mean = c(LnTn, Lr1Tr1, A, D0, c, Y0),
+      sd = c(LnTn.error, Lr1Tr1.error, A.error, D0.error, c.error, Y0.error)
+    ), ncol = 6, byrow = TRUE)
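+
+    ## note: with 'byrow = TRUE' and the recycled mean/sd vectors of length 6, each
+    ## matrix row holds one joint draw of (LnTn, Lr1Tr1, A, D0, c, Y0)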
+
+
+      ##run uniroot to get the De
+      temp.MC.matrix[,7] <- vapply(X = 1:n.MC, FUN = function(x){
+
+        uniroot(f,
+                interval = c(0.1,450),
+                tol = 0.001,
+                A = temp.MC.matrix[x,3],
+                D0 = temp.MC.matrix[x,4],
+                c = temp.MC.matrix[x,5],
+                Y0 = temp.MC.matrix[x,6],
+                Dr1 = Dr1,
+                Lr1Tr1 =temp.MC.matrix[x,2],
+                LnTn = temp.MC.matrix[x,1],
+                check.conv = TRUE,
+                extendInt = 'yes',
+                maxiter = 1000
+                )$root
+
+      }, FUN.VALUE = vector(mode = "numeric", length = 1))
+
+      ##calculate also the normalisation factor
+      temp.MC.matrix[,8] <- (temp.MC.matrix[,3] * (1 - exp( - Dr1 / temp.MC.matrix[,4])) +
+        temp.MC.matrix[,5] * Dr1 + temp.MC.matrix[,6])/temp.MC.matrix[,2]
+
+
+      ##re-name matrix
+      colnames(temp.MC.matrix) <- c("LnTn","Lr1Tr1","A","D0","c","Y0","De","Eta")
+
+      ##get De error as SD
+      De.error <- sd(temp.MC.matrix[,7])
+
+  }else{
+    warning("No solution was found!")
+    De <- NA
+    Eta <- NA
+    De.error <- NA
+
+    ##set matrix
+    temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8)
+
+    ##fill matrix
+    temp.MC.matrix[,1:6] <- matrix(rnorm(
+      n.MC * 6,
+      mean = c(LnTn, Lr1Tr1, A, D0, c, Y0),
+      sd = c(LnTn.error, Lr1Tr1.error, A.error, D0.error, c.error, Y0.error)
+    ), ncol = 6, byrow = TRUE)
+
+
+  }
+
+##============================================================================##
+##PLOT OUTPUT
+##============================================================================##
+
+  if (plot) {
+
+    ##set plot settings
+    plot.settings <- list(
+      main = "gSGC and resulting De",
+      xlab = "Dose [a.u.]",
+      ylab = expression(paste("Re-norm. ", L[x]/T[x])),
+      xlim = NULL,
+      ylim = NULL,
+      lwd = 1,
+      lty = 1,
+      pch = 21,
+      col = "red",
+      grid = expression(nx = 10, ny = 10),
+      mtext = ""
+    )
+
+    plot.settings <-  modifyList(plot.settings, list(...))
+
+
+
+    ##graphical feedback
+    x <- NA
+    curve(
+      A * (1 - exp(-x / D0)) + c * x + Y0, from = 0, to = 500,
+      xlab = plot.settings$xlab,
+      ylab = plot.settings$ylab,
+      main = plot.settings$main,
+      xlim = plot.settings$xlim,
+      ylim = plot.settings$ylim,
+      lwd = plot.settings$lwd,
+      lty = plot.settings$lty
+    )
+
+    mtext(side = 3, plot.settings$mtext)
+
+    if(!is.null(plot.settings$grid)){
+      graphics::grid(eval(plot.settings$grid))
+
+    }
+
+    if(!inherits(temp, "try-error")){
+
+      if(temp$root < 450 & temp$root > 0){
+        points(temp$root,Eta*LnTn, col = plot.settings$col, pch = plot.settings$pch)
+
+        segments(De - De.error,Eta * LnTn,
+                 De + De.error,Eta * LnTn)
+
+        hist <-
+          hist(
+            temp.MC.matrix[, 7],
+            freq = FALSE,
+            add = TRUE,
+            col = rgb(0, 0, 0, 0.2),
+            border = rgb(0, 0, 0, 0.5)
+          )
+        lines(hist$mids,hist$density)
+
+      }else{
+
+        if(temp$root < 450){
+          shape::Arrows(
+            x0 = 450,
+            y0 = par()$usr[4] - 0.2,
+            x1 = 500,
+            y1 = par()$usr[4] - 0.2,
+            arr.type = "triangle",
+            col = "red"
+          )
+
+        }else{
+
+            shape::Arrows(
+              x0 = 50,
+              y0 = par()$usr[4] - 0.2,
+              x1 = 0,
+              y1 = par()$usr[4] - 0.2,
+              arr.type = "triangle",
+              col = "red"
+            )
+
+
+        }
+
+        mtext(side = 1, text = "Out of bounds!", col = "red")
+
+
+      }
+
+
+    }else{
+      mtext(side = 1, text = "No solution found!", col = "red")
+
+    }
+
+
+  }
+
+##============================================================================##
+##OUTPUT VISUALISATION
+##============================================================================##
+
+    if (verbose) {
+      cat("\n[calc_gSGC()]")
+      cat("\n\t Corresponding De based on the gSGC\n")
+
+      cat(paste0("\n\t"," Ln/Tn:\t\t ",LnTn," \u00B1 ", LnTn.error,"\n"))
+      cat(paste0("\t"," Lr1/Tr1:\t ",Lr1Tr1," \u00B1 ", Lr1Tr1.error,"\n"))
+      cat(paste0("\t"," Dr1:\t\t ",Dr1,"\n"))
+      cat(paste0("\t"," f(D):\t\t ",A," * (1 - exp(-D /",D0,")) + c * D + ",Y0,"\n"))
+      cat(paste0("\t"," n.MC:\t\t ",n.MC,"\n"))
+      cat(paste0("\t ------------------------------ \n"))
+      cat(paste0("\t De:\t\t",round(De,digits = 2)," \u00B1 ",round(De.error,digits = 2),"\n"))
+      cat(paste0("\t ------------------------------ \n"))
+
+    }
+
+
+##============================================================================##
+##CREATE OUTPUT OBJECTS
+##============================================================================##
+
+    ##needed for data.table
+    temp.De <- De
+    temp.De.error <- De.error
+    temp.Eta <- Eta
+
+    ##replace values in the data.table with values
+    output.data[i, `:=` (DE = temp.De,
+                         DE.ERROR = temp.De.error,
+                         ETA = temp.Eta)]
+
+    rm(list = c('temp.De', 'temp.De.error', 'temp.Eta'))
+
+    ##matrix - limit the output to prevent memory overload
+    if(n.MC * nrow(data) > 1e6){
+
+      if(i == 1){
+
+        output.De.MC[[i]] <- temp.MC.matrix
+
+      }else{
+
+        output.De.MC[[i]] <- NA
+
+      }
+
+      warning("Only the first MC matrix is returned to prevent memory overload!")
+
+    }else{
+
+      output.De.MC[[i]] <- temp.MC.matrix
+
+    }
+
+
+    output.uniroot[[i]] <- temp
+
+
+}##end for loop
+
+##============================================================================##
+##OUTPUT RLUM
+##============================================================================##
+
+    temp.RLum.Results <- set_RLum(
+      class = "RLum.Results",
+      data = list(
+        De = as.data.frame(output.data),
+        De.MC =  output.De.MC,
+        uniroot = output.uniroot
+      ),
+      info = list( call = sys.call())
+    )
+
+  return(temp.RLum.Results)
+}
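
For a single aliquot the De estimation above boils down to solving the re-normalised
gSGC equation with uniroot(); a stripped-down sketch using the '0-250' parameters
hard-coded above and the input values from the example (Monte Carlo error run omitted):

    A <- 0.787; D0 <- 73.9; c <- 0.001539; Y0 <- 0.01791
    LnTn <- 2.361; Lr1Tr1 <- 2.744; Dr1 <- 34.4
    f <- function(x) {
      ((A * (1 - exp(-Dr1 / D0)) + c * Dr1 + Y0) / Lr1Tr1) -
        ((A * (1 - exp(-x / D0)) + c * x + Y0) / LnTn)
    }
    uniroot(f, interval = c(0.1, 450), tol = 0.001)$root  # De estimate
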
diff --git a/R/extract_IrradiationTimes.R b/R/extract_IrradiationTimes.R
new file mode 100644
index 0000000..0230c7e
--- /dev/null
+++ b/R/extract_IrradiationTimes.R
@@ -0,0 +1,443 @@
+#' Extract irradiation times from an XSYG file
+#'
+#' Extracts irradiation times, dose and times since last irradiation from a
+#' Freiberg Instruments XSYG-file. This information can be further used to
+#' update an existing BINX-file.
+#'
+#' The function was written to compensate for missing information in the BINX-file
+#' output of Freiberg Instruments lexsyg readers. As all information is
+#' available within the XSYG-file anyway, it can be extracted
+#' and used for further analysis and/or stored in a new BINX-file, which can
+#' be further used by other software, e.g. Analyst (Geoff Duller). \cr
+#'
+#' Typical application example: g-value estimation from fading measurements
+#' using the Analyst or any other self-written script.\cr
+#'
+#' Besides some simple data transformation steps, the function applies the
+#' functions \code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}},
+#' \code{\link{write_R2BIN}} for data import and export.
+#'
+#' @param object \code{\link{character}} (\bold{required}) or
+#' \code{\linkS4class{RLum.Analysis}} object or \code{\link{list}}: path and file name of the XSYG
+#' file or an \code{\linkS4class{RLum.Analysis}} produced by the function
+#' \code{\link{read_XSYG2R}}; alternatively a \code{list} of \code{\linkS4class{RLum.Analysis}} can
+#' be provided. \cr
+#'
+#' \bold{Note}: If an \code{\linkS4class{RLum.Analysis}} is used, any input for
+#' the arguments \code{file.BINX} and \code{recordType} will be ignored!
+#'
+#' @param file.BINX \code{\link{character}} (optional): path and file name of
+#' an existing BINX-file. If a file name is provided the file will be updated
+#' with the information from the XSYG file in the same folder as the original
+#' BINX-file.\cr Note: The XSYG and the BINX-file have to be originate from the
+#' same measurement!
+#'
+#' @param recordType \code{\link{character}} (with default): select relevant
+#' curves types from the XSYG file or \code{\linkS4class{RLum.Analysis}}
+#' object. As the XSYG-file format comprises much more information than usually
+#' needed for routine data analysis and allowed in the BINX-file format, only
+#' the relevant curves are selected by using the function
+#' \code{\link{get_RLum}}. The argument \code{recordType} works as
+#' described for this function. \cr
+#'
+#' Note: A wrong selection will cause a function error. Please change this
+#' argument only if you have reasons to do so.
+#' @param compatibility.mode \code{\link{logical}} (with default): this option
+#' is parsed only if a BIN/BINX file is produced and it will reset all position
+#' values to a max. value of 48, cf.\code{\link{write_R2BIN}}
+#' @param txtProgressBar \code{\link{logical}} (with default): enables
+#' \code{TRUE} or disables \code{FALSE} the progression bars during import and
+#' export
+#'
+#' @return An \code{\linkS4class{RLum.Results}} object is returned with the
+#' following structure:\cr .. $irr.times (data.frame)\cr
+#'
+#' If a BINX-file path and name is set, the output will be additionally
+#' transferred into a new BINX-file with the function name as suffix. For the
+#' output the path of the input BINX-file itself is used. Note that this will
+#' not work if the input object is not a file path to an XSYG-file; in this
+#' case the argument \code{file.BINX} is ignored.\cr
+#'
+#' In the self call mode (input is a \code{list} of \code{\linkS4class{RLum.Analysis}} objects
+#' a list of \code{\linkS4class{RLum.Results}} is returned.
+#'
+#' @note The produced output object still contains the irradiation steps to
+#' keep the output transparent. However, for the BINX-file export these steps
+#' are removed as the BINX-file format description does not allow irradiations
+#' as separate sequence steps.\cr
+#'
+#' Known issue: The 'fading correction' menu in the Analyst will not appear
+#' with the produced BIN/BINX-file due to hidden bits, which are not reproduced
+#' by the function \code{write_R2BIN()}, or if it appears it stops with a
+#' floating point error. \cr
+#'
+#' Negative values for \code{TIMESINCELAST.STEP}? Yes, this is possible and no
+#' bug, as in the XSYG file multiple curves are stored for one step. Example: A
+#' TL step may comprise three curves: (a) counts vs. time, (b) measured
+#' temperature vs. time and (c) predefined temperature vs. time. Three curves,
+#' but they all belong to one TL measurement step; with regard to
+#' the time stamps this can produce negative values, as the underlying function
+#' (\code{\link{read_XSYG2R}}) does not change the order of entries for one step
+#' towards a correct time order.
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}, \code{\linkS4class{Risoe.BINfileData}},
+#' \code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}}, \code{\link{write_R2BIN}}
+#'
+#' @references Duller, G., 2007. Analyst.
+#'
+#' @keywords IO manip
+#'
+#' @examples
+#'
+#'
+#' ## (1) - example for your own data
+#' ##
+#' ## set files and run function
+#' #
+#' #   file.XSYG <- file.choose()
+#' #   file.BINX <- file.choose()
+#' #
+#' #     output <- extract_IrradiationTimes(file.XSYG = file.XSYG, file.BINX = file.BINX)
+#' #     get_RLum(output)
+#' #
+#' ## export results additionally to a CSV.file in the same directory as the XSYG-file
+#' #       write.table(x = get_RLum(output),
+#' #                   file = paste0(file.BINX,"_extract_IrradiationTimes.csv"),
+#' #                   sep = ";",
+#' #                   row.names = FALSE)
+#'
+#' @export
+extract_IrradiationTimes <- function(
+  object,
+  file.BINX,
+  recordType = c("irradiation (NA)", "IRSL (UVVIS)", "OSL (UVVIS)", "TL (UVVIS)"),
+  compatibility.mode = TRUE,
+  txtProgressBar = TRUE
+){
+
+  # SELF CALL -----------------------------------------------------------------------------------
+  if(is.list(object)){
+
+    ##show message for non-supported arguments
+    if(!missing(file.BINX)){
+      warning("[extract_IrradiationTimes()] argument 'file.BINX' is not supported in the self call mode.",
+              call. = FALSE)
+
+    }
+
+    ##extend arguments
+      ##extend recordType
+      if(is(recordType, "list")){
+        recordType <-
+          rep(recordType, length = length(object))
+
+
+      }else{
+        recordType <-
+          rep(list(recordType), length = length(object))
+
+      }
+
+      ##run function
+      results <- lapply(1:length(object), function(x) {
+        extract_IrradiationTimes(
+          object = object[[x]],
+          recordType = recordType[[x]],
+          txtProgressBar = txtProgressBar
+        )
+
+      })
+
+      ##DO NOT use invisible() here, this would stop the function from returning the results
+      if(length(results) == 0){
+        return(NULL)
+
+      }else{
+        return(results)
+
+      }
+
+  }
+
+
+
+  # Integrity tests -----------------------------------------------------------------------------
+
+  ##check whether an character or an RLum.Analysis object is provided
+  if(is(object)[1] != "character" & is(object)[1] != "RLum.Analysis"){
+
+    stop("[extract_IrradiationTimes()] Input object is neither of type 'character' nor of type 'RLum.Analysis'.")
+
+  }else if(is(object)[1] == "character"){
+
+    ##set object to file.XSYG
+    file.XSYG <- object
+
+    ##XSYG
+    ##check if file exists
+    if(file.exists(file.XSYG) == FALSE){
+
+      stop("[extract_IrradiationTimes()] Wrong XSYG file name or file does not exist!")
+
+    }
+
+    ##check if the file is an XSYG file
+    if(tail(unlist(strsplit(file.XSYG, split = "\\.")), 1) != "xsyg" &
+         tail(unlist(strsplit(file.XSYG, split = "\\.")), 1) != "XSYG" ){
+
+      stop("[extract_IrradiationTimes()] File is not of type 'XSYG'!")
+
+    }
+
+    ##BINX
+    if(!missing(file.BINX)){
+
+      ##check if file exists
+      if(file.exists(file.BINX) == FALSE){
+
+        stop("[extract_IrradiationTimes()] Wrong BINX file name or file does not exist!")
+
+      }
+
+      ##check if the file is a BINX file
+      if(tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "binx" &
+           tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "BINX" ){
+
+        stop("[extract_IrradiationTimes()] File is not of type 'BINX'!")
+
+      }
+
+    }
+
+    # Settings and import XSYG --------------------------------------------------------------------
+
+    temp.XSYG <- read_XSYG2R(file.XSYG, txtProgressBar = txtProgressBar)
+
+    if(!missing(file.BINX)){
+      temp.BINX <- read_BIN2R(file.BINX, txtProgressBar = txtProgressBar)
+      temp.BINX.dirname <- (dirname(file.XSYG))
+    }
+
+
+    # Some data preparation -----------------------------------------------------------------------
+    ##set list
+    temp.sequence.list <- list()
+
+    ##select all analysis objects and combine them
+    for(i in 1:length(temp.XSYG)){
+
+      ##select sequence and reduce the data set to the relevant values
+      temp.sequence.list[[i]] <- get_RLum(temp.XSYG[[i]]$Sequence.Object,
+                                                   recordType = recordType,
+                                                   drop = FALSE)
+
+
+      ##get corresponding position number, this will be needed later on
+      temp.sequence.position <- as.numeric(as.character(temp.XSYG[[i]]$Sequence.Header["position",]))
+
+    }
+
+
+
+  }else{
+
+    ##now we assume a single RLum.Analysis object
+    ##select sequence and reduce the data set to the relevant values; note that no
+    ##record selection was made!
+    temp.sequence.list <- list(object)
+
+  }
+
+
+
+
+
+  ##merge objects
+  if(length(temp.sequence.list)>1){
+
+    temp.sequence <- merge_RLum(temp.sequence.list)
+
+  }else{
+
+    temp.sequence <- temp.sequence.list[[1]]
+
+  }
+
+
+  # Grep relevant information -------------------------------------------------------------------
+
+  ##Sequence STEP
+  STEP <- vapply(X = 1:length_RLum(temp.sequence), FUN = function(x){
+    get_RLum(temp.sequence, record.id = x)@recordType
+
+  }, FUN.VALUE = vector(mode = "character", length = 1))
+
+  #START time of each step
+  temp.START <- unname(vapply(X = 1:length_RLum(temp.sequence), FUN = function(x){
+    get_RLum(get_RLum(temp.sequence, record.id = x), info.object = c("startDate"))
+
+  }, FUN.VALUE = vector(mode = "character", length = 1)))
+
+
+  ##DURATION of each STEP
+  DURATION.STEP <- vapply(X = 1:length_RLum(temp.sequence), FUN = function(x){
+   # get_RLum(get_RLum(temp.sequence, record.id = x), info.object = c("endDate"))
+    max(get_RLum(get_RLum(temp.sequence, record.id = x))[,1])
+   #print(get_RLum(temp.sequence, record.id = x))
+
+  }, FUN.VALUE = vector(mode = "numeric", length = 1))
+
+  #print(DURATION.STEP)
+
+  ##a little bit of reformatting
+  START <- strptime(temp.START, format = "%Y%m%d%H%M%S", tz = "GMT")
+
+  ##Calculate END time of each STEP
+  END <- START + DURATION.STEP
+
+  ##add position number, provided an XSYG file was the input
+  if(exists("file.XSYG")){
+
+    POSITION <- rep(temp.sequence.position, each = length_RLum(temp.sequence))
+
+  }else if(!inherits(try(
+    get_RLum(
+      get_RLum(temp.sequence, record.id = 1), info.object = "position"),
+    silent = TRUE), "try-error")){
+
+    ##POSITION of each STEP
+    POSITION <- unname(sapply(1:length_RLum(temp.sequence), function(x){
+
+      get_RLum(get_RLum(temp.sequence, record.id = x),info.object = "position")
+
+    }))
+
+  }else{
+
+    POSITION <- NA
+
+  }
+
+
+  ##Combine the results
+  temp.results <- data.frame(POSITION,STEP,START,DURATION.STEP,END)
+
+
+  # Calculate irradiation duration ------------------------------------------------------------
+
+  ##set objects
+  time.irr.duration <- NA
+
+  IRR_TIME <- unlist(sapply(1:nrow(temp.results), function(x){
+
+    if(temp.results[x,"STEP"] == "irradiation (NA)"){
+
+      time.irr.duration <<- temp.results[x,"DURATION.STEP"]
+      return(0)
+
+    }else{
+
+      if(is.na(time.irr.duration)){
+
+        return(0)
+
+      }else{
+
+        return(time.irr.duration)
+
+      }
+
+    }
+
+  }))
+
+
+  # Calculate time since irradiation ------------------------------------------------------------
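+  ## note: the sapply() below deliberately assigns with '<<-' so that the END time
+  ## of the most recent irradiation step is carried over to later iterations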
+
+  ##set objects
+  time.irr.end <- NA
+
+  TIMESINCEIRR <- unlist(sapply(1:nrow(temp.results), function(x){
+
+    if(temp.results[x,"STEP"] == "irradiation (NA)"){
+
+      time.irr.end<<-temp.results[x,"END"]
+      return(-1)
+
+    }else{
+
+      if(is.na(time.irr.end)){
+
+        return(-1)
+
+      }else{
+
+        return(difftime(temp.results[x,"START"],time.irr.end, units = "secs"))
+
+      }
+
+    }
+
+  }))
+
+
+
+  # Calculate time since last step --------------------------------------------------------------
+
+
+  TIMESINCELAST.STEP <- unlist(sapply(1:nrow(temp.results), function(x){
+
+    if(x == 1){
+      return(0)
+    }else{
+      return(difftime(temp.results[x,"START"],temp.results[x-1, "END"], units = "secs"))
+    }
+
+
+  }))
+
+
+  # Combine final results -----------------------------------------------------------------------
+
+  ##results table, export as CSV
+  results <- cbind(temp.results,IRR_TIME, TIMESINCEIRR,TIMESINCELAST.STEP)
+
+  # Write BINX-file if wanted -------------------------------------------------------------------
+  if(!missing(file.BINX)){
+
+    ##(1) remove all irradiation steps as there is no record in the BINX file
+    results.BINX <- results[-which(results[,"STEP"] == "irradiation (NA)"),]
+
+    ##(2) compare entries in the BINX-file with the entries in the table to make sure
+    ## that both have the same length
+    if(nrow(results.BINX) == nrow(temp.BINX@METADATA)){
+
+      ##(2a) update information
+      temp.BINX@METADATA[,c("IRR_TIME", "TIMESINCEIRR")] <- results.BINX[,c("IRR_TIME","TIMESINCEIRR")]
+
+      ##update BINX-file
+      write_R2BIN(temp.BINX, version = "06",
+                 file = paste0(file.BINX,"_extract_IrradiationTimes.BINX"),
+                 compatibility.mode =  compatibility.mode,
+                 txtProgressBar = txtProgressBar)
+
+    }else{
+
+      warning("XSYG and BINX-file do not contain similar entries. BINX-file update skipped!")
+
+    }
+  }
+
+
+  # Output --------------------------------------------------------------------------------------
+  return(set_RLum(class = "RLum.Results", data = list(irr.times = results)))
+}
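
The timestamp bookkeeping above relies on base R date-time arithmetic only; a
minimal sketch with two hypothetical steps in the XSYG 'startDate' format:

    START <- strptime(c("20170101120000", "20170101121000"),
                      format = "%Y%m%d%H%M%S", tz = "GMT")
    DURATION.STEP <- c(100, 60)             # step lengths [s] from the curves
    END <- START + DURATION.STEP
    difftime(START[2], END[1], units = "secs")  # time since the previous step ended
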
diff --git a/R/fit_CWCurve.R b/R/fit_CWCurve.R
new file mode 100644
index 0000000..17808c7
--- /dev/null
+++ b/R/fit_CWCurve.R
@@ -0,0 +1,816 @@
+#' Nonlinear Least Squares Fit for CW-OSL curves [beta version]
+#'
+#' The function determines the weighted least-squares estimates of the
+#' component parameters of a CW-OSL signal for a given maximum number of
+#' components and returns various component parameters. The fitting procedure
+#' uses the \code{\link{nls}} function with the \code{port} algorithm.
+#'
+#' \bold{Fitting function}\cr\cr The function for the CW-OSL fitting has the
+#' general form: \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_{1}*x) + \ldots +
+#' I0_{i}*\lambda_{i}*exp(-\lambda_{i}*x)} where \eqn{0 < i < 8}\cr\cr and
+#' \eqn{\lambda} is the decay constant and \eqn{I0} the initial number of
+#' trapped electrons.\cr (for the used equation cf. Boetter-Jensen et al.,
+#' 2003)\cr\cr \bold{Start values}\cr
+#'
+#' Start values are estimated automatically by fitting a linear function to the
+#' logarithmized input data set. Currently, there is no option to manually
+#' provide start parameters. \cr\cr \bold{Goodness of fit}\cr\cr The goodness
+#' of the fit is given as pseudoR^2 value (pseudo coefficient of
+#' determination). According to Lave (1970), the value is calculated as:
+#' \deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} \cr
+#' and \eqn{TSS = Total~Sum~of~Squares}\cr\cr
+#'
+#' \bold{Error of fitted component parameters}\cr\cr The 1-sigma error for the
+#' components is calculated using the function \code{\link{confint}}. Due to
+#' considerable calculation time, this option is deactivated by default. In
+#' addition, the error for the components can be estimated by using internal R
+#' functions like \code{\link{summary}}. See the \code{\link{nls}} help page
+#' for more information.\cr\cr \emph{For details on the nonlinear regression in
+#' R, see Ritz & Streibig (2008).}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+#' (\bold{required}): x, y data of measured values (time and counts). See
+#' examples.
+#' @param n.components.max \link{vector} (optional): maximum number of
+#' components that are to be used for fitting. The upper limit is 7.
+#' @param fit.failure_threshold \link{vector} (with default): limits the failed
+#' fitting attempts.
+#' @param fit.method \link{character} (with default): select fit method,
+#' allowed values: \code{'port'} and \code{'LM'}. \code{'port'} uses the 'port'
+#' routine of the function \code{\link{nls}}; \code{'LM'} utilises the
+#' function \code{nlsLM} from the package \code{minpack.lm} and with that the
+#' Levenberg-Marquardt algorithm.
+#' @param fit.trace \link{logical} (with default): traces the fitting process
+#' on the terminal.
+#' @param fit.calcError \link{logical} (with default): calculate 1-sigma error
+#' range of components using \code{\link{confint}}
+#' @param LED.power \link{numeric} (with default): LED power (max.) used for
+#' intensity ramping in mW/cm^2. \bold{Note:} The value is used for the
+#' calculation of the absolute photoionisation cross section.
+#' @param LED.wavelength \link{numeric} (with default): LED wavelength used for
+#' stimulation in nm. \bold{Note:} The value is used for the calculation of the
+#' absolute photoionisation cross section.
+#' @param cex.global \link{numeric} (with default): global scaling factor.
+#' @param sample_code \link{character} (optional): sample code used for the
+#' plot and the optional output table (mtext).
+#' @param output.path \link{character} (optional): output path for table output
+#' containing the results of the fit. The file name is set automatically. If
+#' the file already exists in the directory, the values are appended.
+#' @param output.terminal \link{logical} (with default): terminal output with
+#' fitting results.
+#' @param output.terminalAdvanced \link{logical} (with default): enhanced
+#' terminal output. Requires \code{output.terminal = TRUE}. If
+#' \code{output.terminal = FALSE} no advanced output is possible.
+#' @param plot \link{logical} (with default): returns a plot of the fitted
+#' curves.
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot}}.
+#' @return \item{plot}{(optional) the fitted CW-OSL curves are returned as
+#' plot.} \item{table}{(optional) an output table (*.csv) with parameters of
+#' the fitted components is provided if the \code{output.path} is set.}
+#' \item{object}{beside the plot and table output options,
+#' an \code{\linkS4class{RLum.Results}} object is returned.\cr\cr \code{fit}:
+#' an \code{nls} object (\code{$fit}) for which generic R functions are
+#' provided, e.g. \link{summary}, \link{confint}, \link{profile}. For more
+#' details, see \link{nls}.\cr\cr \code{output.table}: a \link{data.frame}
+#' containing the summarised parameters including the error\cr
+#' \code{component.contribution.matrix}: \link{matrix} containing the values
+#' for the component to sum contribution plot
+#' (\code{$component.contribution.matrix}).\cr
+#'
+#' Matrix structure:\cr Column 1 and 2: time and \code{rev(time)} values\cr
+#' Additional columns are used for the components, two for each component,
+#' containing I0 and n0. The last columns \code{cont.} provide information on
+#' the relative component contribution for each time interval including the row
+#' sum for these values.}
+#' @note \bold{Beta version - This function has not been properly tested yet
+#' and should therefore not be used for publication purposes!}\cr\cr The
+#' pseudo-R^2 may not be the best parameter to describe the goodness of the
+#' fit. The trade-off between the \code{n.components} and the pseudo-R^2 value
+#' is currently not considered.\cr\cr The function \bold{does not} ensure that
+#' the fitting procedure has reached a global minimum rather than a local
+#' minimum!
+#' @section Function version: 0.5.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso \code{\link{fit_LMCurve}}, \code{\link{plot}},\code{\link{nls}},
+#' \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}
+#' @references Boetter-Jensen, L., McKeever, S.W.S., Wintle, A.G., 2003.
+#' Optically Stimulated Luminescence Dosimetry. Elsevier Science B.V.
+#'
+#' Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of
+#' Economics and Statistics, 52 (3), 320-323.
+#'
+#' Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. In: R.
+#' Gentleman, K. Hornik, G. Parmigiani, eds., Springer, p. 150.
+#' @keywords dplot models
+#' @examples
+#'
+#'
+#' ##load data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##fit data
+#' fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve,
+#'                    main = "CW Curve Fit",
+#'                    n.components.max = 4,
+#'                    log = "x")
+#'
+#' @export
+fit_CWCurve<- function(
+  values,
+  n.components.max,
+  fit.failure_threshold = 5,
+  fit.method = "port",
+  fit.trace = FALSE,
+  fit.calcError = FALSE,
+  LED.power = 36,
+  LED.wavelength = 470,
+  cex.global = 0.6,
+  sample_code = "Default",
+  output.path,
+  output.terminal = TRUE,
+  output.terminalAdvanced = TRUE,
+  plot = TRUE,
+  ...
+){
+  ##TODO
+  ##remove output.path
+
+  # INTEGRITY CHECKS --------------------------------------------------------
+
+  ##INPUT OBJECTS
+  if(is(values, "RLum.Data.Curve") == FALSE & is(values, "data.frame") == FALSE){
+    stop("[fit_CWCurve()] Input object is not of type 'RLum.Data.Curve' or 'data.frame'!")
+  }
+
+
+  if(is(values, "RLum.Data.Curve") == TRUE){
+
+    x <- values@data[,1]
+    y <- values@data[,2]
+
+    ##needed due to inconsistencies in the R code below
+    values <- data.frame(x,y)
+
+  }else{
+
+    ##set x and y values
+    x<-values[,1]
+    y<-values[,2]
+
+  }
+
+
+  # Deal with extra arguments -----------------------------------------------
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {"CW-OSL Curve Fit"}
+
+  log <- if("log" %in% names(extraArgs)) {extraArgs$log} else
+  {""}
+
+  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
+  {"Time [s]"}
+
+  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+  {paste("OSL [cts/",round(max(x)/length(x), digits = 2)," s]",sep="")}
+
+
+  ##============================================================================##
+  ## FITTING
+  ##============================================================================##
+  ##
+  ##////equation used for fitting////(start)
+  fit.equation <- function(I0.i,lambda.i){
+    equation<-parse(
+      text=paste("I0[",I0.i,"]*lambda[",lambda.i,"]*exp(-lambda[",lambda.i,"]*x)",
+                 collapse="+",sep=""))
+    return(equation)
+  }
+  ##////equation used for fitting///(end)
+
+  ##////equation used for fitting////(start)
+  fit.equation.simple <- function(I0.i,lambda.i){
+    equation<-parse(
+      text=paste("I0[",I0.i,"]*exp(-lambda[",lambda.i,"]*x)",
+                 collapse="+",sep=""))
+    return(equation)
+  }
+  ##////equation used for fitting///(end)
+
+
+  ##set formula elements for fitting functions
+  ## the upper two functions should be removed ... but changes are needed ... TODO
+  ##////equation used for fitting////(start)
+  fit.formula <- function(n.components){
+
+    I0 <- paste0("I0.",1:n.components)
+    lambda <- paste0("lambda.",1:n.components)
+    as.formula(paste0("y ~ ", paste(I0," * ", lambda, "* exp(-",lambda," * x)", collapse=" + ")))
+
+  }
+  ##////equation used for fitting///(end)
+
+  ##////equation used for fitting////(start)
+  fit.formula.simple <- function(n.components){
+
+    I0 <- paste0("I0.",1:n.components)
+    lambda <- paste0("lambda.",1:n.components)
+    as.formula(paste0("y ~ ", paste(I0," * exp(-",lambda," * x)", collapse=" + ")))
+
+  }
+  ##////equation used for fitting///(end)
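+
+  ## illustration (not executed, whitespace trimmed): fit.formula(2) builds
+  ## y ~ I0.1 * lambda.1 * exp(-lambda.1 * x) + I0.2 * lambda.2 * exp(-lambda.2 * x)
+  ## while fit.formula.simple(2) drops the leading lambda factors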
+
+  ##set variables
+  fit.trigger <- TRUE #indicates whether the fitting should be stopped
+  n.components <- 1 #number of components used for fitting - start with 1
+  fit.failure_counter <- 0 #counts the failed fitting attempts
+
+  ##if n.components.max is missing, then it is Inf
+  if(missing(n.components.max)==TRUE){n.components.max<-Inf}
+
+
+  ##
+  ##++++Fitting loop++++(start)
+  while(fit.trigger==TRUE & n.components <= n.components.max){
+
+    ##(0) START PARAMETER ESTIMATION
+    ##rough automatic start parameter estimation
+
+    ##I0
+    I0<-rep(values[1,2]/3,n.components)
+    names(I0) <- paste0("I0.",1:n.components)
+
+    ##lambda
+    ##ensure that no values <=0 are included; remove them for the start
+    ##parameter estimation and fit a linear function as a first guess
+    if(min(y)<=0){
+      temp.values<-data.frame(x[-which(y<=0)], log(y[-which(y<=0)]))
+    }else{
+      temp.values<-data.frame(x, log(y))
+    }
+
+    temp<-lm(temp.values)
+    lambda<-abs(temp$coefficient[2])/nrow(values)
+
+    k<-2
+    while(k<=n.components){
+      lambda[k]<-lambda[k-1]/100
+      k<-k+1
+    }
+    names(lambda) <- paste0("lambda.",1:n.components)
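+
+    ## e.g., for n.components = 3 the start values are spaced two decades
+    ## apart: lambda.1, lambda.1/100, lambda.1/10000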
+
+    ##(1) FIRST FIT WITH A SIMPLE FUNCTION
+    if(fit.method == "LM"){
+
+      ##try fit simple
+      fit.try<-suppressWarnings(try(minpack.lm::nlsLM(fit.formula.simple(n.components),
+                                          data=values,
+                                          start=c(I0,lambda),
+                                          na.action = "na.exclude",
+                                          trace = fit.trace,
+                                          control = minpack.lm::nls.lm.control(
+                                            maxiter = 500
+                                          )),
+                                    silent = TRUE
+
+      ))#end try
+
+
+    }else if(fit.method == "port"){
+
+      ##try fit simple
+      fit.try<-suppressWarnings(try(nls(fit.formula.simple(n.components),
+                                        data=values,
+                                        trace = fit.trace,
+                                        algorithm="port",
+                                        na.action = "na.exclude",
+                                        start=c(I0,lambda),
+                                        nls.control(
+                                          tol = 1,
+                                          maxiter=100,
+                                          warnOnly=FALSE,
+                                          minFactor=1/1024
+                                        ),
+                                        lower=rep(0,n.components * 2)# set lower boundaries for components
+      ), silent=TRUE# nls
+      ))#end try
+
+    }else{
+
+      stop("[fit_CWCurve()] fit.method unknown.")
+
+    }
+
+
+    ##(2) FIT WITH THE FULL FUNCTION
+    if(inherits(fit.try,"try-error") == FALSE){
+
+      ##grep parameters from simple fit to further work with them
+      parameters <- coef(fit.try)
+
+      ##grep parameters and set new starting parameters; here just lambda is chosen as
+      ##it seems to be the most valuable parameter
+      lambda <- parameters[(n.components+1):length(parameters)]
+
+      if(fit.method == "LM"){
+
+        ##try fit simple
+        fit.try<-suppressWarnings(try(minpack.lm::nlsLM(fit.formula(n.components),
+                                            data=values,
+                                            start=c(I0,lambda),
+                                            trace = fit.trace,
+                                            na.action = "na.exclude",
+                                            lower = rep(0,n.components * 2),
+                                            control = minpack.lm::nls.lm.control(
+                                              maxiter = 500
+                                            )),
+                                      silent = TRUE))
+        
+        ## HACK: 
+        # minpack.lm::nlsLM() stores the 'lower' argument as class "call" rather
+        # than "numeric" as nls() does. Before running confint() on this object
+        # we overwrite the "lower" slot with the numeric values again. 
+        if (!inherits(fit.try, "try-error")) {
+          fit.try$call$lower <- rep(0,n.components * 2)
+        }
+        
+      }else{
+
+
+        ##try fit
+        fit.try<-suppressWarnings(try(nls(fit.formula(n.components),
+                                          trace=fit.trace,
+                                          data=values,
+                                          algorithm="port",
+                                          na.action = "na.exclude",
+                                          start=c(I0,lambda),
+                                          nls.control(
+                                            maxiter = 500,
+                                            warnOnly = FALSE,
+                                            minFactor = 1/4096
+                                          ),
+                                          lower=rep(0,n.components * 2)# set lower boundaries for components
+        ), silent=TRUE# nls
+        ))#end try
+
+      }#fit.method
+    }
+    
+    ##count failed attempts for fitting
+    if(inherits(fit.try,"try-error")==FALSE){
+
+      fit <- fit.try
+      n.components <- n.components + 1
+
+    }else{
+
+      n.components<-n.components+1
+      fit.failure_counter <- fit.failure_counter+1
+      if(n.components == fit.failure_counter & exists("fit") == FALSE){fit <- fit.try}
+    }
+
+
+    ##stop fitting after a given number of wrong attempts
+    if(fit.failure_counter>=fit.failure_threshold){
+
+      fit.trigger <- FALSE
+      if(!exists("fit")){fit <- fit.try}
+
+    }else if(n.components == n.components.max & exists("fit") == FALSE){
+
+      fit <- fit.try
+
+    }
+
+  }##end while
+  ##++++Fitting loop++++(end)
+
+  ##============================================================================##
+  ## FITTING OUTPUT
+  ##============================================================================##
+
+  ##grep parameters
+  if(inherits(fit,"try-error")==FALSE){
+
+    parameters <- coef(fit)
+
+    ##correct fit equation for the de facto used number of components
+    I0.i<-1:(length(parameters)/2)
+    lambda.i<-1:(length(parameters)/2)
+    fit.function<-fit.equation(I0.i=I0.i,lambda.i=lambda.i)
+    n.components<-length(I0.i)
+
+    ##write parameters in vectors and order by decreasing lambda value
+    I0<-parameters[1:(length(parameters)/2)]
+    lambda<-parameters[(1+(length(parameters)/2)):length(parameters)]
+
+    o<-order(lambda,decreasing=TRUE)
+    I0<-I0[o]
+    lambda<-lambda[o]
+
+    ##============================================================================##
+    ## Additional Calculation
+    ##============================================================================##
+
+
+    ## ---------------------------------------------
+    ##calculate stimulation intensity after Schmidt (2008)
+
+    ##Energy - E = h*v
+    h<-6.62606957e-34 #in W*s^2 - Planck constant
+    ny<-299792458/(LED.wavelength/10^9) #frequency of light
+    E<-h*ny
+
+    ##transform LED.power in W/cm^2
+    LED.power<-LED.power/1000
+
+    ##gets stimulation intensity
+    stimulation_intensity<-LED.power/E
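+
+    ## sanity check (rounded): at 470 nm one photon carries E = h*c/lambda
+    ## ~ 4.2e-19 J, so 36 mW/cm^2 corresponds to ~8.5e16 photons/(s cm^2)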
+
+    ## ---------------------------------------------
+    ##calculate photoionisation cross section and print on terminal
+
+    ##using EQ (5) in Kitis & Pagonis (2008)
+    cs<-as.vector(lambda/stimulation_intensity)
+    cs.rel<-round(cs/cs[1],digits=4)
+
+    ## ---------------------------------------------
+    ##pseudo coefficient of determination after Lave (1970)
+
+    RSS <- sum(residuals(fit)^2) #residual sum of squares
+    TSS <- sum((y - mean(y))^2) #total sum of squares
+    pR<-round(1-RSS/TSS,digits=4)
+
+    if(pR<0){
+      warning("pseudo-R^2 < 0!")
+    }
+
+    ## ---------------------------------------------
+    ##calculate 1-sigma CONFIDENCE INTERVAL
+
+    lambda.error<-rep(NA, n.components)
+    I0.error<-rep(NA, n.components)
+
+    if(fit.calcError==TRUE){
+      ##option for confidence interval
+      values.confint<-confint(fit, level=0.68)
+      I0.confint<-values.confint[1:(length(values.confint[,1])/2),]
+      lambda.confint<-values.confint[((length(values.confint[,1])/2)+1):length(values.confint[,1]),]
+
+      ##error calculation
+      I0.error<-as.vector(abs(I0.confint[,1]-I0.confint[,2]))
+      lambda.error<-as.vector(abs(lambda.confint[,1]-lambda.confint[,2]))
+
+    }#endif::fit.calcError
+
+    ##============================================================================##
+    ## Terminal Output
+    ##============================================================================##
+
+    if (output.terminal==TRUE){
+
+      ##print rough fitting information - use the nls() control for more information
+      writeLines("\n[fit_CWCurve()]")
+      writeLines(paste("\nFitting was finally done using a ",n.components,
+                       "-component function (max=",n.components.max,"):",sep=""))
+      writeLines("------------------------------------------------------------------------------")
+      writeLines(paste0("y ~ ", as.character(fit.formula(n.components))[3], "\n"))
+
+      ##combine values and change rows names
+      fit.results<-cbind(I0,I0.error,lambda,lambda.error,cs, cs.rel)
+      row.names(fit.results)<-paste("c", 1:(length(parameters)/2), sep="")
+
+      ##print parameters
+      print(fit.results)
+
+      #print some additional information
+      if(fit.calcError==TRUE){writeLines("(errors quoted as 1-sigma values)")}
+      writeLines("------------------------------------------------------------------------------")
+    }#end if
+
+    ##============================================================================##
+    ## Terminal Output (advanced)
+    ##============================================================================##
+    if (output.terminalAdvanced==TRUE && output.terminal==TRUE){
+
+      ##sum of squares
+      writeLines(paste("pseudo-R^2 = ",pR,sep=""))
+    }#end if
+    ##============================================================================##
+    ## Table Output
+    ##============================================================================##
+
+    ##write output table if fit object exists
+    if (exists("fit")){
+
+      ##set data.frame for a max value of 7 components
+      output.table<-data.frame(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
+                               NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
+                               NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
+      output.tableColNames<-c("I01","I01.error","lambda1", "lambda1.error",
+                              "cs1","cs1.rel",
+                              "I02","I02.error","lambda2", "lambda2.error",
+                              "cs2","cs2.rel",
+                              "I03","I03.error","lambda3", "lambda3.error",
+                              "cs3","cs3.rel",
+                              "I04","I04.error","lambda4", "lambda4.error",
+                              "cs4","cs4.rel",
+                              "I05","I05.error","lambda5", "lambda5.error",
+                              "cs5","cs5.rel",
+                              "I06","I06.error","lambda6", "lambda6.error",
+                              "cs6","cs6.rel",
+                              "I07","I07.error","lambda7", "lambda7.error",
+                              "cs7","cs7.rel"
+      )
+
+      ##write components in output table
+      i<-0
+      k<-1
+      while(i<=n.components*6){
+        output.table[1,i+1]<-I0[k]
+        output.table[1,i+2]<-I0.error[k]
+        output.table[1,i+3]<-lambda[k]
+        output.table[1,i+4]<-lambda.error[k]
+        output.table[1,i+5]<-cs[k]
+        output.table[1,i+6]<-cs.rel[k]
+        i<-i+6
+        k<-k+1
+      }
+
+      ##add pR and n.components
+      output.table<-cbind(sample_code,n.components,output.table,pR)
+
+      ##alter column names
+      colnames(output.table)<-c("sample_code","n.components",
+                                output.tableColNames,"pseudo-R^2")
+
+      if(missing(output.path)==FALSE){
+
+        ##write file with just the header if the file not exists
+        if(file.exists(paste(output.path,"fit_CWCurve_Output_",sample_code,".csv",sep=""))==FALSE){
+          write.table(output.table,file=paste(output.path,"fit_CWCurve_Output_",
+                                              sample_code,".csv",sep=""), sep=";"
+                      ,row.names=FALSE)
+        }else{
+          write.table(output.table,file=paste(output.path,"fit_CWCurve_Output_",
+                                              sample_code,".csv",sep=""), sep=";"
+                      ,row.names=FALSE, append=TRUE, col.names=FALSE)
+
+        }#endif::for write option
+
+      }#endif::table output
+
+      ##============================================================================##
+      ## COMPONENT TO SUM CONTRIBUTION PLOT
+      ##============================================================================##
+
+      ##+++++++++++++++++++++++++++++++
+      ##set matrix
+      ##set polygon matrix for optional plot output
+      component.contribution.matrix <- matrix(NA,
+                                              nrow = length(values[,1]),
+                                              ncol = (2*length(I0)) + 2)
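+
+      ## e.g., for 3 components this allocates 2*3 + 2 = 8 columns:
+      ## x, rev(x) plus one pair of polygon border columns per component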
+
+      ##set x-values
+      component.contribution.matrix[,1] <- values[,1]
+      component.contribution.matrix[,2] <- rev(values[,1])
+
+      ##+++++++++++++++++++++++++++++++
+      ##set 1st polygon
+      ##1st polygon (calculation)
+      y.contribution_first<-(I0[1]*lambda[1]*exp(-lambda[1]*x))/(eval(fit.function))*100
+
+      ##avoid NaN values (might happen with synthetic curves)
+      y.contribution_first[is.nan(y.contribution_first)==TRUE] <- 0
+
+      ##set values in matrix
+      component.contribution.matrix[,3] <- 100
+      component.contribution.matrix[,4] <- 100 - rev(y.contribution_first)
+
+      ##+++++++++++++++++++++++++++++++
+      ##set polygons in between
+      ##polygons in between (calculate and plot)
+      if (length(I0)>2){
+
+        y.contribution_prev <- y.contribution_first
+        i<-2
+
+        ##matrix stepping
+        k <- seq(3, ncol(component.contribution.matrix), by=2)
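+        ## k addresses the first column of each polygon pair: 3, 5, 7, ...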
+
+        while (i<=length(I0)-1) {
+
+          y.contribution_next<-I0[i]*lambda[i]*exp(-lambda[i]*x)/(eval(fit.function))*100
+
+          ##avoid NaN values
+          y.contribution_next[is.nan(y.contribution_next)==TRUE] <- 0
+
+          ##set values in matrix
+          component.contribution.matrix[,k[i]] <- 100 - y.contribution_prev
+          component.contribution.matrix[, k[i]+1] <- rev(100-y.contribution_prev-
+                                                           y.contribution_next)
+
+          y.contribution_prev <- y.contribution_prev + y.contribution_next
+
+          i <- i+1
+
+        }#end while loop
+      }#end if
+
+      ##+++++++++++++++++++++++++++++++
+      ##set last polygon
+
+      ##last polygon (calculation)
+      y.contribution_last <- I0[length(I0)]*lambda[length(lambda)]*exp(-lambda[length(lambda)]*x)/
+        (eval(fit.function))*100
+
+      ##avoid NaN values
+      y.contribution_last[is.nan(y.contribution_last)==TRUE]<-0
+
+      component.contribution.matrix[,((2*length(I0))+1)] <- y.contribution_last
+      component.contribution.matrix[,((2*length(I0))+2)] <- 0
+
+      ##change the matrix column names to make them easier to understand
+      component.contribution.matrix.names <- c(
+        "x", "rev.x",
+        paste(c("y.c","rev.y.c"),rep(1:n.components,each=2), sep=""))
+
+      ##calculate area for each component, for each time interval
+      component.contribution.matrix.area <- sapply(
+        seq(3,ncol(component.contribution.matrix),by=2),
+        function(x){
+
+          matrixStats::rowDiffs(cbind(rev(component.contribution.matrix[,(x+1)]),
+                         component.contribution.matrix[,x]))
+
+        })
+
+      ##append to existing matrix
+      component.contribution.matrix <- cbind(
+        component.contribution.matrix,
+        component.contribution.matrix.area,
+        rowSums(component.contribution.matrix.area)
+      )
+
+      ##set final column names
+      colnames(component.contribution.matrix) <- c(
+        component.contribution.matrix.names,
+        paste(c("cont.c"),rep(1:n.components,each=1), sep=""),
+        "cont.sum")
+
+
+    }#endif :: (exists("fit"))
+
+  }else{
+    writeLines("[fit_CWCurve()] Fitting Error >> Plot without fit produced!")
+    output.table <- NA
+    component.contribution.matrix <- NA
+  }
+
+  ##============================================================================##
+  ## PLOTTING
+  ##============================================================================##
+  if(plot==TRUE){
+
+    ##grep par parameters
+    par.default <- par(no.readonly = TRUE)
+
+    ##set colors gallery to provide more colors
+    col <- get("col", pos = .LuminescenceEnv)
+
+    ##set plot frame
+    if(!inherits(fit, "try-error")){
+      layout(matrix(c(1,2,3),3,1,byrow=TRUE),c(1.6,1,1), c(1,0.3,0.4),TRUE)
+      par(oma=c(1,1,1,1),mar=c(0,4,3,0),cex=cex.global)
+    }else{
+      par(cex=cex.global)
+    }
+
+
+    ##==upper plot==##
+    ##open plot area
+
+    plot(NA,NA,
+         xlim=c(min(x),max(x)),
+         ylim=if(log=="xy"){c(1,max(y))}else{c(0,max(y))},
+         xlab=if(!inherits(fit, "try-error")){""}else{xlab},
+         xaxt=if(!inherits(fit, "try-error")){"n"}else{"s"},
+         ylab=ylab,
+         main=main,
+         log=log)
+
+    ##plotting measured signal
+    points(x,y,pch=20, col="grey")
+
+    ##add additional labeling (fitted function)
+    mtext(side=3, sample_code, cex=0.7*cex.global)
+
+    ##plot sum function
+    if(inherits(fit,"try-error")==FALSE){
+      lines(x,eval(fit.function), lwd=2, col="black")
+      legend.caption<-"sum curve"
+      curve.col <- 1
+
+      ##plot signal curves
+
+      ##plot curve for additional parameters
+      if(length(I0)>1){
+
+        for (i in 1:length(I0)) {
+          curve(I0[i]*lambda[i]*exp(-lambda[i]*x),col=col[i+1],
+                lwd = 2,
+                add = TRUE)
+          legend.caption<-c(legend.caption,paste("component ",i,sep=""))
+          curve.col<-c(curve.col,i+1)
+        }
+      }#end if
+      ##plot legend
+      #legend(y=max(y)*1,"measured values",pch=20, col="gray", bty="n")
+      legend("topright",legend.caption,lty=rep(1,n.components+1,NA),lwd=2,col=col[curve.col], bty="n")
+
+      ##==lower plot==##
+      ##plot residuals
+      par(mar=c(4.2,4,0,0))
+      plot(x,residuals(fit),
+           xlim=c(min(x),max(x)),
+           xlab=xlab,
+           type="l",
+           col="grey",
+           ylab="Residual [a.u.]",
+           lwd=2,
+           log=if(log=="x" | log=="xy"){log="x"}else{""}
+      )
+
+      ##add 0 line
+      abline(h=0)
+
+      ##------------------------------------------------------------------------##
+      ##++component to sum contribution plot ++##
+      ##------------------------------------------------------------------------##
+
+      ##plot component contribution to the whole signal
+      #open plot area
+      par(mar=c(4,4,3.2,0))
+      plot(NA,NA,
+           xlim=c(min(x),max(x)),
+           ylim=c(0,100),
+           ylab="Contribution [%]",
+           xlab=xlab,
+           main="Component contribution to sum curve",
+           log=if(log=="x" | log=="xy"){log="x"}else{""})
+
+      stepping <- seq(3,length(component.contribution.matrix[1,]),2)
+
+      for(i in 1:length(I0)){
+
+        polygon(c(component.contribution.matrix[,1],
+                  component.contribution.matrix[,2]),
+                c(component.contribution.matrix[,stepping[i]],
+                  component.contribution.matrix[,stepping[i]+1]),
+                col = col[i+1])
+      }
+      rm(stepping)
+
+
+    }#end if try-error for fit
+
+    par(par.default)
+    rm(par.default)
+  }
+
+  ##============================================================================##
+  ## Return Values
+  ##============================================================================##
+
+  newRLumResults.fit_CWCurve <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      fit = fit,
+      output.table = output.table,
+      component.contribution.matrix = list(component.contribution.matrix)))
+
+  rm(fit)
+  rm(output.table)
+  rm(component.contribution.matrix)
+
+  invisible(newRLumResults.fit_CWCurve)
+
+}
diff --git a/R/fit_LMCurve.R b/R/fit_LMCurve.R
new file mode 100644
index 0000000..c286b4d
--- /dev/null
+++ b/R/fit_LMCurve.R
@@ -0,0 +1,1014 @@
+#' Nonlinear Least Squares Fit for LM-OSL curves
+#'
+#' The function determines weighted nonlinear least-squares estimates of the
+#' component parameters of an LM-OSL curve (Bulur 1996) for a given number of
+#' components and returns various component parameters. The fitting procedure
+#' uses the function \code{\link{nls}} with the \code{port} algorithm.
+#'
+#' \bold{Fitting function}\cr\cr The function used for the fitting has the
+#' general form: \deqn{y = exp(0.5)*Im_1*(x/xm_1)*exp(-x^2/(2*xm_1^2)) +
+#' \ldots + exp(0.5)*Im_i*(x/xm_i)*exp(-x^2/(2*xm_i^2))} where \eqn{1 < i < 8}\cr This
+#' function and the equations for the conversion to b (detrapping probability)
+#' and n0 (proportional to initially trapped charge) have been taken from
+#' Kitis & Pagonis (2008): \deqn{xm_i=\sqrt{max(t)/b_i}} \deqn{Im_i=exp(-0.5)*n0_i/xm_i}\cr
+#' \bold{Background subtraction}\cr\cr Three methods for background subtraction
+#' are provided for a given background signal (\code{values.bg}).\cr
+#' \code{polynomial}: default method. A polynomial function is fitted using
+#' \link{glm} and the resulting function is used for background subtraction:
+#' \deqn{y = a*x^4 + b*x^3 + c*x^2 + d*x + e}\cr \code{linear}: a linear
+#' function is fitted using \link{glm} and the resulting function is used for
+#' background subtraction: \deqn{y = a*x + b}\cr \code{channel}: the measured
+#' background signal is subtracted channelwise from the measured signal.\cr\cr
+#' \bold{Start values}\cr
+#'
+#' The choice of the initial parameters for the \code{nls}-fitting is a crucial
+#' point and the fitting procedure may mainly fail due to ill chosen start
+#' parameters. Here, three options are provided:\cr\cr \bold{(a)} If no start
+#' values (\code{start_values}) are provided by the user, a cheap guess is made
+#' by using the detrapping values found by Jain et al. (2003) for quartz for a
+#' maximum of 7 components. Based on these values, the pseudo start parameters
+#' xm and Im are recalculated for the given data set. In all cases, the fitting
+#' starts with the ultra-fast component and (depending on \code{n.components})
+#' steps through the following values. If no fit could be achieved, an error
+#' plot (for \code{plot = TRUE}) with the pseudo curve (based on the
+#' pseudo start parameters) is provided. This may give the opportunity to
+#' identify appropriate start parameters visually.\cr\cr \bold{(b)} If start
+#' values are provided, the function works like a simple \code{\link{nls}}
+#' fitting approach.\cr\cr \bold{(c)} If no start parameters are provided and
+#' the option \code{fit.advanced = TRUE} is chosen, an advanced start parameter
+#' estimation is applied using a stochastic approach. For this, the
+#' recalculated start parameters \bold{(a)} are used to construct a normal
+#' distribution. The start parameters are then sampled randomly from this
+#' distribution. A maximum of 100 attempts will be made. \bold{Note:} This
+#' process may be time consuming. \cr\cr \bold{Goodness of fit}\cr\cr The
+#' goodness of the fit is given by a pseudo-R^2 value (pseudo coefficient of
+#' determination). According to Lave (1970), the value is calculated as:
+#' \deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} \cr
+#' and \eqn{TSS = Total~Sum~of~Squares}\cr\cr \bold{Error of fitted component
+#' parameters}\cr\cr The 1-sigma error for the components is calculated using
+#' the function \link{confint}. Due to considerable calculation time, this
+#' option is deactivated by default. In addition, the error for the components
+#' can be estimated by using internal R functions like \link{summary}. See the
+#' \link{nls} help page for more information.\cr \emph{For more details on the
+#' nonlinear regression in R, see Ritz & Streibig (2008).}
+#'
+#' @param values \code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+#' (\bold{required}): x,y data of measured values (time and counts). See
+#' examples.
+#'
+#' @param values.bg \code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+#' (optional): x,y data of measured values (time and counts) for background
+#' subtraction.
+#'
+#' @param n.components \link{integer} (with default): fixed number of
+#' components that are to be recognised during fitting (min = 1, max = 7).
+#'
+#' @param start_values \link{data.frame} (optional): start parameters for Im
+#' and xm data for the fit. If no start values are given, an automatic start
+#' value estimation is attempted (see details).
+#'
+#' @param input.dataType \link{character} (with default): alter the plot output
+#' depending on the input data: "LM" or "pLM" (pseudo-LM). See: \link{CW2pLM}
+#'
+#' @param fit.method \code{\link{character}} (with default): select fit method,
+#' allowed values: \code{'port'} and \code{'LM'}. \code{'port'} uses the 'port'
+#' routine usint the funtion \code{\link{nls}} \code{'LM'} utilises the
+#' function \code{nlsLM} from the package \code{minpack.lm} and with that the
+#' Levenberg-Marquardt algorithm.
+#'
+#' @param sample_code \link{character} (optional): sample code used for the
+#' plot and the optional output table (mtext).
+#'
+#' @param sample_ID \link{character} (optional): additional identifier used as
+#' column header for the table output.
+#'
+#' @param LED.power \link{numeric} (with default): LED power (max.) used for
+#' intensity ramping in mW/cm^2. \bold{Note:} This value is used for the
+#' calculation of the absolute photoionisation cross section.
+#'
+#' @param LED.wavelength \link{numeric} (with default): LED wavelength in nm
+#' used for stimulation. \bold{Note:} This value is used for the calculation of
+#' the absolute photoionisation cross section.
+#'
+#' @param fit.trace \link{logical} (with default): traces the fitting process
+#' on the terminal.
+#'
+#' @param fit.advanced \link{logical} (with default): enables advanced fitting
+#' attempt for automatic start parameter recognition. Works only if no start
+#' parameters are provided. \bold{Note:} It may take a while and it is not
+#' compatible with \code{fit.method = "LM"}.
+#'
+#' @param fit.calcError \link{logical} (with default): calculate 1-sigma error
+#' range of components using \link{confint}.
+#'
+#' @param bg.subtraction \link{character} (with default): specifies method for
+#' background subtraction (\code{polynomial}, \code{linear}, \code{channel},
+#' see Details). \bold{Note:} requires input for \code{values.bg}.
+#'
+#' @param verbose \link{logical} (with default): terminal output with
+#' fitting results.
+#'
+#' @param plot \link{logical} (with default): returns a plot of the
+#' fitted curves.
+#'
+#' @param plot.BG \link{logical} (with default): returns a plot of the
+#' background values with the fit used for the background subtraction.
+#'
+#' @param \dots Further arguments that may be passed to the plot output, e.g.
+#' \code{xlab}, \code{ylab}, \code{main}, \code{log}.
+#'
+#' @return
+#' Various types of plots are returned. For details see above.\cr
+#' Furthermore an \code{RLum.Results} object is returned with the following structure:\cr
+#'
+#' data:\cr
+#' .. $fit : \code{nls} (nls object)\cr
+#' .. $output.table : \code{data.frame} with fitting results\cr
+#' .. $component.contribution.matrix : \code{list} component distribution matrix\cr
+#' .. $call : \code{call} the original function call
+#'
+#' Matrix structure for the distribution matrix:\cr
+#'
+#' Column 1 and 2: time and \code{rev(time)} values\cr
+#' Additional columns are used for the components, two for each component,
+#' holding the polygon border values used for the component to sum
+#' contribution plot. The last columns \code{cont.} provide information on
+#' the relative component contribution for each time interval including the
+#' row sum for these values.
+#'
+#' @note The pseudo-R^2 may not be the best parameter to describe the goodness
+#' of the fit. The trade-off between \code{n.components} and the pseudo-R^2
+#' value currently remains unconsidered. \cr
+#'
+#' The function \bold{does not} ensure that the fitting procedure has reached a
+#' global minimum rather than a local minimum! In any case of doubt, the use of
+#' manual start values is highly recommended.
+#'
+#' @section Function version: 0.3.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{fit_CWCurve}}, \code{\link{plot}}, \code{\link{nls}},
+#' \code{\link[minpack.lm]{nlsLM}}, \code{\link{get_RLum}}
+#'
+#' @references Bulur, E., 1996. An Alternative Technique For Optically
+#' Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 5,
+#' 701-709.
+#'
+#' Jain, M., Murray, A.S., Boetter-Jensen, L., 2003. Characterisation of
+#' blue-light stimulated luminescence components in different quartz samples:
+#' implications for dose measurement. Radiation Measurements, 37 (4-5),
+#' 441-449.
+#'
+#' Kitis, G. & Pagonis, V., 2008. Computerized curve deconvolution analysis for
+#' LM-OSL. Radiation Measurements, 43, 737-741.
+#'
+#' Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of
+#' Economics and Statistics, 52 (3), 320-323.
+#'
+#' Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. R. Gentleman,
+#' K. Hornik, & G. Parmigiani, eds., Springer, p. 150.
+#'
+#' @keywords dplot models
+#'
+#' @examples
+#'
+#'
+#' ##(1) fit LM data without background subtraction
+#' data(ExampleData.FittingLM, envir = environment())
+#' fit_LMCurve(values = values.curve, n.components = 3, log = "x")
+#'
+#' ##(2) fit LM data with background subtraction and export as JPEG
+#' ## -alter file path for your preferred system
+#' ##jpeg(file = "~/Desktop/Fit_Output\%03d.jpg", quality = 100,
+#' ## height = 3000, width = 3000, res = 300)
+#' data(ExampleData.FittingLM, envir = environment())
+#' fit_LMCurve(values = values.curve, values.bg = values.curveBG,
+#'             n.components = 2, log = "x", plot.BG = TRUE)
+#' ##dev.off()
+#'
+#' ##(3) fit LM data with manual start parameters
+#' data(ExampleData.FittingLM, envir = environment())
+#' fit_LMCurve(values = values.curve,
+#'             values.bg = values.curveBG,
+#'             n.components = 3,
+#'             log = "x",
+#'             start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500)))
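+#'
+#' ##(assumed usage, not run) store and inspect the fitting results;
+#' ##get_RLum() with 'data.object' is an assumption, see the get_RLum docs
+#' ##fit <- fit_LMCurve(values = values.curve, n.components = 3, log = "x")
+#' ##get_RLum(fit, data.object = "output.table")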
+#'
+#' @export
+fit_LMCurve <- function(
+  values,
+  values.bg,
+  n.components = 3,
+  start_values,
+  input.dataType = "LM",
+  fit.method = "port",
+  sample_code = "",
+  sample_ID = "",
+  LED.power = 36,
+  LED.wavelength = 470,
+  fit.trace = FALSE,
+  fit.advanced = FALSE,
+  fit.calcError = FALSE,
+  bg.subtraction = "polynomial",
+  verbose = TRUE,
+  plot = TRUE,
+  plot.BG = FALSE,
+  ...
+){
+
+  # (0) Integrity checks -------------------------------------------------------
+
+  ##(1) data.frame or RLum.Data.Curve object?
+  if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
+
+    stop("[fit_LMCurve()] 'values' object has to be of type
+         'data.frame' or 'RLum.Data.Curve'!")
+
+  }else{
+
+    if(is(values, "RLum.Data.Curve") == TRUE && (
+      values at recordType!="RBR" & values at recordType!="LM-OSL")){
+
+      stop("[fit_LMCurve()] recordType should be 'RBR' or 'LM-OSL'!
+           Consider as(object,'data.frame') if you had used the pseudo transformation functions.")
+
+    }else if(is(values, "RLum.Data.Curve") == TRUE){
+
+      values <- as(values,"data.frame")
+
+    }
+  }
+
+  ##(2) data.frame or RLum.Data.Curve object?
+  if(missing(values.bg)==FALSE){
+
+    if(is(values.bg, "data.frame") == FALSE & is(values.bg,
+                                                 "RLum.Data.Curve") == FALSE){
+
+      stop("[fit_LMCurve()] 'values.bg' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+
+    }else{
+
+      if(is(values, "RLum.Data.Curve") == TRUE && values at recordType!="RBR"){
+
+        stop("[fit_LMCurve()] recordType should be 'RBR'!")
+
+      }else if(is(values.bg, "RLum.Data.Curve") == TRUE){
+
+        values.bg <- as(values.bg,"data.frame")
+
+      }
+    }
+  }
+
+  ## Set plot format parameters -----------------------------------------------
+  extraArgs <- list(...) # read out additional arguments list
+
+  log       <- if("log" %in% names(extraArgs)) {extraArgs$log}
+  else {""}
+
+  xlim      <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim}
+  else {c(min(values[,1]),max(values[,1]))}
+
+  ylim      <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim}
+  else {
+
+    if(input.dataType=="pLM"){
+      c(0,max(values[,2]*1.1))
+    }else{
+      c(min(values[,2]),max(values[,2]*1.1))
+    }
+
+  }
+
+  xlab      <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab}
+  else {
+
+    if(input.dataType=="LM"){"Time [s]"}else{"u [s]"}
+
+  }
+
+  ylab     <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab}
+  else {
+
+    if(input.dataType=="LM"){
+      paste("LM-OSL [cts/",round(max(values[,1])/length(values[,1]),digits=2)," s]",sep="")
+    }else{"pLM-OSL [a.u.]"}
+  }
+
+
+  main      <- if("main" %in% names(extraArgs)) {extraArgs$main}
+  else {"Default"}
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex}
+  else {0.8}
+
+
+  fun       <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+
+
+  ##============================================================================##
+  ##  BACKGROUND SUBTRACTION
+  ##============================================================================##
+
+  ##perform background subtraction if a background LM measurement exists
+
+  if(missing(values.bg)==FALSE){
+
+    #set graphical parameters
+    par.default <- par(mfrow=c(1,1), cex=1.5*cex)
+
+    ##check if length of bg and signal is consistent
+    if(length(values[,2])!=length(values.bg[,2])){stop("[fit_LMCurve] Length of values and values.bg differs!")}
+
+    if(bg.subtraction=="polynomial"){
+
+      #fit polynomial (cubic) function to background
+      glm.fit<-glm(values.bg[,2] ~ values.bg[,1]+I(values.bg[,1]^2)+I(values.bg[,1]^3))
+      glm.coef<-coef(glm.fit)
+
+      #subtract background with fitted function
+      values[,2]<-values[,2]-
+        (glm.coef[4]*values[,1]^3+glm.coef[3]*values[,1]^2+glm.coef[2]*values[,1]+glm.coef[1])
+      writeLines("[fit_LMCurve] >> Background subtracted (method=\"polynomial\")!")
+
+      ##plot Background measurement if needed
+      if(plot.BG==TRUE){
+
+        plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background")
+        curve((glm.coef[4]*x^3+glm.coef[3]*x^2+glm.coef[2]*x+glm.coef[1]),add=TRUE,col="red",lwd=2)
+        text(0,max(values.bg[,2]),paste("y = ", round(glm.coef[4],digits=2),
+                                        "*x^3+",
+                                        round(glm.coef[3],digits=2),
+                                        "*x^2+",
+                                        round(glm.coef[2],digits=2),
+                                        "*x+",
+                                        round(glm.coef[1],digits=2),
+                                        sep=""),pos=4)
+        mtext(side=3,sample_code,cex=.8*cex)
+      }
+
+    }else if(bg.subtraction=="linear"){
+
+      #fit linear function to background
+      glm.fit<-glm(values.bg[,2] ~ values.bg[,1])
+      glm.coef<-coef(glm.fit)
+
+      ##subtract bg
+      values[,2]<-values[,2]-(glm.coef[2]*values[,1]+glm.coef[1])
+      writeLines("[fit_LMCurve()] >> Background subtracted (method=\"linear\")!")
+
+      ##plot Background measurement if needed
+      if(plot.BG){
+
+        plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background")
+        curve((glm.coef[2]*x+glm.coef[1]),add=TRUE,col="red",lwd=1.5)
+        text(0,max(values.bg[,2]),paste("y = ",
+                                        round(glm.coef[2],digits=2),
+                                        "*x+",
+                                        round(glm.coef[1],digits=2),
+                                        sep=""),pos=4)
+        mtext(side=3,sample_code,cex=.8*cex)
+
+      }#endif::plot BG
+
+    }else if(bg.subtraction=="channel"){
+
+      values[,2]<-values[,2]-values.bg[,2]
+      writeLines("[fit_LMCurve.R] >> Background subtracted (method=\"channel\")!")
+
+      if(plot.BG==TRUE){
+
+        plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background")
+        mtext(side=3,sample_code,cex=.8*cex)
+      }
+
+    }else{stop("Error: Invalid method for background subtraction")}
+
+    ##reset par values
+    par(par.default)
+    rm(par.default)
+  }
+
+
+  ##============================================================================##
+  ##  FITTING
+  ##============================================================================##
+
+  ##------------------------------------------------------------------------##
+  ##set function for fit equation (according to Kitis & Pagonis, 2008)
+  ##////equation used for fitting////(start)
+  fit.equation<-function(Im.i,xm.i){
+    equation<-parse(
+      text=paste("exp(0.5)*Im[",Im.i,"]*(values[,1]/xm[",xm.i,"])*exp(-values[,1]^2/(2*xm[",xm.i,"]^2))",
+                 collapse="+",sep=""))
+    return(equation)
+  }
+  ##////equation used for fitting///(end)
+  ##------------------------------------------------------------------------##
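+
+  ## illustration (not executed): for Im.i = xm.i = 1:2 the parsed text reads
+  ## exp(0.5)*Im[1]*(values[,1]/xm[1])*exp(-values[,1]^2/(2*xm[1]^2))+
+  ## exp(0.5)*Im[2]*(values[,1]/xm[2])*exp(-values[,1]^2/(2*xm[2]^2))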
+
+  ##set formula elements for fitting functions
+  ## the upper two functions should be removed ... but changes are needed ... TODO
+  ##////equation used for fitting////(start)
+  fit.formula <- function(n.components){
+
+    Im <- paste0("Im.",1:n.components)
+    xm <- paste0("xm.",1:n.components)
+
+    as.formula(paste0("y ~ ", paste("(exp(0.5) * ", Im, "* x/", xm, ") * exp(-x^2/(2 *",xm,"^2))", collapse=" + ")))
+
+  }
+  ##////equation used for fitting///(end)
+
+  ##------------------------------------------------------------------------##
+  ##automatic start parameter estimation
+
+  ##set fit function
+  fit.function<-fit.equation(Im.i=1:n.components,xm.i=1:n.components)
+
+  if(missing(start_values)){
+
+    ##set b (detrapping) values for a 7-component function taken from Jain et al. (2003)
+    b.pseudo<-c(32,2.5,0.65,0.15,0.025,0.0025,0.00030)
+
+    ##calculate xm parameters from values set based on the pseudo curves
+    xm.pseudo<-sqrt(max(values[,1])/b.pseudo)
+
+    ##the Im values are obtained by calculating residuals
+    xm.residual<-sapply(1:length(b.pseudo),function(x){abs(values[,1]-xm.pseudo[x])})
+    xm.residual<-cbind(xm.residual,values[,1])
+    Im.pseudo<-sapply(1:length(xm.pseudo),function(x){
+      min(xm.residual[which(xm.residual[,x]==min(xm.residual[,x])),8])#8 is time index
+    })
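+
+    ## e.g., for a ramp of max(t) = 3600 s the ultra-fast component
+    ## (b = 32 1/s) gets the pseudo start value xm = sqrt(3600/32) ~ 10.6 s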
+
+    ##set additional variables
+    b.pseudo_start<-1
+    b.pseudo_end<-0
+    fit.trigger<-FALSE
+
+    while(fit.trigger==FALSE){
+
+
+      xm <- xm.pseudo[b.pseudo_start:(n.components + b.pseudo_end)]
+      Im <- Im.pseudo[b.pseudo_start:(n.components + b.pseudo_end)]
+
+      if(fit.advanced){
+        ##---------------------------------------------------------------##
+        ##MC for fitting parameter
+        ##make the fitting more stable by small variations of the parameters
+
+        ##sample input parameter values from a normal distribution
+        xm.MC<-sapply(1:length(xm),function(x){
+          xm.MC<-sample(rnorm(30,mean=xm[x],sd=xm[x]/10), replace=TRUE)
+        })
+
+
+        Im.MC<-sapply(1:length(xm),function(x){
+          Im.MC<-sample(rnorm(30,mean=Im[x],sd=Im[x]/10), replace=TRUE)
+
+        })
+        ##---------------------------------------------------------------##
+
+        for(i in 1:length(xm.MC[,1])){
+
+          ##try fit using nls()
+          fit<-try(nls(y~eval(fit.function),
+                       trace=fit.trace,
+                       data=data.frame(x=values[,1],y=values[,2]),
+                       algorithm="port",
+                       start=list(Im=Im.MC[i,],xm=xm.MC[i,]),#end start values input
+                       nls.control(
+                         maxiter=500
+                       ),#end nls control
+                       lower=c(xm=min(values[,1]),Im=0),
+                       upper=c(xm=max(values[,1]),Im=max(values[,2]*1.1))
+          ),# nls
+          silent=TRUE)# end try
+          ##graphical output
+          if(i==1){cat(paste("[fit_LMCurve()] >> advanced fitting attempt (#",
+                             b.pseudo_start,"): ",sep=""))}
+          cat("*")
+
+          if(inherits(fit,"try-error") == FALSE){break}
+        }#end::forloop
+
+        cat("\n")
+
+      }else{
+
+
+        if(fit.method == "port") {
+          fit <- try(nls(
+            y ~ eval(fit.function),
+            trace = fit.trace,
+            data = data.frame(x = values[,1],y = values[,2]),
+            algorithm = "port",
+            start = list(Im = Im,xm = xm),#end start values input
+            nls.control(maxiter = 500),#end nls control
+            lower = c(xm = 0,Im = 0)
+          ),# nls
+          silent = TRUE)
+          # end try
+
+        }else if (fit.method == "LM") {
+          ##re-name for method == "LM"
+          names(Im) <- paste0("Im.", 1:n.components)
+          names(xm) <- paste0("xm.", 1:n.components)
+          start.list <- c(as.list(Im), as.list(xm))
+          ##set all lower boundaries to 0, keeping the parameter names
+          lower <- vapply(start.list, function(x) 0,
+                          FUN.VALUE = vector(mode = "numeric", length = 1))
+
+          fit <- try(minpack.lm::nlsLM(
+            fit.formula(n.components),
+            data = data.frame(x = values[,1],
+                              y = values[,2]),
+            start = start.list,
+            lower = lower,
+            trace = fit.trace,
+            control = minpack.lm::nls.lm.control(maxiter = 500)
+          ), silent = TRUE)
+
+        }else{
+
+          stop("[fit_LMCurve()] unknow method for 'fit.method'")
+
+        }
+
+
+      }#endifelse::fit.advanced
+
+
+      if(inherits(fit,"try-error")==FALSE){fit.trigger<-TRUE}
+      else{
+
+        if((n.components+b.pseudo_end)==7){fit.trigger<-TRUE
+        }else{
+          b.pseudo_start<-b.pseudo_start+1
+          b.pseudo_end<-b.pseudo_end+1
+        }#endif::maximum loops
+      }#endif::try-error
+    }#end:whileloop fit trigger
+
+  }else{#endif::missing start values
+    ##------------------------------------------------------------------------##
+
+    fit<-try(nls(y~eval(fit.function),
+                 trace=fit.trace, data.frame(x=values[,1],y=values[,2]),
+                 algorithm="port", start=list(Im=start_values[,1],xm=start_values[,2]),#end start values input
+                 nls.control(maxiter=500),
+                 lower=c(xm=0,Im=0),
+                 #upper=c(xm=max(x),Im=max(y)*1.1)# set lower boundaries for components
+    )# nls
+    )# end try
+  }#endif::startparameter
+
+  ##------------------------------------------------------------------------##
+
+  ##grep parameters
+  if(inherits(fit,"try-error")==FALSE){
+    parameters<-coef(fit)
+
+    ##write parameters in vectors and order parameters
+    Im<-parameters[1:(length(parameters)/2)]
+    Im.names <- names(Im)
+    xm<-parameters[(1+(length(parameters)/2)):length(parameters)]
+    xm.names <- names(xm)
+
+    ##order parameters
+    o <- order(xm)
+    xm <- xm[o]
+    names(xm) <- xm.names
+    Im <- Im[o]
+    names(Im) <- Im.names
+
+    if (verbose){
+      ##print rough fitting information - use the nls() control for more information
+      writeLines("\n[fit_LMCurve()]")
+      writeLines(paste("\nFitting was done using a ",n.components, "-component function:\n",sep=""))
+
+      ##print parameters
+      print(c(xm, Im))
+
+      #print some additional information
+      writeLines("\n(equation used for fitting according Kitis & Pagonis, 2008)")
+    }#end if
+
+    ##============================================================================##
+    ##  Additional Calculations
+    ##============================================================================##
+
+    ##calculate stimulation intensity after Schmidt (2008)
+
+    ##Energy - E = h*v
+    h<-6.62606957e-34 #in W*s^2 - Planck constant
+    ny<-299792458/(LED.wavelength/10^9) #frequency of the light
+    E<-h*ny
+
+    ##transform LED.power in W/cm^2
+    LED.power<-LED.power/1000
+
+    stimulation_intensity<-LED.power/E
+
+
+    ##calculate b and n from the equation of Bulur (1996) to compare results
+    ##using Equations 5 and 6 from Kitis & Pagonis (2008)
+    b<-as.vector(max(values[,1])/xm^2) #detrapping probability
+    n0<-as.vector((Im/exp(-0.5))*xm)
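+
+    ## quick check: with max(t) = 1000 s and xm = 56 s, b = 1000/56^2
+    ## ~ 0.32 1/s, i.e. small xm values imply fast detrapping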
+
+
+    ##CALCULATE 1- sigma CONFIDENCE INTERVAL
+    ##------------------------------------------------------------------------##
+    b.error<-rep(NA, n.components)
+    n0.error<-rep(NA, n.components)
+
+    if(fit.calcError==TRUE){
+      ##option for confidence interval
+      values.confint<-confint(fit, level=0.68)
+      Im.confint<-values.confint[1:(length(values.confint[,1])/2),]
+      xm.confint<-values.confint[((length(values.confint[,1])/2)+1):length(values.confint[,1]),]
+
+      ##error calculation
+      b.error<-as.vector(abs((max(values[,1])/xm.confint[,1]^2)-(max(values[,1])/xm.confint[,2]^2)))
+      n0.error<-as.vector(abs(((Im.confint[,1]/exp(-0.5))*xm.confint[,1]) - ((Im.confint[,2]/exp(-0.5))*xm.confint[,2])))
+    }
+    ##------------------------------------------------------------------------##
+
+
+    ##calculate photoionisation cross section and print on terminal
+    ##using EQ (5) in Kitis & Pagonis (2008)
+    cs<-as.vector((max(values[,1])/xm^2)/stimulation_intensity)
+    rel_cs<-round(cs/cs[1],digits=4)
+
+    ##pseudo coefficient of determination after Lave (1970)
+    RSS <- sum(residuals(fit)^2) #residual sum of squares
+    TSS <- sum((values[,2] - mean(values[,2]))^2) #total sum of squares
+    pR<-round(1-RSS/TSS,digits=4)
+
+    ##============================================================================##
+    ## COMPONENT TO SUM CONTRIBUTION MATRIX
+    ##============================================================================##
+
+    ##+++++++++++++++++++++++++++++++
+    ##set matrix
+    ##set polygon matrix for optional plot output
+    component.contribution.matrix <- matrix(NA,
+                                            nrow = length(values[,1]),
+                                            ncol = (2*length(xm)) + 2)
+
+    ##set x-values
+    component.contribution.matrix[,1] <- values[,1]
+    component.contribution.matrix[,2] <- rev(values[,1])
+
+    ##+++++++++++++++++++++++++++++++
+    ##set 1st polygon
+    ##1st polygon (calculation)
+    y.contribution_first <- (exp(0.5)*Im[1]*values[,1]/
+                               xm[1]*exp(-values[,1]^2/(2*xm[1]^2))/
+                               (eval(fit.function))*100)
+
+    ##avoid NaN values (might happen with synthetic curves)
+    y.contribution_first[is.nan(y.contribution_first)==TRUE] <- 0
+
+    ##set values in matrix
+    component.contribution.matrix[,3] <- 100
+    component.contribution.matrix[,4] <- 100-rev(y.contribution_first)
+
+    ##+++++++++++++++++++++++++++++++
+    ##set polygons in between
+    ##polygons in between (calculate and plot)
+    if (length(xm)>2){
+
+      y.contribution_prev <- y.contribution_first
+      i<-2
+
+      ##matrix stepping
+      k <- seq(3, ncol(component.contribution.matrix), by=2)
+
+      while (i<=length(xm)-1) {
+        y.contribution_next<-(exp(0.5)*Im[i]*values[,1]/
+                                xm[i]*exp(-values[,1]^2/(2*xm[i]^2))/
+                                (eval(fit.function))*100)
+
+        ##avoid NaN values
+        y.contribution_next[is.nan(y.contribution_next)==TRUE] <- 0
+
+        ##set values in matrix
+        component.contribution.matrix[, k[i]] <- 100-y.contribution_prev
+        component.contribution.matrix[, k[i]+1] <- rev(100-y.contribution_prev-
+                                                         y.contribution_next)
+
+        y.contribution_prev <- y.contribution_prev + y.contribution_next
+
+        i<-i+1
+      }#end while loop
+    }#end if
+
+    ##+++++++++++++++++++++++++++++++
+    ##set last polygon
+
+    ##last polygon (calculation)
+    y.contribution_last<-(exp(0.5)*Im[length(xm)]*values[,1]/
+                            xm[length(xm)]*exp(-values[,1]^2/
+                                                 (2*xm[length(xm)]^2))/
+                            (eval(fit.function))*100)
+
+    ##avoid NaN values
+    y.contribution_last[is.nan(y.contribution_last)==TRUE]<-0
+
+    component.contribution.matrix[,((2*length(xm))+1)] <- y.contribution_last
+    component.contribution.matrix[,((2*length(xm))+2)] <- 0
+
+    ##change the matrix column names to make them easier to understand
+    component.contribution.matrix.names <- c("x", "rev.x",
+                                             paste(c("y.c","rev.y.c"),rep(1:n.components,each=2), sep=""))
+
+
+    ##calculate area for each component, for each time interval
+    component.contribution.matrix.area <- sapply(
+      seq(3,ncol(component.contribution.matrix),by=2),
+      function(x){
+
+        matrixStats::rowDiffs(cbind(rev(component.contribution.matrix[,(x+1)]),
+                       component.contribution.matrix[,x]))
+
+      })
+
+    ##append to existing matrix
+    component.contribution.matrix <- cbind(
+      component.contribution.matrix,
+      component.contribution.matrix.area,
+      rowSums(component.contribution.matrix.area)
+    )
+
+    ##set final column names
+    colnames(component.contribution.matrix) <- c(
+      component.contribution.matrix.names,
+      paste(c("cont.c"),rep(1:n.components,each=1), sep=""),
+      "cont.sum")
+
+    ##============================================================================##
+    ##  Terminal Output (advanced)
+    ##============================================================================##
+    if (verbose){
+      ##write fill lines
+      writeLines("------------------------------------------------------------------------------")
+      writeLines("(1) Corresponding values according the equation in Bulur, 1996 for b and n0:\n")
+      for (i in 1:length(b)){
+        writeLines(paste("b",i," = ",format(b[i],scientific=TRUE)," +/- ",format(b.error[i],scientific=TRUE),sep=""))
+        writeLines(paste("n0",i," = ",format(n0[i],scientific=TRUE)," +/- ",format(n0.error[i],scientific=TRUE),"\n",sep=""))
+      }#end for loop
+
+      ##write photoionisation cross section on terminal
+      for (i in 1:length(cs)){
+        writeLines(paste("cs from component.",i," = ",format(cs[i],scientific=TRUE, digits=4), " cm^2",
+                         "\t >> relative: ",round(cs[i]/cs[1],digits=4),sep=""))
+
+      }#end for loop
+
+      writeLines(paste(
+        "\n(stimulation intensity value used for calculation: ",format(stimulation_intensity,scientific=TRUE)," 1/s 1/cm^2)",sep=""))
+      writeLines("(errors quoted as 1-sigma uncertainties)")
+      writeLines("------------------------------------------------------------------------------\n")
+
+      #sum of squares
+      writeLines(paste("pseudo-R^2 = ",pR,sep=""))
+    }#end if
+
+    ##============================================================================##
+    ##  COMPOSE RETURN VALUES (data.frame)
+    ##============================================================================##
+
+    ##write output table if fit object exists
+    if (exists("fit")){
+
+      ##set data.frame for a max value of 7 components
+      output.table <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA,
+                                 NA,NA,NA,NA,NA,NA,NA,NA)
+
+      output.tableColNames<-c("Im1","xm1",
+                              "b1","b1.error","n01","n01.error",
+                              "cs1","rel_cs1",
+                              "Im2","xm2",
+                              "b2","b2.error","n02","n02.error",
+                              "cs2","rel_cs2",
+                              "Im3","xm3",
+                              "b3","b3.error","n03","n03.error",
+                              "cs3","rel_cs3",
+                              "Im4","xm4",
+                              "b4","b4.error","n04","n04.error",
+                              "cs4","rel_cs4",
+                              "Im5","xm5",
+                              "b5","b5.error","n05","n05.error",
+                              "cs5","rel_cs5",
+                              "Im6","xm6",
+                              "b6","b6.error","n06","n06.error",
+                              "cs6","rel_cs6",
+                              "Im7","xm7",
+                              "b7","b7.error","n07","n07.error",
+                              "cs7","rel_cs7")
+
+
+      ##write components in output table
+      i<-0
+      k<-1
+      while(i<=n.components*8){
+        output.table[1,i+1]<-Im[k]
+        output.table[1,i+2]<-xm[k]
+        output.table[1,i+3]<-b[k]
+        output.table[1,i+4]<-b.error[k]
+        output.table[1,i+5]<-n0[k]
+        output.table[1,i+6]<-n0.error[k]
+        output.table[1,i+7]<-cs[k]
+        output.table[1,i+8]<-rel_cs[k]
+        i<-i+8
+        k<-k+1
+      }
+
+      ##add pR and n.components
+      output.table<-cbind(sample_ID,sample_code,n.components,output.table,pR)
+
+      ###alter column names
+      colnames(output.table)<-c("ID","sample_code","n.components",output.tableColNames,"pseudo-R^2")
+
+      ##----------------------------------------------------------------------------##
+    }#endif::exists fit
+  }else{
+
+    output.table <- NA
+    component.contribution.matrix <- NA
+    writeLines("[fit_LMCurve] Fitting Error: Plot without fit produced!")
+
+  }
+  ##============================================================================##
+  ##  PLOTTING
+  ##============================================================================##
+  if(plot){
+
+    ##cheat the R check routine
+    x <- NULL; rm(x)
+
+    ##grep package colour gallery
+    col <- get("col", pos = .LuminescenceEnv)
+
+    ##change xlim values in case of the log plot the avoid problems
+    if((log == "x" | log == "xy") && xlim[1] == 0){
+      warning("[fit_LMCurve()] x-axis limitation change to avoid 0 values for log-scale!", call. = FALSE)
+      xlim <- c(2^0.5/2 * max(values[,1])/length(values[,1]), xlim[2])
+
+
+    }
+
+
+    ##set plot frame
+    par.default <- par(no.readonly = TRUE)
+
+    layout(matrix(c(1,2,3),3,1,byrow=TRUE),c(1.6,1,1), c(1,0.3,0.4),TRUE)
+    par(oma=c(1,1,1,1),mar=c(0,4,3,0), cex=cex)
+
+    ##==upper plot==##
+    ##open plot area
+    plot(
+      NA,
+      NA,
+      xlim = xlim,
+      ylim = ylim,
+      xlab = "",
+      xaxt = "n",
+      main = main,
+      log = log,
+      ylab = ylab
+    )#endplot
+
+    mtext(side=3,sample_code,cex=0.8*cex)
+
+    ##plotting measured signal
+    points(values[, 1],
+           values[, 2],
+           pch = 20,
+           col = rgb(0.4, 0.4, 0.4, 0.5))
+
+    ##==pseudo curve==##------------------------------------------------------#
+
+    ##curve for used pseudo values
+    if(inherits(fit,"try-error")==TRUE & missing(start_values)==TRUE){
+      fit.function<-fit.equation(Im.i=1:n.components,xm.i=1:n.components)
+      Im<-Im.pseudo[1:n.components]
+      xm<-xm.pseudo[1:n.components]
+
+      ##draw pseudo curve
+      lines(values[,1],eval(fit.function), lwd=2, col="red", lty=2)
+
+      axis(side=1)
+      mtext(side=1,xlab, cex=.9*cex,line=2)
+
+      mtext(side=4,paste(n.components, " component pseudo function is shown",sep=""),cex=0.7, col="blue")
+
+      ##draw information text on plot
+      text(min(values[,1]),max(values[,2]),"FITTING ERROR!",pos=4)
+
+      ##additional legend
+      legend("topright",c("pseudo sum function"),lty=2,lwd=2,col="red",bty="n")
+
+    }
+    ##==pseudo curve==##------------------------------------------------------##
+
+    ##plot sum function
+    if(inherits(fit,"try-error")==FALSE){
+      lines(values[,1],eval(fit.function), lwd=2, col="black")
+      legend.caption<-"sum curve"
+      curve.col<-1
+
+      ##plot signal curves
+
+      ##plot curve for additional parameters
+      for (i in 1:length(xm)) {
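+        ##single component after Bulur (1996): I(t) = exp(0.5)*Im*t/xm*exp(-t^2/(2*xm^2))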
+        curve(exp(0.5)*Im[i]*x/xm[i]*exp(-x^2/(2*xm[i]^2)),col=col[i+1], lwd=2,add=TRUE)
+        legend.caption<-c(legend.caption,paste("component ",i,sep=""))
+        curve.col<-c(curve.col,i+1)
+      }
+      ##plot legend
+      legend(if(log=="x"| log=="xy"){
+        if(input.dataType=="pLM"){"topright"}else{"topleft"}}else{"topright"},
+        legend.caption,lty=1,lwd=2,col=col[curve.col], bty="n")
+
+
+      ##==lower plot==##
+      ##plot residuals
+      par(mar=c(4.2,4,0,0))
+      plot(values[,1],residuals(fit),
+           xlim=xlim,
+           xlab=xlab,
+           type="l",
+           col="grey",
+           ylab="Residual",
+           lwd=2,
+           log=log)
+
+      ##add 0 line
+      abline(h=0)
+
+
+      ##------------------------------------------------------------------------#
+      ##++component to sum contribution plot ++##
+      ##------------------------------------------------------------------------#
+
+      ##plot component contribution to the whole signal
+      #open plot area
+      par(mar=c(4,4,3.2,0))
+      plot(NA,NA,
+           xlim=xlim,
+           ylim=c(0,100),
+           ylab="Contribution [%]",
+           xlab=xlab,
+           main="Component contribution to sum curve",
+           log=if(log=="xy"){"x"}else{log})
+
+      stepping <- seq(3,ncol(component.contribution.matrix),2)
+
+      for(i in 1:length(xm)){
+
+        polygon(c(component.contribution.matrix[,1],
+                  component.contribution.matrix[,2]),
+                c(component.contribution.matrix[,stepping[i]],
+                  component.contribution.matrix[,stepping[i]+1]),
+                col = col[i+1])
+      }
+      rm(stepping)
+
+      ##reset par
+      par(par.default)
+      rm(par.default)
+      ##------------------------------------------------------------------------##
+    }#end if try-error for fit
+
+    if(fun==TRUE){sTeve()}
+  }
+  ##-----------------------------------------------------------------------------
+  ##remove temporary objects
+  try(rm(parameters), silent = TRUE)
+
+  ##============================================================================#
+  ## Return Values
+  ##============================================================================#
+
+  newRLumResults.fit_LMCurve <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      fit = fit,
+      output.table = output.table,
+      component.contribution.matrix = list(component.contribution.matrix),
+      call = sys.call()
+    )
+  )
+
+  invisible(newRLumResults.fit_LMCurve)
+
+}
diff --git a/R/get_Layout.R b/R/get_Layout.R
new file mode 100644
index 0000000..128143c
--- /dev/null
+++ b/R/get_Layout.R
@@ -0,0 +1,643 @@
+#' Collection of layout definitions
+#'
+#' This helper function returns a list with layout definitions for homogeneous
+#' plotting.
+#'
+#' The easiest way to create a user-specific layout definition is perhaps to
+#' create either an empty or a default layout object and fill/modify the
+#' definitions (\code{user.layout <- get_Layout(layout = "empty")}).
+#'
+#' @param layout \code{\link{character}} or \code{\link{list}} object
+#' (required): name of the layout definition to be returned. If name is
+#' provided the respective definition is returned. One of the following
+#' supported layout definitions is possible: \code{"default"},
+#' \code{"journal.1"}, \code{"small"}, \code{"empty"}. User-specific layout
+#' definitions must be provided as a list object of predefined structure, see
+#' details.
+#' @return A list object with layout definitions for plot functions.
+#' @section Function version: 0.1
+#' @author Michael Dietze, GFZ Potsdam (Germany)
+#' @examples
+#'
+#' ## read example data set
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## show structure of the default layout definition
+#' layout.default <- get_Layout(layout = "default")
+#' str(layout.default)
+#'
+#' ## show colour definitions for Abanico plot, only
+#' layout.default$abanico$colour
+#'
+#' ## set Abanico plot title colour to orange
+#' layout.default$abanico$colour$main <- "orange"
+#'
+#' ## create Abanico plot with modified layout definition
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  layout = layout.default)
+#'
+#' ## create Abanico plot with predefined layout "journal"
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  layout = "journal")
+#'
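+#' ## a minimal sketch: start from the empty layout object and fill
+#' ## a single field (here the Abanico plot title colour) before use
+#' user.layout <- get_Layout(layout = "empty")
+#' user.layout$abanico$colour$main <- "darkblue"
+#'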
+#' @export
+get_Layout <- function(
+  layout
+) {
+
+  ## pre-defined layout selections
+  if(is.character(layout) == TRUE & length(layout) == 1) {
+
+    if(layout == "empty") {
+
+      layout = list(
+        
+        ## empty Abanico plot -------------------------------------------------
+        abanico = list(
+          font.type = list(
+            main    = character(1),
+            xlab1   = character(1),
+            xlab2   = character(1),
+            ylab    = character(1),
+            zlab    = character(1),
+            xtck1   = character(1),
+            xtck2   = character(1),
+            xtck3   = character(1),
+            ytck    = character(1),
+            ztck    = character(1),
+            mtext   = character(1),
+            summary = character(1), # optionally vector
+            stats   = character(1), # optionally vector
+            legend  = character(1) # optionally vector
+          ),
+          
+          font.size = list(
+            main    = numeric(1),
+            xlab1   = numeric(1),
+            xlab2   = numeric(1),
+            xlab3   = numeric(1),
+            ylab    = numeric(1),
+            zlab    = numeric(1),
+            xtck1   = numeric(1),
+            xtck2   = numeric(1),
+            xtck3   = numeric(1),
+            ytck    = numeric(1),
+            ztck    = numeric(1),
+            mtext   = numeric(1),
+            summary = numeric(1), # optionally vector
+            stats   = numeric(1), # optionally vector
+            legend  = numeric(1)  # optionally vector
+          ),
+          
+          font.deco = list(
+            main    = character(1),
+            xlab1   = character(1),
+            xlab2   = character(1),
+            xlab3   = character(1),
+            ylab    = character(1),
+            zlab    = character(1),
+            xtck1   = character(1),
+            xtck2   = character(1),
+            xtck3   = character(1),
+            ytck    = character(1),
+            ztck    = character(1),
+            mtext   = character(1),
+            summary = character(1), # optionally vector
+            stats   = character(1), # optionally vector
+            legend  = character(1) # optionally vector
+          ),
+          
+          colour = list(
+            main    = numeric(1), # plot title colour
+            xlab1   = numeric(1), # left x-axis label colour
+            xlab2   = numeric(1), # right x-axis label colour
+            xlab3   = numeric(1), # right x-axis label colour
+            ylab    = numeric(1), # y-axis label colour
+            zlab    = numeric(1), # z-axis label colour
+            xtck1   = numeric(1), # left x-axis tick colour
+            xtck2   = numeric(1), # right x-axis tick colour
+            xtck3   = numeric(1), # right x-axis tick colour
+            ytck    = numeric(1), # y-axis tick colour
+            ztck    = numeric(1), # z-axis tick colour
+            mtext   = numeric(1), # subheader text colour
+            summary = numeric(1), # statistic summary colour
+            stats   = numeric(1), # value statistics colour
+            legend  = numeric(1), # legend colour
+            centrality = numeric(1), # Centrality line colour
+            value.dot  = numeric(1), # De value dot colour
+            value.bar  = numeric(1), # De value error bar colour
+            value.rug  = numeric(1), # De value rug colour
+            poly.line  = numeric(1), # polygon line colour
+            poly.fill  = numeric(1), # polygon fill colour
+            bar.line   = numeric(1), # bar line colour
+            bar.fill   = numeric(1), # bar fill colour
+            kde.line   = numeric(1),
+            kde.fill   = numeric(1),
+            grid.major = numeric(1),
+            grid.minor = numeric(1),
+            border     = numeric(1),
+            background = numeric(1)),
+          
+          dimension = list(
+            figure.width    = numeric(1), # figure width in mm
+            figure.height   = numeric(1), # figure height in mm
+            margin          = numeric(4), # margin sizes in mm
+            main.line       = numeric(1), # line height in %
+            xlab1.line      = numeric(1), # line height in %
+            xlab2.line      = numeric(1), # line height in %
+            xlab3.line      = numeric(1), # line height in %
+            ylab.line       = numeric(1), # line height in %
+            zlab.line       = numeric(1), # line height in %
+            xtck1.line      = numeric(1), # line height in %
+            xtck2.line      = numeric(1), # line height in %
+            xtck3.line      = numeric(1), # line height in %
+            ytck.line       = numeric(1), # line height in %
+            ztck.line       = numeric(1), # line height in %
+            xtcl1           = numeric(1), # tick length in %
+            xtcl2           = numeric(1), # tick length in %
+            xtcl3           = numeric(1), # tick length in %
+            ytcl            = numeric(1), # tick length in %
+            ztcl            = numeric(1), # tick length in %
+            rugl            = numeric(1), # rug length in %
+            mtext           = numeric(1), # line height in %
+            summary.line    = numeric(1) # line height in %
+          )),
+        
+        ## empty KDE plot -----------------------------------------------------
+        kde = list(
+          font.type = list(
+            main   = character(1),
+            xlab   = character(1),
+            ylab1  = character(1),
+            ylab2  = character(1),
+            xtck   = character(1),
+            ytck1  = character(1),
+            ytck2  = character(1),
+            stats  = character(1), # optionally vector
+            legend = character(1) # optionally vector
+          ),
+          
+          font.size = list(
+            main   = numeric(1),
+            xlab   = numeric(1),
+            ylab1  = numeric(1),
+            ylab2  = numeric(1),
+            xtck   = numeric(1),
+            ytck1  = numeric(1),
+            ytck2  = numeric(1),
+            stats  = numeric(1), # optionally vector
+            legend = numeric(1) # optionally vector
+          ),
+          
+          font.deco = list(
+            main   = character(1),
+            xlab   = character(1),
+            ylab1  = character(1),
+            ylab2  = character(1),
+            xtck   = character(1),
+            ytck1  = character(1),
+            ytck2  = character(1),
+            stats  = character(1), # optionally vector
+            legend = character(1) # optionally vector
+          ),
+          
+          colour = list(
+            main   = numeric(1), # plot title colour
+            xlab   = numeric(1), # x-axis label colour
+            ylab1  = numeric(1), # primary y-axis label colour
+            ylab2  = numeric(1), # secondary y-axis label colour
+            xtck   = numeric(1), # x-axis tick colour
+            ytck1  = numeric(1), # primary y-axis tick colour
+            ytck2  = numeric(1), # secondary y-axis tick colour
+            box    = numeric(1), # plot frame box line colour
+            mtext  = numeric(1), # subheader text colour
+            stats  = numeric(1), # statistic summary colour
+            kde.line        = numeric(1),  # KDE line colour
+            kde.fill        = numeric(1),  # KDE fill colour
+            value.dot       = numeric(1),  # De value dot colour
+            value.bar       = numeric(1),  # De value error bar colour
+            value.rug       = numeric(1),  # De value rug colour
+            boxplot         = numeric(1),  # boxplot colour
+            mean.line       = numeric(1),  # mean line colour
+            sd.bar          = numeric(1),  # sd bar colour
+            background      = numeric(1)), # background colour
+          
+          dimension = list(
+            figure.width    = numeric(1), # figure width in mm
+            figure.height   = numeric(1), # figure height in mm
+            margin          = numeric(4), # margin sizes in mm
+            main.line       = numeric(1), # line height in %
+            xlab.line       = numeric(1), # line height in %
+            ylab1.line      = numeric(1), # line height in %
+            ylab2.line      = numeric(1), # line height in %
+            xtck.line       = numeric(1), # line height in %
+            ytck1.line      = numeric(1), # line height in %
+            ytck2.line      = numeric(1), # line height in %
+            xtcl            = numeric(1), # tick length in %
+            ytcl1           = numeric(1), # tick length in %
+            ytcl2           = numeric(1), # tick length in %
+            stats.line      = numeric(1) # line height in %
+          )
+        )
+      )
+    } else if(layout == "default") {
+
+      layout = list(
+        
+        ## default Abanico plot -----------------------------------------------
+        abanico = list(
+          font.type = list(
+            main    = "",
+            xlab1   = "",
+            xlab2   = "",
+            ylab    = "",
+            zlab    = "",
+            xtck1   = "",
+            xtck2   = "",
+            xtck3   = "",
+            ytck    = "",
+            ztck    = "",
+            mtext   = "",
+            summary = "", # optionally vector
+            stats   = "", # optionally vector
+            legend  = "" # optionally vector
+          ),
+          
+          font.size = list(
+            main    = 12,
+            xlab1   = 12,
+            xlab2   = 12,
+            xlab3   = 12,
+            ylab    = 12,
+            zlab    = 12,
+            xtck1   = 12,
+            xtck2   = 12,
+            xtck3   = 12,
+            ytck    = 12,
+            ztck    = 12,
+            mtext   = 10,
+            summary = 10, # optionally vector
+            stats   = 10, # optionally vector
+            legend  = 10 # optionally vector
+          ),
+          
+          font.deco = list(
+            main    = "bold",
+            xlab1   = "normal",
+            xlab2   = "normal",
+            xlab3   = "normal",
+            ylab    = "normal",
+            zlab    = "normal",
+            xtck1   = "normal",
+            xtck2   = "normal",
+            xtck3   = "normal",
+            ytck    = "normal",
+            ztck    = "normal",
+            mtext   = "normal",
+            summary = "normal", # optionally vector
+            stats   = "normal", # optionally vector
+            legend  = "normal" # optionally vector
+          ),
+          
+          colour = list(
+            main    = 1, # plot title colour
+            xlab1   = 1, # left x-axis label colour
+            xlab2   = 1, # right x-axis label colour
+            xlab3   = 1, # right x-axis label colour
+            ylab    = 1, # y-axis label colour
+            zlab    = 1, # z-axis label colour
+            xtck1   = 1, # left x-axis tick colour
+            xtck2   = 1, # right x-axis tick colour
+            xtck3   = 1, # right x-axis tick colour
+            ytck    = 1, # y-axis tick colour
+            ztck    = 1, # z-axis tick colour
+            mtext   = 1, # subheader text colour
+            summary = 1, # statistic summary colour
+            stats   = 1, # value statistics colour
+            legend  = 1, # legend colour
+            centrality = 1, # Centrality line colour
+            value.dot  = 1, # De value dot colour
+            value.bar  = 1, # De value error bar colour
+            value.rug = 1, # De value rug colour
+            poly.line  = NA, # polygon line colour
+            poly.fill  = adjustcolor("grey75", alpha.f = 0.6), # polygon fill colour
+            bar.line   = NA, # bar line colour
+            bar.fill   = "grey60", # bar fill colour
+            kde.line   = 1,
+            kde.fill   = NA,
+            grid.major = "grey80",
+            grid.minor = "none",
+            border     = 1,
+            background = NA),
+          
+          dimension = list(
+            figure.width    = "auto", # figure width in mm
+            figure.height   = "auto", # figure height in mm
+            margin = c(10, 10, 10, 10), # margin sizes in mm
+            main.line       = 100, # line height in %
+            xlab1.line      = 90, # line height in %
+            xlab2.line      = 90, # line height in %
+            xlab3.line      = 90, # line height in %
+            ylab.line       = 100, # line height in %
+            zlab.line       = 70, # line height in %
+            xtck1.line      = 100, # line height in %
+            xtck2.line      = 100, # line height in %
+            xtck3.line      = 100, # line height in %
+            ytck.line       = 100, # line height in %
+            ztck.line       = 100, # line height in %
+            xtcl1           = 100, # tick length in %
+            xtcl2           = 100, # tick length in %
+            xtcl3           = 100, # tick length in %
+            ytcl            = 100, # tick length in %
+            ztcl            = 100, # tick length in %
+            rugl            = 100, # rug length in %
+            mtext           = 100, # line height in %
+            summary.line    = 100 # line height in %
+          )),
+        
+        ## default KDE plot ---------------------------------------------------
+        kde = list(
+          font.type = list(
+            main   = "",
+            xlab   = "",
+            ylab1  = "",
+            ylab2  = "",
+            xtck   = "",
+            ytck1  = "",
+            ytck2  = "",
+            stats  = "", # optionally vector
+            legend = "" # optionally vector
+          ),
+          
+          font.size = list(
+            main   = 14,
+            xlab   = 12,
+            ylab1  = 12,
+            ylab2  = 12,
+            xtck   = 12,
+            ytck1  = 12,
+            ytck2  = 12,
+            stats  = 12, # optionally vector
+            legend = 12 # optionally vector
+          ),
+          
+          font.deco = list(
+            main   = "bold",
+            xlab   = "normal",
+            ylab1  = "normal",
+            ylab2  = "normal",
+            xtck   = "normal",
+            ytck1  = "normal",
+            ytck2  = "normal",
+            stats  = "normal", # optionally vector
+            legend = "normal" # optionally vector
+          ),
+          
+          colour = list(
+            main   = 1, # plot title colour
+            xlab   = 1, # x-axis label colour
+            ylab1  = 1, # primary y-axis label colour
+            ylab2  = 1, # secondary y-axis label colour
+            xtck   = 1, # x-axis tick colour
+            ytck1  = 1, # primary y-axis tick colour
+            ytck2  = 1, # secondary y-axis tick colour
+            box    = 1, # plot frame box line colour
+            mtext  = 2, # subheader text colour
+            stats  = "#2062B3", # statistic summary colour
+            kde.line        = "#2062B3", # KDE line colour
+            kde.fill        = NULL, # KDE fill colour
+            value.dot       = 1, # De value dot colour
+            value.bar       = 1, # De value error bar colour
+            value.rug       = 1, # De value rug colour
+            boxplot         = 1, # boxplot colour
+            mean.point       = 1, # mean point colour
+            sd.line          = 1, # sd line colour
+            background      = NULL), # background colour
+          
+          dimension = list(
+            figure.width    = "auto", # figure width in mm
+            figure.height   = "auto", # figure height in mm
+            margin = c(10, 10, 10, 10), # margin sizes in mm
+            main.line       = 100, # line height in %
+            xlab.line       = 100, # line height in %
+            ylab1.line      = 100, # line height in %
+            ylab2.line      = 100, # line height in %
+            xtck.line       = 100, # line height in %
+            ytck1.line      = 100, # line height in %
+            ytck2.line      = 100, # line height in %
+            xtcl            = 100, # tick length in %
+            ytcl1           = 100, # tick length in %
+            ytcl2           = 100, # tick length in %
+            stats.line      = 100 # line height in %
+          )
+        )
+      )
+    } else if(layout == "journal") {
+
+      layout = list(
+        
+        ## journal Abanico plot -----------------------------------------------
+        abanico = list(
+          font.type = list(
+            main    = "",
+            xlab1   = "",
+            xlab2   = "",
+            ylab    = "",
+            zlab    = "",
+            xtck1   = "",
+            xtck2   = "",
+            xtck3   = "",
+            ytck    = "",
+            ztck    = "",
+            mtext   = "",
+            summary = "", # optionally vector
+            stats   = "", # optionally vector
+            legend  = "" # optionally vector
+          ),
+          
+          font.size = list(
+            main    = 8,
+            xlab1   = 7,
+            xlab2   = 7,
+            xlab3   = 7,
+            ylab    = 7,
+            zlab    = 7,
+            xtck1   = 7,
+            xtck2   = 7,
+            xtck3   = 7,
+            ytck    = 7,
+            ztck    = 7,
+            mtext   = 6,
+            summary = 6, # optionally vector
+            stats   = 6, # optionally vector
+            legend  = 6 # optionally vector
+          ),
+          
+          font.deco = list(
+            main    = "bold",
+            xlab1   = "normal",
+            xlab2   = "normal",
+            xlab3   = "normal",
+            ylab    = "normal",
+            zlab    = "normal",
+            xtck1   = "normal",
+            xtck2   = "normal",
+            xtck3   = "normal",
+            ytck    = "normal",
+            ztck    = "normal",
+            mtext   = "normal",
+            summary = "normal", # optionally vector
+            stats   = "normal", # optionally vector
+            legend  = "normal" # optionally vector
+          ),
+          
+          colour = list(
+            main    = 1, # plot title colour
+            xlab1   = 1, # left x-axis label colour
+            xlab2   = 1, # right x-axis label colour
+            xlab3   = 1, # right x-axis label colour
+            ylab    = 1, # y-axis label colour
+            zlab    = 1, # z-axis label colour
+            xtck1   = 1, # left x-axis tick colour
+            xtck2   = 1, # right x-axis tick colour
+            xtck3   = 1, # right x-axis tick colour
+            ytck    = 1, # y-axis tick colour
+            ztck    = 1, # z-axis tick colour
+            mtext   = 1, # subheader text colour
+            summary = 1, # statistic summary colour
+            stats   = 1, # value statistics colour
+            legend  = 1, # legend colour
+            centrality = 1, # Centrality line colour
+            value.dot  = 1, # De value dot colour
+            value.bar  = 1, # De value error bar colour
+            value.rug  = 1, # De value rug colour
+            poly.line  = NA, # polygon line colour
+            poly.fill  = adjustcolor("grey75", alpha.f = 0.6), # polygon fill colour
+            bar.line   = NA, # bar line colour
+            bar.fill   = "grey60", # bar fill colour
+            kde.line   = 1,
+            kde.fill   = NA,
+            grid.major = "grey80",
+            grid.minor = "none",
+            border     = 1,
+            background = NA),
+          
+          dimension = list(
+            figure.width    = 100, # figure width in mm
+            figure.height   = 100, # figure height in mm
+            margin = c(10, 10, 10, 10), # margin sizes in mm
+            main.line       = 70, # line height in %
+            xlab1.line      = 30, # line height in %
+            xlab2.line      = 65, # line height in %
+            xlab3.line      = 30, # line height in %
+            ylab.line       = 30, # line height in %
+            zlab.line       = 40, # line height in %
+            xtck1.line      = 50, # line height in %
+            xtck2.line      = 50, # line height in %
+            xtck3.line      = 50, # line height in %
+            ytck.line       = 70, # line height in %
+            ztck.line       = 70, # line height in %
+            xtcl1           = 50, # tick length in %
+            xtcl2           = 50, # tick length in %
+            xtcl3           = 50, # tick length in %
+            ytcl            = 50, # tick length in %
+            ztcl            = 70, # tick length in %
+            rugl            = 70, # rug length in %
+            mtext           = 100, # line height in %
+            summary.line    = 70, # line height in %
+            pch             = 50  # point size in %
+          )),
+        
+        ## journal KDE plot ---------------------------------------------------
+        kde = list(
+          font.type = list(
+            main   = "",
+            xlab   = "",
+            ylab1  = "",
+            ylab2  = "",
+            xtck   = "",
+            ytck1  = "",
+            ytck2  = "",
+            stats  = "", # optionally vector
+            legend = "" # optionally vector
+          ),
+          
+          font.size = list(
+            main   = 8,
+            xlab   = 7,
+            ylab1  = 7,
+            ylab2  = 7,
+            xtck   = 7,
+            ytck1  = 7,
+            ytck2  = 7,
+            stats  = 7,
+            legend = 7
+          ),
+          
+          font.deco = list(
+            main   = "bold",
+            xlab   = "normal",
+            ylab1  = "normal",
+            ylab2  = "normal",
+            xtck   = "normal",
+            ytck1  = "normal",
+            ytck2  = "normal",
+            stats  = "normal", # optionally vector
+            legend = "normal" # optionally vector
+          ),
+          
+          colour = list(
+            main   = 1, # plot title colour
+            xlab   = 1, # x-axis label colour
+            ylab1  = 1, # primary y-axis label colour
+            ylab2  = 1, # secondary y-axis label colour
+            xtck   = 1, # x-axis tick colour
+            ytck1  = 1, # primary y-axis tick colour
+            ytck2  = 1, # secondary y-axis tick colour
+            box    = 1, # plot frame box line colour
+            mtext  = 1, # subheader text colour
+            stats  = "#2062B3", # statistic summary colour
+            kde.line        = "#2062B3", # KDE line colour
+            kde.fill        = NULL, # KDE fill colour
+            value.dot       = 1, # De value dot colour
+            value.bar       = 1, # De value error bar colour
+            value.rug       = 1, # De value rug colour
+            boxplot         = 1, # boxplot colour
+            mean.line       = adjustcolor(col = 1, 
+                                          alpha.f = 0.4), # mean line colour
+            sd.bar          = adjustcolor(col = 1, 
+                                          alpha.f = 0.4), # sd bar colour
+            background      = NULL),
+          
+          dimension = list(
+            figure.width    = 80, # figure width in mm
+            figure.height   = 80, # figure height in mm
+            margin = c(10, 10, 10, 10), # margin sizes in mm
+            main.line       = 70, # line height in %
+            xlab.line       = 30, # line height in %
+            ylab1.line      = 40, # line height in %
+            ylab2.line      = 30, # line height in %
+            xtck.line       = 50, # line height in %
+            ytck1.line      = 65, # line height in %
+            ytck2.line      = 50, # line height in %
+            xtcl            = 50, # tick length in %
+            ytcl1           = 20, # tick length in %
+            ytcl2           = 50, # tick length in %
+            stats.line      = 70 # line height in %
+          )
+        )
+      )
+    } else {
+      warning("Layout definition not supported! Default layout is used.")
+      
+      layout <- get_Layout(layout = "default")
+    }
+  } else if(is.list(layout) == TRUE) {
+
+    ## user-specific layout definition is passed through unchanged
+  }
+
+  ## return layout parameters
+  return(layout)
+}
diff --git a/R/get_Quote.R b/R/get_Quote.R
new file mode 100644
index 0000000..78c7a6b
--- /dev/null
+++ b/R/get_Quote.R
@@ -0,0 +1,94 @@
+#' Function to return essential quotes
+#'
+#' This function returns one of the collected essential quotes in the
+#' growing library. If called without any parameters, a random quote is
+#' returned.
+#'
+#' @param ID \code{\link{character}}, quote ID to be returned.
+#' @param author \code{\link{character}}, all quotes by specified author.
+#' @param separated \code{\link{logical}}, return result in separated form.
+#' @return Returns a character with quote and respective (false) author.
+#' @section Function version: 0.1.1
+#' @author Michael Dietze, GFZ Potsdam (Germany)
+#' @examples
+#'
+#' ## ask for an arbitrary quote
+#' get_Quote()
+#'
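+#' ## ask for a specific quote in separated form (ID chosen arbitrarily)
+#' get_Quote(ID = 5, separated = TRUE)
+#'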
+#' @export
+get_Quote <- function(
+  ID,
+  author,
+  separated = FALSE
+) {
+
+  ## definition of the ever growing quote data set
+  quotes <- rbind(
+    c("Anonymous student hotel employee", "Let me double check this."),
+    c("The ordinary reviewer", "I love it when a plan comes together."),
+    c("A tunnelling electron", "God does not play dice."),
+    c("Goldfinger", "You cannot get this machine better and cheaper than from us."),
+    c("A PhD supervisor", "Live long and in prosper."),
+    c("A PhD supervisor", "You are not depressive, you simply have a crappy life."),
+    c("A trapped charge", "I want to break free."),
+    c("The R-package Luminescence manual", "Call unto me, and I will answer thee, and will shew thee great things, and difficult, which thou knowest not."),
+    c("A stimulated feldspar grain", "I'm so excited and I just can't hide it."),
+    c("The true age", "How many roads..."),
+    c("The undecided OSL component", "Should I stay or should I go?"),
+    c("A fluvially transported quartz grain at night", "Always look at the bright side of life."),
+    c("An arctic sediment outcrop", "Marmor, Stein und Eisen bricht..."),
+    c("A common luminescence reader customer", "If anything can go wrong, it will."),
+    c("A blue LED to a trapped electron", "Resistance is futile."),
+    c("A trapped electron to a yellow LED", "Well, that's all?"),
+    c("A weathering rock", "Who wants to live forever?"),
+    c("A new pIRIR derivative", "20000 miles below the sea."),
+    c("Robert Oppenheimer", "I want this thing to work by just pressing one button."),
+    c("An arbitrary member of the CRAN team", "No shirt, no shoes, no service!"),
+    c("Rubber mallet to steel cylinder", "Let's rock and roll."),
+    c("A data import function", "Better late than never."),
+    c("A luminescence lab staff member to its customer", "Tell me the age, I tell you the price."),
+    c("The NSA", "O'zapft is."),
+    c("The natural dose", "You only live once."),
+    c("A Windows user", "An apple a day keeps the doctor away."),
+    c("The authors of sTeve", "We love to entertain you."),
+    c("Any arbitrary independent OSL device manufacturer", "Sure it will work, it was me who built it!"),
+    c("Response to the reviewer", "You are right, it was just a guess."),
+    c("An aliquot disc", "The answer [...] is: 48"),
+    c("Push Pin", "Made of used sample carriers"),
+    c("A motivated R-Team member", "We are doing this not just for statistical reasons, there is real science behind it!"),
+    c("An enthusiastic cabaret artist", "Political elections are like brushing teeth: if you don't do it, things become brown."),
+    c("An unbiased reviewer", "The data is too poor to be published in QG, try a higher ranked journal."),
+    c("R Team member, asked about statistical details", "No idea, I'm just here for visualisation."),
+    c("An arbitrary unexperienced RLum-user", "Little by little, the bird builds its nest."),
+    c("The answer to life, the universe and everything", "get_rightAnswer()")
+
+    )
+
+  ## Check input data
+  if(missing(ID) == TRUE & missing(author) == TRUE) {
+    ID <- sample(x = seq(from = 1,
+                         to = nrow(quotes)),
+                 size = 1)
+  } else if(missing(ID) == TRUE) {
+    ID <- seq(from = 1,
+              to = nrow(quotes))[quotes[,1] == author]
+  }
+
+  ## check for correct ID and generate quote
+  if(length(ID) < 1 || any(ID > nrow(quotes))) {
+
+    quote.out <- "Sorry, but this was an impossible task!"
+
+  } else {
+
+    ## generate quote(s)
+    if(separated == FALSE) {
+      quote.out <- paste(quotes[ID,1], ": '", quotes[ID,2], "'", sep = "")
+    } else {
+      quote.out <- quotes[ID,]
+    }
+  }
+
+  ## return quotes
+  return(quote.out)
+}
diff --git a/R/get_RLum.R b/R/get_RLum.R
new file mode 100644
index 0000000..f4a6879
--- /dev/null
+++ b/R/get_RLum.R
@@ -0,0 +1,117 @@
+#' General accessor function for RLum S4 class objects
+#'
+#' Function calls object-specific get functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding get function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum} or an object of type \code{\link{list}} containing only objects of type
+#' \code{\linkS4class{RLum}}
+#'
+#' @param \dots further arguments that will be passed to the object specific methods. For
+#' further details on the supported arguments please see the class
+#' documentation: \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}
+#'
+#' @return The output depends on the object-specific get function that is called; for a \code{list} input a \code{list} is returned.
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#'
+#' ##Example using data from the calc_CentralDose() function
+#'
+#' ##load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ##apply the central dose model 1st time
+#' temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
+#'
+#' ##get results and store them in a new object
+#' temp.get <- get_RLum(object = temp1)
+#'
+#'
+#' @export
+setGeneric("get_RLum", function (object, ...) {standardGeneric("get_RLum") })
+
+# Method for get_RLum method for RLum objects in a list for a list of objects  -------------------
+#' @describeIn get_RLum
+#' Returns a list of \code{\linkS4class{RLum}} objects that had been passed to \code{\link{get_RLum}}
+#'
+#' @param null.rm \code{\link{logical}} (with default): option to get rid of empty and NULL objects
+#'
+#' @export
+setMethod("get_RLum",
+          signature = "list",
+          function(object, null.rm = FALSE, ...){
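+            ##dispatch logic: only non-empty 'RLum.Analysis' objects are passed
+            ##on below; all other elements yield NULL, and non-'RLum' elements
+            ##additionally trigger a warning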
+
+
+            selection <- lapply(1:length(object), function(x){
+
+              ##get rid of all objects that are not of type RLum, this is better than leaving that
+              ##to the user
+              if(inherits(object[[x]], what = "RLum")){
+
+                ##it might be the case the object already comes with empty objects, this would
+                ##cause a crash
+                if(is(object[[x]], "RLum.Analysis") && length(object[[x]]@records) != 0){
+                  get_RLum(object[[x]],...)
+
+                }else{
+                  return(NULL)
+
+                }
+
+              }else{
+
+               warning(paste0("[get_RLum()] object #",x," in the list was not of type 'RLum' and has been removed!"),
+                       call. = FALSE)
+                return(NULL)
+
+              }
+
+            })
+
+
+            ##remove empty or NULL objects after the selection ... if wanted
+            if(null.rm){
+
+
+                ##first set all empty objects to NULL ... for RLum.Analysis objects
+                selection <- lapply(1:length(selection), function(x){
+                  if(is(selection[[x]], "RLum.Analysis") && length(selection[[x]]@records) == 0){
+                    return(NULL)
+
+                  }else{
+                    return(selection[[x]])
+
+                  }
+
+                })
+
+                ##get rid of all NULL objects
+                selection <- selection[!sapply(selection, is.null)]
+
+
+            }
+
+            return(selection)
+
+          })
diff --git a/R/get_Risoe.BINfileData.R b/R/get_Risoe.BINfileData.R
new file mode 100644
index 0000000..b01724b
--- /dev/null
+++ b/R/get_Risoe.BINfileData.R
@@ -0,0 +1,29 @@
+#' General accessor function for Risoe.BINfileData S4 class objects
+#'
+#' Function calls object-specific get functions for RisoeBINfileData S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{Risoe.BINfileData}} objects.\cr Depending on the input object, the
+#' corresponding get function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
+#'
+#' @param object \code{\linkS4class{Risoe.BINfileData}} (\bold{required}): S4 object of
+#' class \code{Risoe.BINfileData}
+#' @param \dots further arguments that one might want to pass to the specific
+#' get function
+#' @return The output depends on the object-specific get function that is called.
+#' @section Function version: 0.1.0
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso
+#' \code{\linkS4class{Risoe.BINfileData}}
+#' @keywords utilities
+#'
+#' @export
+setGeneric(
+  name = "get_Risoe.BINfileData",
+  def = function(object, ...) {
+    standardGeneric("get_Risoe.BINfileData")
+  },
+  package = "Luminescence"
+)
diff --git a/R/get_rightAnswer.R b/R/get_rightAnswer.R
new file mode 100644
index 0000000..8d40c1e
--- /dev/null
+++ b/R/get_rightAnswer.R
@@ -0,0 +1,17 @@
+#' Function to get the right answer
+#'
+#' This function returns just the right answer
+#'
+#' @param ... you can pass an infinite number of further arguments
+#' @return Returns the right answer
+#' @section Function version: 0.1.0
+#' @author inspired by R.G.
+#' @examples
+#'
+#' ## you really want to know?
+#' get_rightAnswer()
+#'
+#' @export
+get_rightAnswer <- function(...) {
+  return(46)
+}
diff --git a/R/internal_as.latex.table.R b/R/internal_as.latex.table.R
new file mode 100644
index 0000000..0e79b7a
--- /dev/null
+++ b/R/internal_as.latex.table.R
@@ -0,0 +1,219 @@
+#' Create LaTex tables from data.frames and RLum objects
+#' 
+#' This function takes a data.frame and returns a table in LaTex code that
+#' can be copied in any tex document.
+#'
+#' @param x a \code{\link{data.frame}} or \code{RLum} object
+#' @param row.names currently unused
+#' @param col.names currently unused
+#' @param comments \code{\link{logical}} insert LaTex comments
+#' @param pos \code{\link{character}} of length one specifying the alignment
+#' of each column, e.g., \code{pos = "clr"} for a three column data frame and center, left
+#' and right alignment
+#' @param digits \code{\link{numeric}} number of digits (numeric fields)
+#' @param select a \code{\link{character}} vector passed to \code{\link{subset}}
+#' @param split an \code{\link{integer}} specifying the number of individual tables
+#' the data frame is split into; useful for wide tables
+#' @param ... options: \code{verbose}
+#'
+#' @section TODO:
+#' - Improve by using RegEx to dynamically find error fields, e.g. ( "([ ]err)|(^err)" )
+#' 
+#' @return
+#' Returns LaTex code
+#'
+#' @examples
+#' df <- data.frame(x = 1:10, y = letters[1:10])
+#' .as.latex.table(df)
+#' .as.latex.table(df, pos = "lr")
+#' .as.latex.table(df, select = "y", pos = "r")
+#' 
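+#' ## a sketch: split a wide data frame into two separate LaTex tables
+#' .as.latex.table(df, split = 2)
+#'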
+#' @noRd
+.as.latex.table <- function(x, 
+                            row.names = NULL, 
+                            col.names = NULL, 
+                            comments = TRUE,
+                            pos = "c",
+                            digits = 3,
+                            select,
+                            split = NULL,
+                            ...) {
+  
+  args <- list(x = x,
+               row.names = row.names,
+               col.names = col.names,
+               comments = comments,
+               pos = pos,
+               digits = digits,
+               split = split,
+               ... = ...)
+  if (!missing(select))
+    args$select <- select
+  
+  switch(class(x)[1],
+         data.frame = do.call(".as.latex.table.data.frame", args),
+         DRAC.highlights = do.call(".as.latex.table.data.frame", args),
+         RLum.Results = do.call(".as.latex.table.RLum.Results", args))
+}
+
+################################################################################
+## "Method"                  RLum.Results                                     ##
+##----------------------------------------------------------------------------##
+.as.latex.table.RLum.Results <- function(x, 
+                                         row.names = NULL, 
+                                         col.names = NULL, 
+                                         comments = TRUE,
+                                         pos = "c",
+                                         digits = 3,
+                                         select,
+                                         split = NULL,
+                                         ...) {
+  
+  ## Object: DRAC.highlights
+  if (x at originator == "use_DRAC") {
+    x <- get_RLum(x)$highlights
+    x <- .digits(x, digits)
+    fields.w.error <- seq(4, 25, 2)
+    for(i in fields.w.error)
+      x[ ,i] <- paste0(x[ ,i], "$\\pm{}$", x[ ,i+1])
+    x <- x[-c(fields.w.error + 1)]
+    .as.latex.table(x, comments = comments, pos = pos, split = split, ...)
+  }# EndOf::use_DRAC
+  
+}
+
+################################################################################
+## "Method"                     data.frame                                    ##
+##----------------------------------------------------------------------------##
+.as.latex.table.data.frame <- function(x, 
+                                       row.names = NULL, 
+                                       col.names = NULL, 
+                                       comments = TRUE,
+                                       pos = "c",
+                                       digits = 3,
+                                       select,
+                                       split = NULL,
+                                       ...) {
+  ## Integrity checks ----
+  if (!is.data.frame(x))
+    stop("x must be a data frame", call. = FALSE)
+  if (!is.null(col.names) && length(col.names) != ncol(x))
+    stop("length of col.names does not match the number of columns",
+         call. = FALSE)
+  if (!is.null(row.names) && length(row.names) != nrow(x))
+    stop("length of row.names does not match the number of rows",
+         call. = FALSE)
+  if (length(pos) != 1) 
+    stop("length of pos does not match the number of columns", 
+         call. = FALSE)
+  
+  ## Default settings ----
+  options <- list(verbose = TRUE)
+  
+  ## Override settings ----
+  options <- modifyList(options, list(...))
+  
+  ## Subset data frame ----
+  if (!missing(select)) {
+    is.name <- select %in% names(x)
+    if (any(!is.name))
+      stop("Undefined columns selected. Please check provided column names in 'select'.", 
+           call. = FALSE)
+    x <- subset(x, select = select)
+  }
+  
+  ## Format numeric fields ----
+  x <- .digits(x, digits)
+  
+  ## Split the table
+  if (is.null(split))
+    split <- 1
+  chunks <- ceiling(ncol(x) / split)
+  chunks.start <- seq(1, ncol(x), chunks)
+  chunks.end <- chunks.start + chunks - 1
+  chunks.end[length(chunks.end)] <- ncol(x)
+  
+  tex.table.list <- vector("list", split)
+  
+  for (i in 1:length(tex.table.list)) {
+    
+    x.chunk <- x[ ,chunks.start[i]:chunks.end[i], drop = FALSE]
+    
+    if (ncol(x) == 1) {
+      x.chunk <- as.data.frame(x.chunk)
+      colnames(x.chunk) <- names(x[i])
+    }
+      
+    
+    ## Comments ----
+    tex.comment.usePackage <- ifelse(comments,
+                                     "% add usepackage{adjustbox} to latex preamble \n",
+                                     "")
+    
+    ## Header ----
+    col.names <- tex.table.header <- gsub(pattern = " ", 
+                                          x = names(x.chunk), 
+                                          replacement = " \\\\\\\\ ")
+    tex.table.header <- paste0("\t", 
+                               paste("\\multicolumn{1}{p{2cm}}{\\centering", 
+                                     col.names, 
+                                     "}", 
+                                     collapse = " & \n\t"),
+                               "\\\\ \n")
+    
+    ## Rows ----
+    tex.table.rows <- ""
+    for (j in 1:nrow(x.chunk)) {
+      tex.table.rows <- paste0(tex.table.rows, 
+                               paste(paste(x.chunk[j, ], collapse = " & "),
+                                     "\\\\ \n"))
+    }
+    
+    ## Tex table ----
+    if (nchar(pos) != 1 && nchar(pos) != ncol(x))
+      pos <- "c"
+    if (!any(strsplit(pos, split = "")[[1]] %in% c("l", "c", "r")))
+      pos <- "c"
+    if (nchar(pos) == 1)
+      pos <- paste0(rep(pos, ncol(x)), collapse = "")
+    
+    tex.table.begin <- paste0("\\begin{table}[ht] \n",
+                              "  \\centering \n",
+                              "  \\begin{adjustbox}{max width=\\textwidth} \n",
+                              paste("  \\begin{tabular}{", pos, "}\n"),
+                              "     \\hline \n")
+    
+    tex.table.end <-  paste0("     \\hline \n",
+                             "   \\end{tabular} \n",
+                             "   \\end{adjustbox} \n",
+                             "\\end{table}")
+    
+    tex.table <- paste0(tex.comment.usePackage,
+                        tex.table.begin,
+                        tex.table.header,
+                        "\\hline \n",
+                        tex.table.rows,
+                        tex.table.end)
+    
+    if (options$verbose)
+      cat(tex.table)
+    
+    tex.table.list[[i]] <- tex.table
+  }
+  
+  invisible(tex.table.list)
+}
+
+# This function takes a data.frame, checks each column and tries to
+# force the specified amount of digits if numeric or coercible to numeric
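+# (e.g., .digits(data.frame(a = c(1, 2.3456)), 3) returns the column as
+# formatted character strings with at least three decimal places)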
+.digits <- function(x, digits) {
+  for (i in 1:ncol(x)) {
+    if (is.factor(x[ ,i]))
+      x[ ,i] <- as.character(x[ ,i])
+    test.numeric <- suppressWarnings(as.numeric(x[ ,i]))
+    if (!is.na(test.numeric[1]))
+      x[ ,i] <- format(test.numeric, nsmall = digits, digits = digits)
+  }
+  return(x)
+}
\ No newline at end of file
diff --git a/R/internals_RLum.R b/R/internals_RLum.R
new file mode 100644
index 0000000..1dbe827
--- /dev/null
+++ b/R/internals_RLum.R
@@ -0,0 +1,45 @@
+####################################################################################################
+##                     INTERNAL HELPER FUNCTIONS                                                  ##
+####################################################################################################
+
+#+++++++++++++++++++++
+#+ .set_pid()        +
+#+++++++++++++++++++++
+
+#' Set unique id of the RLum.Analysis object as parent id for each RLum.Data object in the record list
+#'
+#' This function only applies to RLum.Analysis objects and was written for performance, not
+#' usability, meaning the function runs without any checks and is for internal use only.
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}): input object where the function
+#' should be applied on
+#'
+#' @return
+#' Returns the same object as the input
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @examples
+#'
+#' ##example using self created data
+#' object <- set_RLum(
+#' "RLum.Analysis",
+#' records = list(
+#'  set_RLum("RLum.Data.Curve"),
+#'  set_RLum("RLum.Data.Curve")))
+#'
+#' object <- .set_pid(object)
+#'
+#' @noRd
+.set_pid <- function(object){
+
+  object at records <-
+    lapply(object at records, function(x) {
+      x at .pid  <- object at .uid
+      return(x)
+    })
+
+  return(object)
+}
diff --git a/R/length_RLum.R b/R/length_RLum.R
new file mode 100644
index 0000000..eb0f642
--- /dev/null
+++ b/R/length_RLum.R
@@ -0,0 +1,32 @@
+#' General length function for RLum S4 class objects
+#'
+#' Function calls object-specific length functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding length function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum}
+#'
+#' @return Returns the length of the object, e.g., the number of stored records, depending on the object.
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}
+#' @keywords utilities
+#'
+#'
+#' @export
+setGeneric("length_RLum", function(object) {
+  standardGeneric("length_RLum")
+})
+
diff --git a/R/merge_RLum.Analysis.R b/R/merge_RLum.Analysis.R
new file mode 100644
index 0000000..8c996f3
--- /dev/null
+++ b/R/merge_RLum.Analysis.R
@@ -0,0 +1,145 @@
+#' Merge function for RLum.Analysis S4 class objects
+#'
+#' Function allows merging of RLum.Analysis objects and adding of allowed
+#' objects to an RLum.Analysis.
+#'
+#' This function allows merging of \code{\linkS4class{RLum.Analysis}}
+#' objects. Additionally, other \code{\linkS4class{RLum}} objects can be added
+#' to an existing \code{\linkS4class{RLum.Analysis}} object. Supported objects
+#' to be added are: \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Spectrum}} and
+#' \code{\linkS4class{RLum.Data.Image}}.\cr
+#'
+#' The order in the new \code{\linkS4class{RLum.Analysis}} object is the object
+#' order provided with the input list.
+#'
+#' @param objects \code{\link{list}} of \code{\linkS4class{RLum.Analysis}}
+#' (\bold{required}): list of S4 objects of class \code{RLum.Analysis}.
+#' Furthermore other objects of class \code{\linkS4class{RLum}} can be added,
+#' see details.
+#'
+#' @return Return an \code{\linkS4class{RLum.Analysis}} object.
+#'
+#' @note The information for the slot 'protocol' is taken from the first
+#' \code{\linkS4class{RLum.Analysis}} object in the input list. Therefore at
+#' least one object of type \code{\linkS4class{RLum.Analysis}} has to be
+#' provided.
+#'
+#' @section Function version: 0.2.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{merge_RLum}}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Data.Image}}, \code{\linkS4class{RLum}}
+#'
+#' @references -
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#'
+#' ##merge different RLum objects from the example data
+#' data(ExampleData.RLum.Analysis, envir = environment())
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+#' curve <- get_RLum(object)[[2]]
+#'
+#' temp.merged <- merge_RLum.Analysis(list(curve, IRSAR.RF.Data, IRSAR.RF.Data))
+#'
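+#' ## a sketch: the record order in the merged object follows the input list
+#' temp.merged2 <- merge_RLum.Analysis(list(IRSAR.RF.Data, curve))
+#'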
+#' @export
+merge_RLum.Analysis<- function(
+  objects
+){
+
+  # Integrity checks ----------------------------------------------------------------------------
+
+  ##check if object is of class RLum
+  temp.class.test <- sapply(1:length(objects), function(x){
+
+    if(is(objects[[x]], "RLum") == FALSE){
+
+      temp.text <- paste("[merge_RLum.Analysis()]: At least element", x, "is not of class 'RLum' or a derivative class!")
+      stop(temp.text)
+    }
+
+    ##provide class of objects
+    is(objects[[x]])[1]
+
+  })
+
+  ##check if at least one object of RLum.Analysis is provided
+  if(!"RLum.Analysis"%in%temp.class.test){
+
+    stop("[merge_RLum.Analysis()] At least one input object in the list has to be of class
+           'RLum.Analysis'!")
+
+  }
+
+
+
+  # Merge objects -------------------------------------------------------------------------------
+
+  ##(0) get the current environment to later assign the variable temp.meta.data.first
+  temp.environment  <- environment()
+  temp.meta.data.first <- NA; rm(temp.meta.data.first) #to avoid problems with the R check routine
+
+  ##(1) collect all elements in a list
+  temp.element.list <- unlist(lapply(1:length(objects), function(x){
+
+    ##Depending on the element, the right function is used
+    if(is(objects[[x]])[1] == "RLum.Analysis"){
+
+      ##take the protocol information from the first RLum.Analysis object and store it
+      if(!exists("temp.meta.data.first")){
+
+        assign("temp.meta.data.first", objects[[x]]@protocol, envir = temp.environment)
+
+      }
+
+      ##return to list
+      get_RLum(objects[[x]])
+
+    }else if((is(objects[[x]])[1] == "RLum.Data.Curve") |
+               (is(objects[[x]])[1] == "RLum.Data.Image") |
+               (is(objects[[x]])[1] == "RLum.Data.Spectrum")){
+
+      ##return to list
+      objects[[x]]
+
+    }else{
+
+      stop("[merge_RLum.Anlysis()] What ever was provided, this 'RLum' object is not supported!")
+
+    }
+
+
+  }))
+
+
+  # Build new RLum.Analysis object --------------------------------------------------------------
+  temp.new.RLum.Analysis <- set_RLum(
+    class = "RLum.Analysis",
+    originator = "merge_RLum.Analysis",
+    records = temp.element.list,
+    protocol = temp.meta.data.first,
+    info = unlist(lapply(objects, function(x) {
+      x@info
+    }), recursive = FALSE),
+    .pid = unlist(lapply(objects, function(x) {
+      x@.uid
+    }))
+    )
+
+
+  # Return object -------------------------------------------------------------------------------
+  return(temp.new.RLum.Analysis)
+
+}
diff --git a/R/merge_RLum.Data.Curve.R b/R/merge_RLum.Data.Curve.R
new file mode 100644
index 0000000..b5d1b36
--- /dev/null
+++ b/R/merge_RLum.Data.Curve.R
@@ -0,0 +1,303 @@
+#' Merge function for RLum.Data.Curve S4 class objects
+#'
+#' Function allows merging of RLum.Data.Curve objects in different ways
+#'
+#' This function allows merging \code{\linkS4class{RLum.Data.Curve}}
+#' objects without modifying the objects themselves. Merging is always applied to
+#' the 2nd column of the data matrix of the object.\cr
+#'
+#' \bold{Supported merge operations for
+#' \code{\linkS4class{RLum.Data.Curve}} objects}\cr
+#'
+#' \code{"sum"}\cr
+#'
+#' All count values will be summed up using the function \code{\link{rowSums}}.
+#'
+#' \code{"mean"}\cr
+#'
+#' The mean over the count values is calculated using the function
+#' \code{\link{rowMeans}}.
+#'
+#' \code{"median"}\cr
+#'
+#' The median over the count values is calculated using the function
+#' \code{\link[matrixStats]{rowMedians}}.
+#'
+#' \code{"sd"}\cr
+#'
+#' The standard deviation over the count values is calculated using the function
+#' \code{\link[matrixStats]{rowSds}}.
+#'
+#' \code{"var"}\cr
+#'
+#' The variance over the count values is calculated using the function
+#' \code{\link[matrixStats]{rowVars}}.
+#'
+#' \code{"min"}\cr
+#'
+#' The min values from the count values is chosen using the function
+#' \code{\link[matrixStats]{rowMins}}.
+#'
+#' \code{"max"}\cr
+#'
+#' The max values from the count values is chosen using the function
+#' \code{\link[matrixStats]{rowMins}}.
+#'
+#' \code{"-"}\cr
+#'
+#' The row sums of the last objects are subtracted from the first object.
+#'
+#' \code{"*"}\cr
+#'
+#' The row sums of the last objects are multiplied with the first object.
+#'
+#' \code{"/"}\cr
+#'
+#' Values of the first object are divided by row sums of the last objects.
+#'
+#' @param object \code{\link{list}} of \code{\linkS4class{RLum.Data.Curve}}
+#' (\bold{required}): list of S4 objects of class \code{RLum.Data.Curve}.
+#'
+#' @param merge.method \code{\link{character}} (\bold{required}): method for
+#' combining of the objects, e.g.  \code{'mean'}, \code{'sum'}, see details for
+#' further information and allowed methods.  Note: Elements in slot info will
+#' be taken from the first curve in the list.
+#'
+#' @param method.info \code{\link{numeric}} (optional): allows specifying how
+#' info elements of the input objects are combined, e.g. \code{1} means that
+#' just the elements from the first object are kept, \code{2} keeps only the
+#' info elements from the 2nd object etc. If nothing is provided, all elements
+#' are combined.
+#'
+#' @return Returns an \code{\linkS4class{RLum.Data.Curve}} object.
+#'
+#' @note The information from the slot 'recordType' is taken from the first
+#' \code{\linkS4class{RLum.Data.Curve}} object in the input list. The slot
+#' 'curveType' is filled with the name \code{merged}.
+#'
+#' @section S3-generic support:
+#'
+#' This function is fully operational via S3-generics:
+#' \code{`+`}, \code{`-`}, \code{`/`}, \code{`*`}, \code{merge}
+#'
+#' @section Function version: 0.2.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{merge_RLum}}, \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' @references -
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#'
+#' ##load example data
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ##take the first and third TL curves
+#' TL.curves  <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)")
+#' TL.curve.1 <- TL.curves[[1]]
+#' TL.curve.3 <- TL.curves[[3]]
+#'
+#' ##plot single curves
+#' plot_RLum(TL.curve.1)
+#' plot_RLum(TL.curve.3)
+#'
+#' ##divide the 3rd curve by the 1st and plot the result
+#' TL.curve.merged <- merge_RLum.Data.Curve(list(TL.curve.3, TL.curve.1), merge.method = "/")
+#' plot_RLum(TL.curve.merged)
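+#'
+#' ##as a further sketch, the same curves can also be averaged
+#' ##(this assumes, as given here, curves with the same channel layout)
+#' TL.curve.mean <- merge_RLum.Data.Curve(list(TL.curve.1, TL.curve.3), merge.method = "mean")
+#' plot_RLum(TL.curve.mean)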
+#'
+#' @export
+merge_RLum.Data.Curve <- function(
+  object,
+  merge.method = "mean",
+  method.info
+){
+
+  # Integrity checks ----------------------------------------------------------------------------
+
+  ##(1) check if object is of class RLum.Data.Curve
+  temp.recordType.test <- sapply(1:length(object), function(x){
+
+    if(is(object[[x]], "RLum.Data.Curve") == FALSE){
+
+      temp.text <- paste(
+        "[merge_RLum.Data.Curve()]: At least object", x, "is not of class 'RLum.Data.Curve'!")
+      stop(temp.text)
+    }
+
+    ##return the record type of the object
+    return(object[[x]]@recordType)
+
+  })
+
+  ##(2) Check for similar record types
+  if(length(unique(temp.recordType.test))>1){
+
+    stop.text <- paste0("[merge_RLum.Data.Curve()] only similar record types are supported, you are trying to merge: ", paste0("'",unique(temp.recordType.test),"'", collapse = ", "))
+
+    stop(stop.text)
+  }
+
+
+
+  # Merge objects -------------------------------------------------------------------------------
+
+  ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  ##merge data objects
+  ##problem ... how to handle data with different resolution or length?
+
+  ##(1) build new data matrix
+
+    ##first find shortest object
+    check.length <- sapply(1:length(object),function(x){
+      nrow(object[[x]]@data)
+    })
+
+  temp.matrix  <- sapply(1:length(object), function(x){
+
+    ##check if the objects are of equal length
+    if (length(unique(check.length)) != 1) {
+      ##but we have to at least check the x-range
+      if (object[[x]]@data[x,1] != object[[1]]@data[x,1]) {
+        stop(
+          "[merge_RLum.Data.Curve()] The objects seem not to have the same channel resolution!"
+        )
+
+      }
+
+      warning("[merge_RLum.Data.Curve()] The number of channels between the curves differs. The resulting curve has the length of the shortest curve.")
+
+      ##if this is ok, we can continue and shorten all objects to the shortest one
+      return(object[[x]]@data[1:min(check.length),2])
+
+    }else{
+      object[[x]]@data[,2]
+
+    }
+
+
+  })
+
+
+  ##(2) apply selected method for merging
+  if(merge.method == "sum"){
+
+    temp.matrix <- rowSums(temp.matrix)
+
+  }else if(merge.method == "mean"){
+
+    temp.matrix <- rowMeans(temp.matrix)
+
+  }else if(merge.method == "median"){
+
+    temp.matrix <- matrixStats::rowMedians(temp.matrix)
+
+  }else if(merge.method == "sd"){
+
+    temp.matrix <- matrixStats::rowSds(temp.matrix)
+
+  }else if(merge.method == "var"){
+
+    temp.matrix <- matrixStats::rowVars(temp.matrix)
+
+  }else if(merge.method == "max"){
+
+    temp.matrix <- matrixStats::rowMaxs(temp.matrix)
+
+  }else if(merge.method == "min"){
+
+    temp.matrix <- matrixStats::rowMins(temp.matrix)
+
+  }else if(merge.method == "-"){
+
+    if(ncol(temp.matrix) > 2){
+      temp.matrix  <- temp.matrix[,1] - rowSums(temp.matrix[,-1])
+    }else{
+      temp.matrix <-  temp.matrix[,1] - temp.matrix[,2]
+    }
+
+
+  }else if(merge.method == "*"){
+
+    if(ncol(temp.matrix) > 2){
+      temp.matrix  <- temp.matrix[,1] * rowSums(temp.matrix[,-1])
+    }else{
+      temp.matrix <-  temp.matrix[,1] * temp.matrix[,2]
+    }
+
+
+  }else if(merge.method == "/"){
+
+    if(ncol(temp.matrix) > 2){
+      temp.matrix  <- temp.matrix[,1] / rowSums(temp.matrix[,-1])
+    }else{
+      temp.matrix <-  temp.matrix[,1] / temp.matrix[,2]
+    }
+
+    ##get index of Inf values
+    id.inf <- which(is.infinite(temp.matrix))
+
+    ##replace them with 0 and provide a warning
+    temp.matrix[id.inf] <- 0
+
+    warning(paste0(length(id.inf), " 'Inf' values have been replaced by 0 in the matrix."))
+
+  }else{
+    stop("[merge_RLum.Data.Curve()] unsupported or unknown merge method!")
+
+  }
+
+  ##add first column
+  temp.matrix <- cbind(object[[1]]@data[1:min(check.length),1], temp.matrix)
+
+
+  ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  ##merge info objects as simply as possible ... just keep them all ... another possibility
+  ##would be to choose one of the input objects
+
+  ##unlist is needed here, as otherwise it would cause unexpected behaviour when further using
+  ##the RLum.object
+  if(missing(method.info)){
+
+    temp.info <- unlist(lapply(1:length(object), function(x){
+
+      object[[x]]@info
+
+    }), recursive = FALSE)
+
+  }else{
+
+    temp.info <- object[[method.info]]@info
+
+  }
+
+
+  # Build new RLum.Data.Curve object --------------------------------------------------------------
+
+  temp.new.Data.Curve <- set_RLum(
+    class = "RLum.Data.Curve",
+    originator = "merge_RLum.Data.Curve",
+    recordType = object[[1]]@recordType,
+    curveType =  "merged",
+    data = temp.matrix,
+    info = temp.info,
+    .pid = unlist(lapply(object, function(x) {
+      x@.uid
+    }))
+
+  )
+
+
+  # Return object -------------------------------------------------------------------------------
+
+  return(temp.new.Data.Curve)
+
+}
diff --git a/R/merge_RLum.R b/R/merge_RLum.R
new file mode 100644
index 0000000..2279f9c
--- /dev/null
+++ b/R/merge_RLum.R
@@ -0,0 +1,124 @@
+#' General merge function for RLum S4 class objects
+#'
+#' Function calls object-specific merge functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for merging specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding merge function will be selected. Allowed arguments can be
+#' found in the documentation of each merge function. Empty list elements (\code{NULL}) are
+#' automatically removed from the input \code{list}.
+#'
+#' \tabular{lll}{
+#' \bold{object} \tab \tab \bold{corresponding merge function} \cr
+#'
+#' \code{\linkS4class{RLum.Data.Curve}} \tab : \tab \code{merge_RLum.Data.Curve} \cr
+#' \code{\linkS4class{RLum.Analysis}} \tab : \tab \code{merge_RLum.Analysis} \cr
+#' \code{\linkS4class{RLum.Results}} \tab : \tab \code{merge_RLum.Results}
+#'
+#' }
+#'
+#' @param objects \code{\link{list}} of \code{\linkS4class{RLum}}
+#' (\bold{required}): list of S4 object of class \code{RLum}
+#'
+#' @param \dots further arguments that one might want to pass to the specific
+#' merge function
+#'
+#' @return Returns a single merged object of the same class as the objects provided in the input list.
+#'
+#' @note So far, a merge function does not exist for every \code{RLum} object.
+#'
+#' @section Function version: 0.1.2
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+#'
+#' @references -
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#'
+#' ##Example based on data from the calc_CentralDose() function
+#'
+#' ##load example data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ##apply the central dose model 1st time
+#' temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
+#'
+#' ##apply the central dose model 2nd time
+#' temp2 <- calc_CentralDose(ExampleData.DeValues$CA1)
+#'
+#' ##merge the results and store them in a new object
+#' temp.merged <- get_RLum(merge_RLum(objects = list(temp1, temp2)))
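+#'
+#' ##as a further sketch, the same call dispatches to merge_RLum.Data.Curve()
+#' ##for a list of curve objects, e.g. using the RLum.Analysis example data:
+#' data(ExampleData.RLum.Analysis, envir = environment())
+#' curve.merged <- merge_RLum(objects = list(IRSAR.RF.Data[[1]], IRSAR.RF.Data[[1]]),
+#'                            merge.method = "sum")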
+#'
+#'
+#' @export
+merge_RLum <- function(
+  objects,
+  ...
+){
+
+  # Integrity check ----------------------------------------------------------
+    if(!is.list(objects)){
+      stop("[merge_RLum()] argument 'objects' needs to be of type list!")
+
+    }
+
+    ##we are friendly and remove all empty list elements; this helps a lot when such lists
+    ##are assembled programmatically.
+    ##we DO NOT provide a warning, as this would lower the computation speed in particular cases.
+    objects <- objects[!sapply(objects, is.null)]
+
+  ##if list is empty afterwards we do nothing
+   if(length(objects) != 0) {
+      ##check if objects are of class RLum
+      temp.class.test <- unique(sapply(1:length(objects), function(x) {
+        if (!is(objects[[x]], "RLum")) {
+          temp.text <-
+            paste(
+              "[merge_RLum()]: At least element", x, "is not of class 'RLum' or a derivative class!"
+            )
+          stop(temp.text, call. = FALSE)
+        }
+        ##provide class of objects ... so far they should be similar
+        is(objects[[x]])[1]
+      }))
+
+      ##check if objects are consistent
+      if (length(temp.class.test) > 1) {
+        ##This is not valid for RLum.Analysis objects
+        if (!"RLum.Analysis" %in% temp.class.test) {
+          stop("[merge_RLum()] So far only similar input objects in the list are supported!")
+        }
+      }
+
+      ##grep object class
+      objects.class <-
+        ifelse("RLum.Analysis" %in% temp.class.test, "RLum.Analysis", temp.class.test)
+
+      ##select which merge function should be used
+      switch (
+        objects.class,
+        RLum.Data.Image = stop(
+          "[merge_RLum()] Sorry, merging of 'RLum.Data.Image' objects is currently not supported!"
+        ),
+        RLum.Data.Spectrum = stop(
+          "[merge_RLum()] Sorry, merging of 'RLum.Data.Spectrum' objects is currently not supported!"
+        ),
+        RLum.Data.Curve = merge_RLum.Data.Curve(objects, ...),
+        RLum.Analysis = merge_RLum.Analysis(objects, ...),
+        RLum.Results = merge_RLum.Results(objects, ...)
+      )
+
+    }else{
+
+      warning("[merge_RLum()] Nothing was merged as the object list was found to be empty!")
+      return(NULL)
+
+    }
+
+}
diff --git a/R/merge_RLum.Results.R b/R/merge_RLum.Results.R
new file mode 100644
index 0000000..b4ea636
--- /dev/null
+++ b/R/merge_RLum.Results.R
@@ -0,0 +1,132 @@
+#' Merge function for RLum.Results S4-class objects
+#'
+#' Function merges objects of class \code{\linkS4class{RLum.Results}}. The slots in the objects
+#' are combined depending on the object type, e.g., for \code{\link{data.frame}} and \code{\link{matrix}}
+#' rows are appended.
+#'
+#' @note The originator is taken from the first element and not reset to \code{merge_RLum}
+#'
+#' @param objects \code{\link{list}} (required): a list of \code{\linkS4class{RLum.Results}} objects
+#'
+#' @section Function version: 0.2.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
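+#'
+#' @examples
+#'
+#' ##a minimal sketch, mirroring the example given for merge_RLum():
+#' data(ExampleData.DeValues, envir = environment())
+#' temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
+#' temp2 <- calc_CentralDose(ExampleData.DeValues$CA1)
+#' temp.merged <- merge_RLum.Results(objects = list(temp1, temp2))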
+#'
+#' @export
+merge_RLum.Results <- function(
+    objects){
+
+            ##-------------------------------------------------------------
+            ##Some integrity checks
+
+            ##check if input object is a list
+            if(!is(objects, "list")){
+
+              stop("[merge_RLum.Results()] 'objects' has to of type 'list'!")
+
+            }else{
+
+              ##check if objects in the list are of type RLum.Results
+              temp.originator <- sapply(1:length(objects), function(x){
+
+                if(is(objects[[x]], "RLum.Results") == FALSE){
+
+                  stop("[merge_RLum.Results()] Objects to merge have
+                       to be of type 'RLum.Results'!")
+
+                }
+
+                objects[[x]]@originator
+
+              })
+              }
+
+            ##check if the originators differ
+            if(length(unique(temp.originator))>1){
+
+              stop("[merge_RLum.Results()] 'RLum.Results' object originators differ!")
+            }
+
+            ##-------------------------------------------------------------
+            ##merge objects depending on the data structure
+
+            for(i in 1:length(objects[[1]]@data)){
+
+              ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+              ##numeric vector or data.frame or matrix
+              if(is(objects[[1]]@data[[i]], "data.frame")||
+                 is(objects[[1]]@data[[i]], "numeric") ||
+                 is(objects[[1]]@data[[i]], "matrix")){
+
+                ##grep elements and combine them into a list
+                temp.list <-
+                  lapply(1:length(objects), function(x) {
+                    objects[[x]]@data[[i]]
+
+                  })
+
+                ##check whether the objects can be combined by rbind
+                if(length(unique(unlist(lapply(temp.list, FUN = ncol)))) > 1){
+
+                  stop("[merge_RLum.Results()] Objects cannot be combined, number of columns differs.")
+
+                }
+
+                ##combine them using rbind or data.table::rbindlist (depends on the data type)
+                if(is(objects[[1]]@data[[i]], "numeric")){
+                  objects[[1]]@data[[i]] <- unlist(temp.list)
+
+                }else if(is(objects[[1]]@data[[i]], "matrix")){
+                  objects[[1]]@data[[i]] <- do.call("rbind", temp.list)
+
+                }else{
+                  objects[[1]]@data[[i]] <- as.data.frame(data.table::rbindlist(temp.list))
+
+                }
+
+
+              }else{
+
+                ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+                ##all other elements
+
+                ##grep elements and write them into a list
+                objects[[1]]@data[[i]] <- lapply(1:length(objects),
+                                                     function(x){
+
+                                                       objects[[x]]@data[[i]]
+
+                                                     })
+
+
+                ##unlist to flatten list if necessary for the elements
+                if(is(objects[[1]]@data[[i]][[1]])[1] == "list"){
+
+                  objects[[1]]@data[[i]] <- unlist(objects[[1]]@data[[i]],
+                                                       recursive = FALSE)
+                }
+              }
+
+
+            }##end loop
+
+            #return by setting a new RLum.Results (for the .uid)
+            #the originator is not reset
+            objects_merged <- set_RLum(
+              class = "RLum.Results",
+              originator = objects[[1]]@originator,
+              data = objects[[1]]@data,
+              info = unlist(lapply(objects, function(x) {
+                x@info
+              }), recursive = FALSE),
+              .pid = unlist(lapply(objects, function(x) {
+                x@.uid
+              }))
+
+            )
+
+            return(objects_merged)
+
+}
diff --git a/R/merge_Risoe.BINfileData.R b/R/merge_Risoe.BINfileData.R
new file mode 100644
index 0000000..d100e45
--- /dev/null
+++ b/R/merge_Risoe.BINfileData.R
@@ -0,0 +1,257 @@
+#' Merge Risoe.BINfileData objects or Risoe BIN-files
+#'
+#' Function allows merging Risoe BIN/BINX files or Risoe.BINfileData objects.
+#'
+#' The function allows merging different measurements to one file or one
+#' object.\cr The record IDs are recalculated for the new object. Other values
+#' are kept for each object. The number of input objects is not limited. \cr
+#'
+#' \code{position.number.append.gap} option \cr
+#'
+#' If the option \code{keep.position.number = FALSE} is used, the position
+#' numbers of the new data set are recalculated by adding the highest position
+#' number of the previous data set to each position number of the next data
+#' set. For example: if the highest position number is 48, this number will be
+#' added to all position numbers of the next data set (e.g. 1 + 48 =
+#' 49)\cr
+#'
+#' However, there might be cases where an additional addend (summand) is needed
+#' before the next position starts. Example: \cr
+#'
+#' Position number set (A): \code{1,3,5,7}\cr Position number set (B):
+#' \code{1,3,5,7} \cr
+#'
+#' With no additional summand the new position numbers would be:
+#' \code{1,3,5,7,8,10,12,14}. That might be unwanted. Using the argument
+#' \code{position.number.append.gap = 1} it will become:
+#' \code{1,3,5,7,9,11,13,15} (see the examples).
+#'
+#' @param input.objects \code{\link{character}} or
+#' \code{\linkS4class{Risoe.BINfileData}} (\bold{required}): Character vector
+#' with path and files names (e.g. \code{input.objects = c("path/file1.bin",
+#' "path/file2.bin")} or \code{\linkS4class{Risoe.BINfileData}} objects (e.g.
+#' \code{input.objects = c(object1, object2)})
+#'
+#'
+#' @param output.file \code{\link{character}} (optional): File output path and
+#' name. \cr If no value is given, a \code{\linkS4class{Risoe.BINfileData}} is
+#' returned instead of a file.
+#'
+#'
+#' @param keep.position.number \code{\link{logical}} (with default): Allows
+#' keeping the original position numbers of the input objects. Otherwise the
+#' position numbers are recalculated.
+#'
+#'
+#' @param position.number.append.gap \code{\link{integer}} (with default): Set
+#' the position number gap between merged BIN-file sets, if the option
+#' \code{keep.position.number = FALSE} is used. See details for further
+#' information.
+#'
+#'
+#' @return Returns a \code{file} or a \code{\linkS4class{Risoe.BINfileData}}
+#' object.
+#'
+#'
+#' @note The validity of the output objects is not further checked.
+#'
+#'
+#' @section Function version: 0.2.5
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#'
+#' @seealso \code{\linkS4class{Risoe.BINfileData}}, \code{\link{read_BIN2R}},
+#' \code{\link{write_R2BIN}}
+#'
+#'
+#' @references Duller, G., 2007. Analyst.
+#'
+#'
+#' @keywords IO manip
+#'
+#'
+#' @examples
+#'
+#'
+#' ##merge two objects
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' object1 <- CWOSL.SAR.Data
+#' object2 <- CWOSL.SAR.Data
+#'
+#' object.new <- merge_Risoe.BINfileData(c(object1, object2))
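+#'
+#' ##merge two objects, recalculating position numbers with a gap of 1
+#' ##(a short sketch of the position.number.append.gap option described above)
+#' object.gap <- merge_Risoe.BINfileData(c(object1, object2),
+#'                                       position.number.append.gap = 1)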
+#'
+#'
+#' @export
+merge_Risoe.BINfileData <- function(
+  input.objects,
+  output.file,
+  keep.position.number = FALSE,
+  position.number.append.gap = 0
+){
+
+
+  # Integrity Checks --------------------------------------------------------
+
+  if(length(input.objects) < 2){
+
+    stop("[merge_Risoe.BINfileData()] At least two input objects are needed!")
+
+  }
+
+  if(is(input.objects, "character") == TRUE){
+
+    for(i in 1:length(input.objects)){
+
+      if(file.exists(input.objects[i])==FALSE){
+
+        stop("[merge_Risoe.BINfileData()] File",input.objects[i],"does not exists!")
+
+      }
+
+    }
+
+  }else{
+
+    if(is(input.objects, "list") == TRUE){
+
+      for(i in 1:length(input.objects)){
+
+        if(is(input.objects[[i]], "Risoe.BINfileData") == FALSE){
+
+          stop("[merge_Risoe.BINfileData()] Input list does not contain Risoe.BINfileData objects!")
+
+        }
+
+      }
+
+    }else{
+
+      stop("[merge_Risoe.BINfileData()]
+                Input object is not a 'character' nor a 'list'!")
+
+    }
+
+  }
+
+
+  # Import Files ------------------------------------------------------------
+
+  ##loop over all files to store the results in a list
+  ##or the input is already a list
+
+  if(is(input.objects, "character") == TRUE){
+    temp <- lapply(input.objects, read_BIN2R)
+
+  }else{
+
+    temp <- input.objects
+
+  }
+
+  # Get POSITION values -------------------------------------------------------
+
+  ##get the maximum position value from the first file
+  temp.position.max <- max(temp[[1]]@METADATA[, "POSITION"])
+
+  ##get all position values except those from the first file
+  temp.position.values <- unlist(sapply(2:length(temp), function(x){
+
+    temp <- temp[[x]]@METADATA[, "POSITION"] +
+      temp.position.max +
+      position.number.append.gap
+
+    temp.position.max <<- max(temp)
+
+    return(temp)
+  }))
+
+  temp.position.values <- c(temp[[1]]@METADATA[, "POSITION"], temp.position.values)
+
+
+  # Get overall record length -----------------------------------------------
+  temp.record.length <- sum(sapply(1:length(temp), function(x){
+
+    length(temp[[x]]@METADATA[,"ID"])
+
+  }))
+
+
+  # Merge Files -------------------------------------------------------------
+
+  ##loop for similar input objects
+  for(i in 1:length(input.objects)){
+
+    if(exists("temp.new.METADATA") == FALSE){
+
+      temp.new.METADATA <- temp[[i]]@METADATA
+      temp.new.DATA <- temp[[i]]@DATA
+
+
+      if(inherits(try(temp[[i]]@.RESERVED, silent = TRUE), "try-error")){
+
+        temp.new.RESERVED <- list()
+
+      }else{
+
+        temp.new.RESERVED <- temp[[i]]@.RESERVED
+
+      }
+
+    }else{
+
+      temp.new.METADATA <- rbind(temp.new.METADATA, temp[[i]]@METADATA)
+      temp.new.DATA <- c(temp.new.DATA, temp[[i]]@DATA)
+
+      if(inherits(try(temp[[i]]@.RESERVED, silent = TRUE), "try-error")){
+
+        temp.new.RESERVED <- c(temp.new.RESERVED, list())
+
+      }else{
+
+        temp.new.RESERVED <- c(temp.new.RESERVED, temp[[i]]@.RESERVED)
+
+      }
+
+    }
+  }
+
+
+  ##SET RECORD ID in METADATA
+  temp.new.METADATA$ID <- 1:temp.record.length
+
+  ##SET POSITION VALUES
+  if(!keep.position.number){
+
+    temp.new.METADATA$POSITION <- temp.position.values
+
+  }
+
+  ##TODO version number?
+  # Produce BIN file object -------------------------------------------------
+
+  temp.new <- set_Risoe.BINfileData(
+    METADATA = temp.new.METADATA,
+    DATA = temp.new.DATA,
+    .RESERVED = temp.new.RESERVED
+
+  )
+
+
+
+  # OUTPUT ------------------------------------------------------------------
+
+  if(!missing(output.file)){
+
+    write_R2BIN(temp.new, output.file)
+
+  }else{
+
+    return(temp.new)
+
+  }
+
+}
diff --git a/R/methods_DRAC.R b/R/methods_DRAC.R
new file mode 100644
index 0000000..0c0d260
--- /dev/null
+++ b/R/methods_DRAC.R
@@ -0,0 +1,248 @@
+##################################################################################
+##                      METHODS FOR S3 GENERICS                                 ##
+##################################################################################
+
+## ---------------------------------------------------------------------------##
+## DATA FRAME COERCION METHOD
+
+## This is a method for the as.data.frame S3 generic. We need this to intercept the
+## DRAC list object after it has passed the actual list-method. After it was
+## coerced to a data.frame we assign new column names (DRAC ID keys) and
+## make sure that all columns are either of class 'character' or 'numeric'.
+## Finally, we attach a further class name to identify it as a valid DRAC object
+## when passed to use_DRAC.
+
+#' @export
+as.data.frame.DRAC.list <- function(x, row.names = NULL, optional = FALSE, ...) {
+  DF <- as.data.frame.list(x)
+  colnames(DF) <- paste0("TI:", 1:ncol(DF))
+  for (i in 1:ncol(DF)) {
+    if (is.factor(DF[ ,i])) 
+      DF[ ,i] <- as.character(DF[, i])
+  }
+  class(DF) <- c("data.frame", "DRAC.data.frame")
+  return(DF)
+}
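+
+## Usage sketch (assuming a DRAC list `input`): as.data.frame(input) yields a
+## data.frame whose columns are renamed to the DRAC ID keys "TI:1" ... "TI:n"
+## and which carries the additional class 'DRAC.data.frame' used to identify it
+## as a valid DRAC object in use_DRAC().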
+
+
+## ---------------------------------------------------------------------------##
+## PRINT METHOD
+
+#' @export
+print.DRAC.highlights <- function(x, ...) {
+  x <- as.list(x)
+  names <- names(x)
+  mapply(function(el, name) { 
+    cat(paste0(attributes(el)$key, " = ", name,":\n  ", paste(el, collapse = ",\n  "), "\n"))
+    }, x, names)
+}
+
+#' @export
+print.DRAC.list <- function(x, blueprint = FALSE, ...) {
+  
+  ## CASE 1: Pretty print the structure of the DRAC list
+  if (!blueprint) {
+    limit <- 80
+    
+    for (i in 1:length(x)) {
+      # for pretty printing we insert newlines and tabs at specified lengths
+      ls <- attributes(x[[i]])$description
+      ls.n <- nchar(ls)
+      ls.block <- floor(ls.n / limit)
+      strStarts <- seq(0, ls.n, limit)
+      strEnds <- seq(limit-1, ls.n + limit, limit)
+      blockString <- paste(mapply(function(start, end) { 
+        trimmedString <- paste(substr(ls, start, end), "\n\t\t\t")
+        if (substr(trimmedString, 1, 1) == " ")
+          trimmedString <- gsub("^[ ]*", "", trimmedString)
+        return(trimmedString)
+      }, strStarts, strEnds), collapse="")
+      
+      msg <- paste(attributes(x[[i]])$key, "=>",names(x)[i], "\n",
+                   "\t VALUES =", paste(x[[i]], collapse = ", "), "\n",
+                   "\t ALLOWS 'X' = ", attributes(x[[i]])$allowsX, "\n",
+                   "\t REQUIRED =", attributes(x[[i]])$required, "\n",
+                   "\t DESCRIPTION = ", blockString, "\n"
+      )
+      if (!is.null(levels(x[[i]]))) {
+        msg <- paste(msg,
+                     "\t OPTIONS = ", paste(levels(x[[i]]), collapse = ", "),
+                     "\n\n")
+      } else {
+        msg <- paste(msg, "\n")
+      }
+      cat(msg)
+    }
+  }
+  
+  ## CASE 2: Return a 'blueprint' that can be copied from the console to a
+  ## script so the user does not need to write down all >50 fields by hand
+  if (blueprint) {
+    var <- as.list(sys.call())[[2]]
+    names <- names(x)
+    
+    for (i in 1:length(x)) {
+      
+      # in case of factors also show available levels as comments so you don't
+      # have to look it up
+      if (is.factor(x[[i]]))
+        options <- paste("# OPTIONS:", paste(levels(x[[i]]), collapse = ", "))
+      else
+        options <- ""
+      
+      # determine if values need brackets (strings)
+      if (is.numeric(x[[i]]) | is.integer(x[[i]]))
+        values <- paste(x[[i]], collapse = ", ")
+      if (is.character(x[[i]]) | is.factor(x[[i]]))
+        values <- paste0("'", paste0(x[[i]], collapse = "', '"), "'")
+      
+      cat(paste0(var, "$`", names[i], "` <- c(", values,") ", options ,"\n"))
+    }
+    message("\n\t You can copy all lines above to your script and fill in the data.")
+  }
+}
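+
+## Usage sketch (assuming a DRAC input list, e.g. one created with template_DRAC()):
+## print(input) pretty-prints keys, values and field descriptions, while
+## print(input, blueprint = TRUE) emits copy-paste assignment lines for all fields.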
+
+
+## ---------------------------------------------------------------------------##
+## DOUBLE SQUARE BRACKETS METHOD
+
+#' @export
+`[[<-.DRAC.list` <- function(x, i, value) {
+  
+  
+  ## REJECT ALL INADEQUATE CLASSES ----
+  acceptedClasses <- c("integer", "character", "numeric", "factor")
+  if (is.na(match(class(value), acceptedClasses))) {
+    warning(paste("I cannot use objects of class", class(value)), 
+            call. = FALSE)
+    return(x)
+  }
+  
+  ## CHECK INPUT LENGTH ----
+  length.old <- length(x[[i]])
+  length.new <- length(value)
+  
+  if (length.old != length.new) {
+    warning(paste(names(x)[i], ": Input must be of length", length.old), 
+            call. = FALSE)
+    return(x)
+  }
+  
+  ## CHECK INPUT CLASS ----
+  class.old <- class(x[[i]])
+  class.new <- class(value)
+  
+  ## CHECK INPUT FIELDS THAT ALLOW 'X' -----
+  # the following checks apply to fields that are normally numeric, but also 
+  # accept 'X' as input. this EXCLUDES factors!
+  if (class.old != "factor") {
+    # some input fields allow 'X' as input, so in terms of R can be of class
+    # "character" or "numeric/integer". hence, we check if input is "X" and 
+    # if the filed allows it. If so, we change the old class to "character".
+    if (any(value == "X") && attributes(x[[i]])$allowsX) {
+      
+      if (any(is.na(as.numeric(value[which(value != "X")])))) {
+        warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
+                      "Input must be numeric or 'X'."), 
+                call. = FALSE)
+        return(x)
+      }
+      class.old <- "character" 
+    }
+    
+    # where the input field is already "X" we have to check whether the new
+    # non-character input is allowed
+    if (any(x[[i]] == "X") && attributes(x[[i]])$allowsX) {
+      if (any(is.na(as.numeric(value[which(value != "X")])))) {
+        warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
+                      "Input must be numeric or 'X'. \n"), 
+                call. = FALSE)
+        return(x)
+      }
+      class.new <- "character"
+      value <- as.character(value)
+    }
+    
+    # when an "X" was inserted into a numeric input field, it was coerced to class
+    # character. Since we are now allowed to insert any character (later tests),
+    # we need to make sure that the new input can be coerced to class numeric,
+    # and if the new values are numeric, we coerce them to character
+    if (attributes(x[[i]])$allowsX && class.old == "character") {
+      if (any(is.na(as.numeric(value[which(value != "X")])))) {
+        warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
+                      "Input must be numeric or 'X'. \n"), 
+                call. = FALSE)
+        return(x)
+      } 
+      class.new <- "character"
+      value <- as.character(value)
+    }
+  }
+  
+  
+  # numeric input can be either of class 'integer' or 'numeric'. We will
+  # allow any combination and reject only non-numeric/integer input
+  if (class.old == "numeric" || class.old == "integer") {
+    if (class.new != "numeric" && class.new != "integer") {
+      warning(paste(names(x)[i], ": Input must be of class", class.old),
+              call. = FALSE)
+      return(x)
+    }
+  }
+  
+  # for 'factor' and 'character' elements only 'character' input is allowed 
+  if (class.old == "factor" || class.old == "character") {
+    if (class.new != "character") {
+      warning(paste(names(x)[i], ": Input must be of class", "character"),
+              call. = FALSE)
+      return(x)
+    }
+  }
+  
+  ## CHECK IF VALID OPTION ----
+  # in case of 'factor's the user is only allowed input that matches one of 
+  # the options specified by the factor levels. if it is a valid option,
+  # the input is converted to a factor to keep the information.
+  if (class.old == "factor") {
+    levels <- levels(x[[i]])
+    if (!all(value %in% levels)) {
+      warning(paste(names(x)[i], ": Invalid option. Valid options are:", paste(levels, collapse = ", ")),
+              call. = FALSE)
+      return(x)
+    } else {
+      value <- factor(value, levels)
+    }
+  }
+  
+  ## WRITE NEW VALUES ----
+  # we strip our custom class and the attributes, pass the object to the default generic and 
+  # finally re-attach our class and attributes
+  tmp.attributes <- attributes(x[[i]])[names(attributes(x[[i]])) != "class"]
+  class(x) <- "list"
+  x <- `[[<-`(x, i, value)
+  attributes(x[[i]]) <- tmp.attributes
+  if (class.old == "factor")
+    class(x[[i]]) <- "factor"
+  class(x) <- c("DRAC.list", "list")
+  return(x)
+}
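+
+## A quick sketch of the validation chain above (hypothetical field index): an
+## assignment like x[[2]] <- "foo" first checks the input length, then the class,
+## then the 'X' rules and factor levels, and silently returns the unchanged
+## object with a warning if any check fails.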
+
+## ---------------------------------------------------------------------------##
+## SINGLE SQUARE BRACKET METHOD
+
+#' @export
+`[<-.DRAC.list` <- function(x, i, value) {
+  return(`[[<-`(x, i, value))
+}
+
+## ---------------------------------------------------------------------------##
+## DOLLAR SIGN METHOD
+
+#' @export
+`$<-.DRAC.list` <- function(x, name, value) {
+  # this is straightforward; retrieve the index and pass the object
+  # to the custom [[<- function, which does the data verification
+  index <- which(names(x) == name)
+  x[[index]] <- value
+  return(x)
+}
\ No newline at end of file
diff --git a/R/methods_RLum.R b/R/methods_RLum.R
new file mode 100644
index 0000000..448c00d
--- /dev/null
+++ b/R/methods_RLum.R
@@ -0,0 +1,500 @@
+##################################################################################
+##                      METHODS FOR S3 GENERICS                                 ##
+##################################################################################
+
+##CAUTION NOTE:
+##(1) Please DO NOT access the S4 objects via their slots; this causes inconsistent
+## behaviour. Please use the corresponding RLum-methods instead!
+##
+##(2) Please DO NOT include S3-methods for which no S4-method is implemented, especially
+##not for coercing.
+##
+##(3) Finally, whatever you want to implement, check whether an S4-method exists; the call
+##should just be passed to that method, not the opposite, otherwise this will result in
+##undesired behaviour.
+##
+##TODO: For these S3 generics so far no proper documentation exists ... we should consider
+##providing an overview within a separate document, as it otherwise becomes rather
+##complicated for beginners to work with the documentation.
+##
+##
+
+
+## -------------------- INTRODUCED WITH 0.5.0 ----------------------- ##
+
+
+#' methods_RLum
+#'
+#' Methods for S3-generics implemented for the package 'Luminescence'.
+#' This document summarises all implemented S3-generics. The name of the function
+#' is given before the first dot; after the dot, the name of the object supported by this
+#' method is given, e.g. \code{plot.RLum.Data.Curve} can be called by \code{plot(object, ...)}, where
+#' \code{object} is the \code{RLum.Data.Curve} object.
+#'
+#' The term S3-generics sounds complicated; however, it just means that something has been implemented
+#' in the package to increase the usability for users new to R who are not familiar with the
+#' underlying \code{RLum}-object structure of the package. The practical outcome is that
+#' operations and functions presented in standard books on R can be used without knowing the specifics
+#' of the R package 'Luminescence'. For examples see the examples section.
+#'
+#' @param x \code{\linkS4class{RLum}} or \code{\linkS4class{Risoe.BINfileData}} (\bold{required}): input object
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): input object
+#'
+#' @param y \code{\link{integer}} (optional): the row index of the matrix, data.frame
+#'
+#' @param z \code{\link{integer}} (optional): the column index of the matrix, data.frame
+#'
+#' @param i \code{\link{character}} (optional): name of the wanted record type or data object
+#'
+#' @param drop \code{\link{logical}} (with default): keep object structure or drop it
+#'
+#' @param subset \code{[subset]} \code{\link{expression}} (\bold{required}): logical expression indicating elements or rows to keep;
+#' this function works on \code{\linkS4class{Risoe.BINfileData}} objects like \code{\link{subset.data.frame}}, but takes care
+#' of the object structure
+#'
+#' @param row.names \code{\link{logical}} (with default): enables or disables row names (\code{as.data.frame})
+#'
+#' @param recursive \code{\link{logical}} (with default): enables or disables further subsetting (\code{unlist})
+#'
+#' @param optional \code{\link{logical}} (with default): if \code{TRUE}, setting row names and
+#' converting column names (to syntactic names: see make.names) is optional (see \code{\link[base]{as.data.frame}})
+#'
+#' @param ... further arguments that can be passed to the method
+#'
+#' @note \code{methods_RLum} are not really new functions; everything given here is mostly just
+#' a surrogate for existing functions in the package.
+#'
+#' @examples
+#'
+#' ##load example data
+#' data(ExampleData.RLum.Analysis, envir = environment())
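+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##apply some of the S3-generics defined below, e.g.:
+#' length(IRSAR.RF.Data)
+#' names(IRSAR.RF.Data)
+#' subset(CWOSL.SAR.Data, POSITION == 1)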
+#'
+#' @name methods_RLum
+NULL
+
+####################################################################################################
+# methods for generic: plot()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @method plot list
+#' @export
+plot.list <- function(x, y, ...) {
+  if (all(sapply(x, function(x) inherits(x, "RLum")))) {
+    plot_RLum(object = x, ...)
+  }
+  else {
+    if (missing(y))
+      y <- NULL
+    plot.default(x, y, ...)
+  }
+}
+
+
+#' @rdname methods_RLum
+#' @method plot RLum.Results
+#' @export
+plot.RLum.Results <- function(x, y, ...) plot_RLum(object = x, ...)
+
+#' @rdname methods_RLum
+#' @method plot RLum.Analysis
+#' @export
+plot.RLum.Analysis <- function(x, y, ...) plot_RLum(object = x, ...)
+
+#' @rdname methods_RLum
+#' @method plot RLum.Data.Curve
+#' @export
+plot.RLum.Data.Curve <- function(x, y, ...) plot_RLum(object = x, ...)
+
+#' @rdname methods_RLum
+#' @method plot RLum.Data.Spectrum
+#' @export
+plot.RLum.Data.Spectrum <- function(x, y, ...) plot_RLum(object = x, ...)
+
+#' @rdname methods_RLum
+#' @method plot RLum.Data.Image
+#' @export
+plot.RLum.Data.Image <- function(x, y, ...) plot_RLum(object = x, ...)
+
+#' @rdname methods_RLum
+#' @method plot Risoe.BINfileData
+#' @export
+plot.Risoe.BINfileData <- function(x, y, ...) plot_Risoe.BINfileData(BINfileData = x, ...)
+
+####################################################################################################
+# methods for generic: hist()
+# ##################################################################################################
+
+#' @rdname methods_RLum
+#' @export
+hist.RLum.Results <- function(x, ...) plot_Histogram(data = x, ...)
+
+#' @rdname methods_RLum
+#' @export
+hist.RLum.Data.Image <- function(x, ...) hist(x = get_RLum(x)@data@values, ...)
+
+#' @rdname methods_RLum
+#' @export
+hist.RLum.Data.Curve <- function(x, ...) hist(as(get_RLum(x),"matrix")[,2])
+
+#' @rdname methods_RLum
+#' @export
+hist.RLum.Analysis <- function(x, ...) lapply(1:length_RLum(x), function(z){
+  hist(as(get_RLum(x, record.id = z, ...),"matrix")[,2])})
+
+####################################################################################################
+# methods for generic: summary()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @method summary RLum.Results
+#' @export
+summary.RLum.Results <- function(object, ...) get_RLum(object = object, ...)
+
+#' @rdname methods_RLum
+#' @method summary RLum.Analysis
+#' @export
+summary.RLum.Analysis <- function(object, ...) lapply(object@records, function(x) summary(x@data))
+
+#' @rdname methods_RLum
+#' @method summary RLum.Data.Image
+#' @export
+summary.RLum.Data.Image <- function(object, ...) summary(object@data@data@values)
+
+# summary.RLum.Data.Spectrum <- function(object, ...)
+
+#' @rdname methods_RLum
+#' @method summary RLum.Data.Curve
+#' @export
+summary.RLum.Data.Curve <- function(object, ...) summary(object@data, ...)
+
+####################################################################################################
+# methods for generic: subset()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @method subset Risoe.BINfileData
+#' @param records.rm [subset] \code{\link{logical}} (with default): remove records from the data set; can
+#' be disabled, to just set the column \code{SEL} to \code{TRUE} or \code{FALSE}
+#' @export
+subset.Risoe.BINfileData <- function(x, subset, records.rm = TRUE, ...) {
+
+  if(length(list(...))){
+    warning(paste("Argument not supported and skipped:", names(list(...))))
+
+  }
+
+  ##select relevant rows
+  sel <- eval(
+    expr = substitute(subset),
+    envir = x@METADATA,
+    enclos = parent.frame()
+  )
+
+  ##it is possible that nothing was selected; in that case NULL is returned below
+  if (records.rm) {
+    if (any(sel)) {
+      x@METADATA <- x@METADATA[sel, ]
+      x@DATA <- x@DATA[sel]
+      x@METADATA[["ID"]] <- 1:length(x@METADATA[["ID"]])
+      return(x)
+
+    } else{
+      return(NULL)
+
+    }
+  }else{
+    x at METADATA[["SEL"]] <- sel
+    return(x)
+
+  }
+
+}
+
+####################################################################################################
+# methods for generic: bin()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+bin.RLum.Data.Curve <- function(x, ...) bin_RLum.Data(x)
+
+####################################################################################################
+# methods for generic: length()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+length.RLum.Results <- function(x, ...) length_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+length.RLum.Analysis <- function(x, ...) length_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+length.RLum.Data.Curve <- function(x, ...) length_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+length.Risoe.BINfileData <- function(x, ...) length(x@METADATA$ID)
+
+####################################################################################################
+# methods for generic: dim()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+dim.RLum.Data.Curve <- function(x) dim(as(x, "matrix"))
+
+#' @rdname methods_RLum
+#' @export
+dim.RLum.Data.Spectrum <- function(x) dim(as(x, "matrix"))
+
+####################################################################################################
+# methods for generic: rep()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+rep.RLum <- function(x, ...) replicate_RLum(x, ...)
+
+####################################################################################################
+# methods for generic: name()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+names.RLum.Data.Curve <- function(x, ...) names_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+names.RLum.Data.Spectrum <- function(x, ...) names_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+names.RLum.Data.Image <- function(x, ...) names_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+names.RLum.Analysis <- function(x, ...) names_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+names.RLum.Results <- function(x, ...) names_RLum(x)
+
+#' @rdname methods_RLum
+#' @export
+names.Risoe.BINfileData <- function(x) as.character(x@METADATA$LTYPE)
+
+####################################################################################################
+# methods for generic: row.name()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+row.names.RLum.Data.Spectrum <- function(x, ...) rownames(as(x, "matrix"))
+
+####################################################################################################
+# methods for generic: as.data.frame()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+as.data.frame.RLum.Data.Curve <- function(x, row.names = NULL, optional = FALSE, ...) as(x, "data.frame")
+
+#' @rdname methods_RLum
+#' @export
+as.data.frame.RLum.Data.Spectrum <- function(x,  row.names = NULL, optional = FALSE, ...) as(x, "data.frame")
+# for RLum.Results ... makes no sense and may yield in unpredictable behaviour
+
+####################################################################################################
+# methods for generic: as.list()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+as.list.RLum.Results <- function(x, ...) as(x, "list")
+
+#' @rdname methods_RLum
+#' @export
+as.list.RLum.Data.Curve <- function(x, ...) as(x, "list")
+
+#' @rdname methods_RLum
+#' @export
+as.list.RLum.Analysis <- function(x, ...) as(x, "list")
+
+####################################################################################################
+# methods for generic: as.matrix()
+# ##################################################################################################
+#' @rdname methods_RLum
+#' @export
+as.matrix.RLum.Data.Curve <- function(x, ...) as(x, "matrix")
+
+#' @rdname methods_RLum
+#' @export
+as.matrix.RLum.Data.Spectrum <- function(x, ...) as(x, "matrix")
+# for RLum.Results ... makes no sense and may yield in unpredictable behaviour
+
+####################################################################################################
+# methods for generic: is()
+####################################################################################################
+#For these functions no S4-method was written, as this would come at the cost of performance and
+#is totally unnecessary
+
+#' @rdname methods_RLum
+#' @export
+is.RLum <- function(x, ...) is(x, "RLum")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Data <- function(x, ...) is(x, "RLum.Data")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Data.Curve <- function(x, ...) is(x, "RLum.Data.Curve")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Data.Spectrum <- function(x, ...) is(x, "RLum.Data.Spectrum")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Data.Image <- function(x, ...) is(x, "RLum.Data.Image")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Analysis <- function(x, ...) is(x, "RLum.Analysis")
+
+#' @rdname methods_RLum
+#' @export
+is.RLum.Results <- function(x, ...) is(x, "RLum.Results")
+
+####################################################################################################
+# methods for generic: merge()
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+merge.RLum <- function(x, y, ...) merge_RLum(append(list(...), values = c(x, y)))
+
+####################################################################################################
+# methods for generic: unlist()
+####################################################################################################
+#' @rdname methods_RLum
+#' @method unlist RLum.Analysis
+#' @export
+unlist.RLum.Analysis <- function(x, recursive = TRUE, ...){
+
+  temp <- get_RLum(object = x, recursive = recursive, ... )
+  if(recursive){
+    unlist(lapply(1:length(temp), function(x){
+      get_RLum(temp[[x]])
+    }), recursive = FALSE)
+
+  }else{
+    return(temp)
+
+  }
+
+}
+
+####################################################################################################
+# methods for generic: `+`
+####################################################################################################
+#' @rdname methods_RLum
+#'
+#' @examples
+#'
+#' ##combine curves in various ways
+#' curve1 <- IRSAR.RF.Data[[1]]
+#' curve2 <-  IRSAR.RF.Data[[1]]
+#' curve1 + curve2
+#' curve1 - curve2
+#' curve1 / curve2
+#' curve1 * curve2
+#'
+#' @export
+`+.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "sum")
+
+####################################################################################################
+# methods for generic: `-`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`-.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "-")
+
+####################################################################################################
+# methods for generic: `*`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`*.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "*")
+
+####################################################################################################
+# methods for generic: `/`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`/.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "/")
+
+####################################################################################################
+# methods for generic: `[`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`[.RLum.Data.Curve` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]}
+
+#' @rdname methods_RLum
+#' @export
+`[.RLum.Data.Spectrum` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]}
+
+#' @rdname methods_RLum
+#' @export
+`[.RLum.Data.Image` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]}
+
+#' @rdname methods_RLum
+#' @export
+`[.RLum.Analysis` <- function(x, i, drop = FALSE) {
+  if (is(i, "character")) {
+    get_RLum(x, recordType = i, drop = drop)
+
+  } else{
+    get_RLum(x, record.id = i, drop = drop)
+
+  }
+}
+
+#' @rdname methods_RLum
+#' @export
+`[.RLum.Results` <- function(x, i, drop = TRUE) {get_RLum(x, data.object = i, drop = drop)}
+
+
+####################################################################################################
+# methods for generic: `[[`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`[[.RLum.Analysis` <- function(x, i) {
+  if (is(i, "character")) {
+    get_RLum(x, recordType = i)
+
+  } else{
+    get_RLum(x, record.id = i)
+
+  }
+}
+
+#' @rdname methods_RLum
+#' @export
+`[[.RLum.Results` <- function(x, i) {get_RLum(x, data.object = i)}
+
+####################################################################################################
+# methods for generic: `$`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`$.RLum.Data.Curve` <- function(x, i) {get_RLum(x, info.object = i)}
+
+#' @rdname methods_RLum
+#'
+#' @examples
+#'
+#' ##access curves via the `$` operator
+#' IRSAR.RF.Data$RF
+#'
+#' @export
+`$.RLum.Analysis` <- function(x, i) {get_RLum(x, recordType = i)}
+
+#' @rdname methods_RLum
+#' @export
+`$.RLum.Results` <- function(x, i) {get_RLum(x, data.object = i)}
diff --git a/R/model_LuminescenceSignals.R b/R/model_LuminescenceSignals.R
new file mode 100644
index 0000000..7738967
--- /dev/null
+++ b/R/model_LuminescenceSignals.R
@@ -0,0 +1,41 @@
+#' Model Luminescence Signals (wrapper)
+#'
+#' Wrapper for the function \code{\link[RLumModel]{model_LuminescenceSignals}} from the package
+#' \link[RLumModel]{RLumModel-package}. For further details and examples please
+#' see the manual of that package.
+#'
+#' @inheritParams RLumModel::model_LuminescenceSignals
+#'
+#' @author Johannes Friedrich, University of Bayreuth (Germany),\cr
+#' Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#'
+#' @section Function version: 0.1.0
+#'
+#' @export
+model_LuminescenceSignals <-
+  function(model,
+           sequence,
+           lab.dose_rate = 1,
+           simulate_sample_history = FALSE,
+           plot = TRUE,
+           verbose = TRUE,
+           show.structure = FALSE,
+           ...) {
+    
+    if (!requireNamespace("RLumModel", quietly = TRUE))
+      stop("Simulation of luminescence signals requires the 'RLumModel' package.",
+           " To install this package run 'install.packages('RLumModel')' in your R console.", 
+           call. = FALSE)
+    
+    RLumModel::model_LuminescenceSignals (
+      model = model,
+      sequence = sequence,
+      lab.dose_rate = lab.dose_rate,
+      simulate_sample_history = simulate_sample_history,
+      plot = plot,
+      verbose = verbose,
+      show.structure = show.structure,
+      ...
+    )
+  }
diff --git a/R/names_RLum.R b/R/names_RLum.R
new file mode 100644
index 0000000..75f45f8
--- /dev/null
+++ b/R/names_RLum.R
@@ -0,0 +1,28 @@
+#' S4-names function for RLum S4 class objects
+#'
+#' Function calls object-specific names functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding 'names' function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum}
+#' @return Returns a \code{\link{character}}
+#' @section Function version: 0.1.0
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}
+#' @keywords utilities
+#' @aliases names_RLum
+#'
+#' @export
+setGeneric("names_RLum", function(object) {
+  standardGeneric("names_RLum")
+})
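+
+## Usage sketch (assuming 'curve' is an RLum.Data.Curve object); the call
+## dispatches to the class-specific method and returns a character vector:
+# names_RLum(curve)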
diff --git a/R/plot_AbanicoPlot.R b/R/plot_AbanicoPlot.R
new file mode 100644
index 0000000..7a0730d
--- /dev/null
+++ b/R/plot_AbanicoPlot.R
@@ -0,0 +1,3691 @@
+#' Function to create an Abanico Plot.
+#'
+#' A plot is produced which allows comprehensive presentation of data precision
+#' and its dispersion around a central value as well as illustration of a
+#' kernel density estimate, histogram and/or dot plot of the dose values.
+#'
+#' The Abanico Plot is a combination of the classic Radial Plot
+#' (\code{plot_RadialPlot}) and a kernel density estimate plot (e.g.
+#' \code{plot_KDE}). It allows straightforward visualisation of data precision,
+#' error scatter around a user-defined central value and the combined
+#' distribution of the values, on the actual scale of the measured data (e.g.
+#' seconds, equivalent dose, years). The principle of the plot is shown in
+#' Galbraith & Green (1990). The function authors are thankful for the
+#' thought-provoking figure in this article. \cr The semi-circle (z-axis) of the
+#' classic Radial Plot is bent to a straight line here, which actually is the
+#' basis for combining this polar (radial) part of the plot with any other
+#' cartesian visualisation method (KDE, histogram, PDF and so on). Note that
+#' the plot allows displaying two measures of distribution. One is the 2-sigma
+#' bar, which illustrates the spread in value errors, and the other is the
+#' polygon, which stretches over both parts of the Abanico Plot (polar and
+#' cartesian) and illustrates the actual spread in the values themselves. \cr
+#' Since the 2-sigma-bar is a polygon, it can be (and is) filled with shaded
+#' lines. To change density (lines per inch, default is 15) and angle (default
+#' is 45 degrees) of the shading lines, specify these parameters. See
+#' \code{?polygon()} for further help. \cr The Abanico Plot supports other than
+#' the weighted mean as measure of centrality. When it is obvious that the data
+#' is not (log-)normally distributed, the mean (weighted or not) cannot be a
+#' valid measure of centrality and hence central dose. Accordingly, the median
+#' can be chosen as well to represent a proper measure of centrality
+#' (e.g. \code{z.0 = "median"}). Also
+#' user-defined numeric values (e.g. from the central age model) can be used if
+#' this appears appropriate. \cr The proportion of the polar part and the
+#' cartesian part of the Abanico Plot can be modified for display reasons
+#' (\code{plot.ratio = 0.75}). By default, the polar part spreads over 75 \%
+#' and leaves 25 \% for the part that shows the KDE graph.\cr\cr
+#' A statistic summary, i.e. a collection of statistic measures of
+#' centrality and dispersion (and further measures) can be added by specifying
+#' one or more of the following keywords:
+#'
+#' \itemize{
+#' \item \code{"n"} (number of samples)
+#' \item \code{"mean"} (mean De value)
+#' \item \code{"median"} (median of the De values)
+#' \item \code{"sd.rel"} (relative standard deviation in percent)
+#' \item \code{"sd.abs"} (absolute standard deviation)
+#' \item \code{"se.rel"} (relative standard error)
+#' \item \code{"se.abs"} (absolute standard error)
+#' \item \code{"in.2s"} (percent of samples in 2-sigma range)
+#' \item \code{"kurtosis"} (kurtosis)
+#' \item \code{"skewness"} (skewness)
+#' }
+#'
+#' Note that the input data for the statistic summary is sent to the function
+#' \code{calc_Statistics()} depending on the log-option for the z-scale. If
+#' \code{"log.z = TRUE"}, the summary is based on the logarithms of the input
+#' data. If \code{"log.z = FALSE"} the linearly scaled data is used. \cr
+#' Note as well, that \code{"calc_Statistics()"} calculates these statistic
+#' measures in three different ways: \code{unweighted}, \code{weighted} and
+#' \code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the
+#' MCM-based version is used. If you wish to use another method, indicate this
+#' with the appropriate keyword using the argument \code{summary.method}.\cr\cr
+#'
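+#' As a brief usage sketch combining these options:
+#' \code{plot_AbanicoPlot(data, summary = c("n", "mean"), summary.method = "weighted")}
+#' adds the number of values and the weighted mean to the plot.\cr\cr
+#'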
+#' The optional parameter \code{layout} allows the entire plot to be modified
+#' in a more sophisticated way. Each element of the plot can be addressed and its properties
+#' can be defined. This includes font type, size and decoration, colours and
+#' sizes of all plot items. To infer the definition of a specific layout style
+#' cf. \code{get_Layout()} or type, e.g. for the layout type \code{"journal"},
+#' \code{get_Layout("journal")}. A layout type can be modified by the user by
+#' assigning new values to the list object.\cr\cr It is possible for the
+#' z-scale to specify where ticks are to be drawn by using the parameter
+#' \code{at}, e.g. \code{at = seq(80, 200, 20)}, cf. function documentation of
+#' \code{axis}. Specifying tick positions manually overrides a
+#' \code{zlim}-definition.
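+#' For instance, \code{plot_AbanicoPlot(data, at = seq(80, 200, 20))} places
+#' z-scale ticks at 80, 100, ..., 200.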
+#'
+#' @param data \code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+#' object (required): for \code{data.frame} two columns: De (\code{data[,1]})
+#' and De error (\code{data[,2]}). To plot several data sets in one plot the
+#' data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.
+#'
+#' @param na.rm \code{\link{logical}} (with default): exclude NA values
+#' from the data set prior to any further operations.
+#'
+#' @param log.z \code{\link{logical}} (with default): Option to display the
+#' z-axis in logarithmic scale. Default is \code{TRUE}.
+#'
+#' @param z.0 \code{\link{character}} or \code{\link{numeric}}: User-defined
+#' central value, used for centering of data. One out of \code{"mean"},
+#' \code{"mean.weighted"} and \code{"median"} or a numeric value (not its
+#' logarithm). Default is \code{"mean.weighted"}.
+#'
+#' @param dispersion \code{\link{character}} (with default): measure of
+#' dispersion, used for drawing the scatter polygon. One out of \code{"qr"}
+#' (quartile range), \code{"pnn"} (symmetric percentile range with nn the lower
+#' percentile, e.g. \code{"p05"} depicting the range between 5 and 95 %),
+#' \code{"sd"} (standard deviation) and \code{"2sd"} (2 standard deviations),
+#' default is \code{"qr"}. Note that \code{"sd"} and \code{"2sd"} are only
+#' meaningful in combination with \code{"z.0 = 'mean'"} because the unweighted
+#' mean is used to center the polygon.
+#'
+#' @param plot.ratio \code{\link{numeric}}: Relative space, given to the radial
+#' versus the cartesian plot part, default is \code{0.75}.
+#'
+#' @param rotate \code{\link{logical}}: Option to turn the plot by 90 degrees.
+#'
+#' @param mtext \code{\link{character}}: additional text below the plot title.
+#'
+#' @param summary \code{\link{character}} (optional): add statistic measures of
+#' centrality and dispersion to the plot. Can be one or more of several
+#' keywords. See details for available keywords. Results differ depending on
+#' the log-option for the z-scale (see details).
+#'
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used.
+#'
+#' @param summary.method \code{\link{character}} (with default): keyword
+#' indicating the method used to calculate the statistic summary. One out of
+#' \code{"unweighted"}, \code{"weighted"} and \code{"MCM"}. See
+#' \code{\link{calc_Statistics}} for details.
+#'
+#' @param legend \code{\link{character}} vector (optional): legend content to
+#' be added to the plot.
+#'
+#' @param legend.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the legend to be plotted.
+#'
+#' @param stats \code{\link{character}}: additional labels of statistically
+#' important values in the plot. One or more out of the following:
+#' \code{"min"}, \code{"max"}, \code{"median"}.
+#'
+#' @param rug \code{\link{logical}}: Option to add a rug to the KDE part, to
+#' indicate the location of individual values.
+#'
+#' @param kde \code{\link{logical}}: Option to add a KDE plot to the dispersion
+#' part, default is \code{TRUE}.
+#'
+#' @param hist \code{\link{logical}}: Option to add a histogram to the
+#' dispersion part. Only meaningful if no more than one data set is plotted.
+#'
+#' @param dots \code{\link{logical}}: Option to add a dot plot to the
+#' dispersion part. If the number of dots exceeds the available space in the
+#' dispersion part, this is indicated by a square.
+#'
+#' @param boxplot \code{\link{logical}}: Option to add a boxplot to the
+#' dispersion part, default is \code{FALSE}.
+#'
+#' @param y.axis \code{\link{logical}}: Option to hide y-axis labels. Useful
+#' for data with small scatter.
+#'
+#' @param error.bars \code{\link{logical}}: Option to show De-errors as error
+#' bars on De-points. Useful in combination with \code{y.axis = FALSE, bar.col
+#' = "none"}.
+#'
+#' @param bar \code{\link{numeric}} (with default): option to add one or more
+#' dispersion bars (i.e., bar showing the 2-sigma range) centered at the
+#' defined values. By default a bar is drawn according to \code{"z.0"}. To omit
+#' the bar set \code{"bar = FALSE"}.
+#'
+#' @param bar.col \code{\link{character}} or \code{\link{numeric}} (with
+#' default): colour of the dispersion bar. Default is \code{"grey60"}.
+#'
+#' @param polygon.col \code{\link{character}} or \code{\link{numeric}} (with
+#' default): colour of the polygon showing the data scatter. Sometimes this
+#' polygon may be omitted for clarity. To disable it use \code{FALSE} or
+#' \code{polygon = FALSE}. Default is \code{"grey80"}.
+#'
+#' @param line \code{\link{numeric}}: numeric values of the additional lines to
+#' be added.
+#'
+#' @param line.col \code{\link{character}} or \code{\link{numeric}}: colour of
+#' the additional lines.
+#'
+#' @param line.lty \code{\link{integer}}: line type of additional lines
+#'
+#' @param line.label \code{\link{character}}: labels for the additional lines.
+#'
+#' @param grid.col \code{\link{character}} or \code{\link{numeric}} (with
+#' default): colour of the grid lines (originating at [0,0] and stretching to
+#' the z-scale). To disable grid lines use \code{FALSE}. Default is
+#' \code{"grey"}.
+#'
+#' @param frame \code{\link{numeric}} (with default): option to modify the
+#' plot frame type. Can be one out of \code{0} (no frame), \code{1} (frame
+#' originates at 0,0 and runs along min/max isochrons), \code{2} (frame
+#' embraces the 2-sigma bar), \code{3} (frame embraces the entire plot as a
+#' rectangle). Default is \code{1}.
+#'
+#' @param bw \code{\link{character}} (with default): bandwidth for the KDE,
+#' choose a numeric value for manual setting.
+#'
+#' @param output \code{\link{logical}}: Optional output of numerical plot
+#' parameters. These can be useful to reproduce similar plots. Default is
+#' \code{FALSE}.
+#'
+#' @param interactive \code{\link{logical}} (with default): create an interactive
+#' abanico plot (requires the 'plotly' package)
+#'
+#' @param \dots Further plot arguments to pass. \code{xlab} must be a vector of
+#' length 2, specifying the upper and lower x-axes labels.
+#'
+#' @return returns a plot object and, optionally, a list with plot calculation
+#' data.
+#'
+#' @section Function version: 0.1.10
+#'
+#' @author Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+#' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Inspired by a plot
+#' introduced by Galbraith & Green (1990)
+#'
+#' @seealso \code{\link{plot_RadialPlot}}, \code{\link{plot_KDE}},
+#' \code{\link{plot_Histogram}}
+#'
+#' @references Galbraith, R. & Green, P., 1990. Estimating the component ages
+#' in a finite mixture. International Journal of Radiation Applications and
+#' Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3),
+#' 197-206.
+#'
+#' Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2015.
+#' The abanico plot: visualising chronometric data with individual standard errors.
+#' Quaternary Geochronology. doi:10.1016/j.quageo.2015.09.003
+#'
+#' @examples
+#'
+#' ## load example data and recalculate to Gray
+#' data(ExampleData.DeValues, envir = environment())
+#' ExampleData.DeValues <- ExampleData.DeValues$CA1
+#'
+#' ## plot the example data straightforward
+#' plot_AbanicoPlot(data = ExampleData.DeValues)
+#'
+#' ## now with linear z-scale
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  log.z = FALSE)
+#'
+#' ## now with output of the plot parameters
+#' plot1 <- plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                           output = TRUE)
+#' str(plot1)
+#' plot1$zlim
+#'
+#' ## now with adjusted z-scale limits
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  zlim = c(10, 200))
+#'
+#' ## now with adjusted x-scale limits
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  xlim = c(0, 20))
+#'
+#' ## now with rug to indicate individual values in KDE part
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  rug = TRUE)
+#'
+#' ## now with a smaller bandwidth for the KDE plot
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  bw = 0.04)
+#'
+#' ## now with a histogram instead of the KDE plot
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  hist = TRUE,
+#'                  kde = FALSE)
+#'
+#' ## now with a KDE plot and histogram with manual number of bins
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  hist = TRUE,
+#'                  breaks = 20)
+#'
+#' ## now with a KDE plot and a dot plot
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  dots = TRUE)
+#'
+#' ## now with user-defined plot ratio
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  plot.ratio = 0.5)
+#'
+#' ## now with user-defined central value
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  z.0 = 70)
+#'
+#' ## now with median as central value
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  z.0 = "median")
+#'
+#' ## now with the 17-83 percentile range as definition of scatter
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  z.0 = "median",
+#'                  dispersion = "p17")
+#'
+#' ## now with a user-defined green line for the central age model
+#' CAM <- calc_CentralDose(ExampleData.DeValues,
+#'                         plot = FALSE)
+#'
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  line = CAM,
+#'                  line.col = "darkgreen",
+#'                  line.label = "CAM")
+#'
+#' ## now create plot with legend, colour, different points and smaller scale
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  legend = "Sample 1",
+#'                  col = "tomato4",
+#'                  bar.col = "peachpuff",
+#'                  pch = "R",
+#'                  cex = 0.8)
+#'
+#' ## now without 2-sigma bar, polygon, grid lines and central value line
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  bar.col = FALSE,
+#'                  polygon.col = FALSE,
+#'                  grid.col = FALSE,
+#'                  y.axis = FALSE,
+#'                  lwd = 0)
+#'
+#' ## now with direct display of De errors, without 2-sigma bar
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  bar.col = FALSE,
+#'                  ylab = "",
+#'                  y.axis = FALSE,
+#'                  error.bars = TRUE)
+#'
+#' ## now with user-defined axes labels
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  xlab = c("Data error (%)",
+#'                           "Data precision"),
+#'                  ylab = "Scatter",
+#'                  zlab = "Equivalent dose [Gy]")
+#'
+#' ## now with minimum, maximum and median value indicated
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  stats = c("min", "max", "median"))
+#'
+#' ## now with a brief statistical summary as subheader
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  summary = c("n", "in.2s"))
+#'
+#' ## now with another statistical summary
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  summary = c("mean.weighted", "median"),
+#'                  summary.pos = "topleft")
+#'
+#' ## now a plot with two 2-sigma bars for one data set
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  bar = c(30, 100))
+#'
+#' ## now the data set is split into sub-groups, one is manipulated
+#' data.1 <- ExampleData.DeValues[1:30,]
+#' data.2 <- ExampleData.DeValues[31:62,] * 1.3
+#'
+#' ## now a common dataset is created from the two subgroups
+#' data.3 <- list(data.1, data.2)
+#'
+#' ## now the two data sets are plotted in one plot
+#' plot_AbanicoPlot(data = data.3)
+#'
+#' ## now with some graphical modification
+#' plot_AbanicoPlot(data = data.3,
+#'                  z.0 = "median",
+#'                  col = c("steelblue4", "orange4"),
+#'                  bar.col = c("steelblue3", "orange3"),
+#'                  polygon.col = c("steelblue1", "orange1"),
+#'                  pch = c(2, 6),
+#'                  angle = c(30, 50),
+#'                  summary = c("n", "in.2s", "median"))
+#'
+#' ## create Abanico plot with predefined layout definition
+#' plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                  layout = "journal")
+#'
+#' ## now with predefined layout definition and further modifications
+#' plot_AbanicoPlot(data = data.3,
+#'                  z.0 = "median",
+#'                  layout = "journal",
+#'                  col = c("steelblue4", "orange4"),
+#'                  bar.col = adjustcolor(c("steelblue3", "orange3"),
+#'                                        alpha.f = 0.5),
+#'                  polygon.col = c("steelblue3", "orange3"))
+#'
+#' ## for further information on layout definitions see documentation
+#' ## of function get_Layout()
+#'
+#' ## now with manually added plot content
+#' ## create empty plot with numeric output
+#' AP <- plot_AbanicoPlot(data = ExampleData.DeValues,
+#'                        pch = NA,
+#'                        output = TRUE)
+#'
+#' ## identify data in 2 sigma range
+#' in_2sigma <- AP$data[[1]]$data.in.2s
+#'
+#' ## restore function-internal plot parameters
+#' par(AP$par)
+#'
+#' ## add points inside 2-sigma range
+#' points(x = AP$data[[1]]$precision[in_2sigma],
+#'        y = AP$data[[1]]$std.estimate.plot[in_2sigma],
+#'        pch = 16)
+#'
+#' ## add points outside 2-sigma range
+#' points(x = AP$data[[1]]$precision[!in_2sigma],
+#'        y = AP$data[[1]]$std.estimate.plot[!in_2sigma],
+#'        pch = 1)
+#'
+#' @export
+plot_AbanicoPlot <- function(
+  data,
+  na.rm = TRUE,
+  log.z = TRUE,
+  z.0 = "mean.weighted",
+  dispersion = "qr",
+  plot.ratio = 0.75,
+  rotate = FALSE,
+  mtext,
+  summary,
+  summary.pos,
+  summary.method = "MCM",
+  legend,
+  legend.pos,
+  stats,
+  rug = FALSE,
+  kde = TRUE,
+  hist = FALSE,
+  dots = FALSE,
+  boxplot = FALSE,
+  y.axis = TRUE,
+  error.bars = FALSE,
+  bar,
+  bar.col,
+  polygon.col,
+  line,
+  line.col,
+  line.lty,
+  line.label,
+  grid.col,
+  frame = 1,
+  bw = "SJ",
+  output = FALSE,
+  interactive = FALSE,
+  ...
+) {
+  ## check data and parameter consistency--------------------------------------
+
+  ## Homogenise input data format
+  if(is(data, "list") == FALSE) {
+    data <- list(data)
+  }
+
+  ## Check input data
+  for(i in 1:length(data)) {
+    if(is(data[[i]], "RLum.Results") == FALSE &
+       is(data[[i]], "data.frame") == FALSE) {
+      stop(paste("[plot_AbanicoPlot()] Input data format is neither",
+                 "'data.frame' nor 'RLum.Results'"))
+    } else {
+      if(is(data[[i]], "RLum.Results") == TRUE) {
+        data[[i]] <- get_RLum(data[[i]], "data")[,c(1:2)]
+      }
+    }
+  }
+
+  ## optionally, remove NA-values
+  if(na.rm == TRUE) {
+    for(i in 1:length(data)) {
+
+      n.NA <- sum(!complete.cases(data[[i]]))
+
+      if(n.NA == 1) {message(paste0("[plot_AbanicoPlot()] data set (",
+                                    i, "): 1 NA value excluded."))
+      } else if(n.NA > 1) {
+        message(paste0("[plot_AbanicoPlot()] data set (", i,"): ",
+                       n.NA, " NA values excluded."))
+      }
+
+      data[[i]] <- na.exclude(data[[i]])
+    }
+  }
+
+  ##AFTER NA removal, we should check the data set carefully again ...
+  ##(1)
+  ##check if there is still data left in the entire set
+  if(all(sapply(data, nrow) == 0)){
+
+    try(stop("[plot_AbanicoPlot()] Nothing plotted, your data set is empty!", call. = FALSE))
+    return(NULL)
+
+  }
+  ##(2)
+  ##check for sets with only 1 row or 0 rows at all
+  else if(any(sapply(data, nrow) <= 1)){
+
+    ##select problematic sets and remove the entries from the list
+    NArm.id <- which(sapply(data, nrow) <= 1)
+    data[NArm.id] <- NULL
+
+    warning(paste0("[plot_AbanicoPlot()] Data sets ",
+                   paste(NArm.id, collapse = ", "),
+                   " are found to be empty or consisting of only 1 row. Sets removed!"))
+
+    rm(NArm.id)
+
+    ## unfortunately, the data set might now be empty
+    if(length(data) == 0){
+      try(stop("[plot_AbanicoPlot()] After removing invalid entries, nothing is plotted!", call. = FALSE))
+      return(NULL)
+
+    }
+
+  }
+
+  ## check for zero-error values
+  for(i in 1:length(data)) {
+
+    if(length(data[[i]]) < 2) {
+      stop("Data without errors cannot be displayed!")
+    }
+
+    if(sum(data[[i]][,2] == 0) > 0) {
+      data[[i]] <- data[[i]][data[[i]][,2] > 0,]
+
+      if(nrow(data[[i]]) < 1) {
+        stop("[plot_AbanicoPlot()] Data set contains only values with zero errors.", call. = FALSE)
+      }
+
+      warning("[plot_AbanicoPlot()] values with zero errors cannot be displayed and were removed!",call. = FALSE)
+    }
+  }
+
+  ## save original plot parameters and restore them upon end or stop
+  par.old.full <- par(no.readonly = TRUE)
+  cex_old <- par()$cex
+
+  ## this ensures par() is respected for several plots on one page
+  if(sum(par()$mfrow) == 2 & sum(par()$mfcol) == 2){
+    on.exit(par(par.old.full))
+  }
+
+  ## check/set layout definitions
+  if("layout" %in% names(list(...))) {
+    layout <- get_Layout(layout = list(...)$layout)
+  } else {
+    layout <- get_Layout(layout = "default")
+  }
+
+  if(missing(stats) == TRUE) {
+    stats <- numeric(0)
+  }
+
+  if(missing(bar) == TRUE) {
+    bar <- rep(TRUE, length(data))
+  }
+
+  if(missing(bar.col) == TRUE) {
+    bar.fill <- rep(x = rep(x = layout$abanico$colour$bar.fill,
+                            length.out = length(data)), length(bar))
+    bar.line <- rep(rep(layout$abanico$colour$bar.line,
+                        length.out = length(data)), length(bar))
+  } else {
+    bar.fill <- bar.col
+    bar.line <- NA
+  }
+
+  if(missing(polygon.col) == TRUE) {
+    polygon.fill <- rep(layout$abanico$colour$poly.fill,
+                        length.out = length(data))
+    polygon.line <- rep(layout$abanico$colour$poly.line,
+                        length.out = length(data))
+  } else {
+    polygon.fill <- polygon.col
+    polygon.line <- NA
+  }
+
+  if(missing(grid.col) == TRUE) {
+    grid.major <- layout$abanico$colour$grid.major
+    grid.minor <- layout$abanico$colour$grid.minor
+  } else {
+    if(length(grid.col) == 1) {
+      grid.major <- grid.col[1]
+      grid.minor <- grid.col[1]
+    } else {
+      grid.major <- grid.col[1]
+      grid.minor <- grid.col[2]
+    }
+  }
+
+  if(missing(summary) == TRUE) {
+    summary <- c("n", "in.2s")
+  }
+
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- "sub"
+  }
+
+  if(missing(mtext) == TRUE) {
+    mtext <- ""
+  }
+
+  ## create preliminary global data set
+  De.global <- data[[1]][,1]
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      De.global <- c(De.global, data[[i]][,1])
+    }
+  }
+
+  ## calculate major preliminary tick values and tick difference
+  extraArgs <- list(...)
+  if("zlim" %in% names(extraArgs)) {
+    limits.z <- extraArgs$zlim
+  } else {
+    z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100)
+    z.span <- ifelse(z.span > 1, 0.9, z.span)
+    limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) *
+                    min(De.global),
+                  (1.1 + z.span) * max(De.global))
+  }
+
+  if("at" %in% names(extraArgs)) {
+    ticks <- extraArgs$at
+  } else {
+    ticks <- round(pretty(limits.z, n = 5), 3)
+  }
+
+  if("breaks" %in% names(extraArgs)) {
+    breaks <- extraArgs$breaks
+  } else {
+    breaks <- "Sturges"
+  }
+
+  ## check/set bw-parameter
+  for(i in 1:length(data)) {
+    bw.test <- try(density(x = data[[i]][,1],
+                           bw = bw),
+                   silent = TRUE)
+    if(grepl(pattern = "Error", x = bw.test[1]) == TRUE) {
+      bw <- "nrd0"
+      warning("[plot_AbanicoPlot()] Option for bw not possible. Set to nrd0!", call. = FALSE)
+    }
+  }
+
+  if ("fun" %in% names(extraArgs)) {
+    fun <- list(...)$fun
+
+  } else {
+    fun <- FALSE
+  }
+
+  ## check for negative values; abort plotting, but do not throw an error
+  if(min(De.global) < 0) {
+    message("\n [plot_AbanicoPlot()] data contains negative values. Nothing plotted!")
+    return(NULL)
+  }
+
+  ##check for 0 dose values and adjust for plotting ...
+  if((min(De.global) == 0) && log.z == TRUE){
+    warning("\n [plot_AbanicoPlot()] data contains 0 values, values positively shifted by 0.01",
+            call. = FALSE)
+    data <- lapply(1:length(data), function(x){
+      df <- data.frame(
+        data[[x]][,1] + 0.01, data[[x]][,2])
+      colnames(df) <- colnames(data[[x]])
+      return(df)
+
+    })
+
+  }
+
+  ## calculate and append statistical measures --------------------------------
+
+  ## z-values based on log-option
+  z <- lapply(1:length(data), function(x){
+    if(log.z == TRUE) {
+      log(data[[x]][,1])
+    } else {
+      data[[x]][,1]
+    }
+  })
+  if(is(z, "list") == FALSE) {
+    z <- list(z)
+  }
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], z[[x]])
+  })
+  rm(z)
+
+  ## calculate dispersion based on log-option
+  se <- lapply(1:length(data), function(x){
+    if(log.z == TRUE) {
+      data[[x]][,2] / data[[x]][,1]
+    } else {
+      data[[x]][,2]
+    }
+  })
+  if(is(se, "list") == FALSE) {
+    se <- list(se)
+  }
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], se[[x]])
+  })
+  rm(se)
+
+  ## calculate initial data statistics
+  stats.init <- list(NA)
+  for(i in 1:length(data)) {
+    stats.init[[length(stats.init) + 1]] <- calc_Statistics(data = data[[i]][,3:4])
+  }
+  stats.init[[1]] <- NULL
+
+  ## calculate central values
+  if(z.0 == "mean") {
+    z.central <- lapply(1:length(data), function(x){
+      rep(stats.init[[x]]$unweighted$mean,
+          length(data[[x]][,3]))})
+  } else if(z.0 == "median") {
+    z.central <- lapply(1:length(data), function(x){
+      rep(stats.init[[x]]$unweighted$median,
+          length(data[[x]][,3]))})
+  } else  if(z.0 == "mean.weighted") {
+    z.central <- lapply(1:length(data), function(x){
+      rep(stats.init[[x]]$weighted$mean,
+          length(data[[x]][,3]))})
+  } else if(is.numeric(z.0) == TRUE) {
+    z.central <- lapply(1:length(data), function(x){
+      rep(ifelse(log.z == TRUE,
+                 log(z.0),
+                 z.0),
+          length(data[[x]][,3]))})
+  } else {
+    stop("Value for z.0 not supported!")
+  }
+
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], z.central[[x]])})
+  rm(z.central)
+
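+  ## radial plot geometry (cf. Galbraith & Green, 1990): each value is drawn
+  ## at x = precision = 1/se and y = standardised estimate =
+  ## (z - z.central)/se, as computed below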
+  ## calculate precision
+  precision <- lapply(1:length(data), function(x){
+    1 / data[[x]][,4]})
+  if(is(precision, "list") == FALSE) {precision <- list(precision)}
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], precision[[x]])})
+  rm(precision)
+
+  ## calculate standardised estimate
+  std.estimate <- lapply(1:length(data), function(x){
+    (data[[x]][,3] - data[[x]][,5]) / data[[x]][,4]})
+  if(is(std.estimate, "list") == FALSE) {std.estimate <- list(std.estimate)}
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], std.estimate[[x]])})
+
+  ## append the standardised estimate again as a placeholder for the plot
+  ## values (re-calculated against the global central value further below)
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], std.estimate[[x]])})
+  rm(std.estimate)
+
+  ## append optional weights for the KDE curve (precision-based weights if
+  ## 'weights = TRUE' is passed, equal weights otherwise)
+  use.weights <- "weights" %in% names(extraArgs) && extraArgs$weights == TRUE
+
+  wgt <- lapply(1:length(data), function(x){
+    if(use.weights) {
+      (1 / data[[x]][,2]) / sum(1 / data[[x]][,2]^2)
+    } else {
+      rep(x = 1, times = nrow(data[[x]])) /
+        sum(rep(x = 1, times = nrow(data[[x]])))
+    }
+  })
+
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], wgt[[x]])})
+
+  rm(wgt)
+
+  ## generate global data set
+  data.global <- cbind(data[[1]],
+                       rep(x = 1, times = nrow(data[[1]])))
+  colnames(data.global) <- rep("", 10)
+
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      data.add <- cbind(data[[i]],
+                        rep(x = i, times = nrow(data[[i]])))
+      colnames(data.add) <- rep("", 10)
+      data.global <- rbind(data.global,
+                           data.add)
+    }
+  }
+
+  ## create column names
+  colnames(data.global) <- c("De",
+                             "error",
+                             "z",
+                             "se",
+                             "z.central",
+                             "precision",
+                             "std.estimate",
+                             "std.estimate.plot",
+                             "weights",
+                             "data set")
+
+  ## calculate global data statistics
+  stats.global <- calc_Statistics(data = data.global[,3:4])
+
+  ## calculate global central value
+  if(z.0 == "mean") {
+    z.central.global <- stats.global$unweighted$mean
+  } else if(z.0 == "median") {
+    z.central.global <- stats.global$unweighted$median
+  } else  if(z.0 == "mean.weighted") {
+    z.central.global <- stats.global$weighted$mean
+  } else if(is.numeric(z.0) == TRUE) {
+    z.central.global <- ifelse(log.z == TRUE,
+                               log(z.0),
+                               z.0)
+  } else {
+    stop("Value for z.0 not supported!")
+  }
+
+  ## create column names
+  for(i in 1:length(data)) {
+    colnames(data[[i]]) <- c("De",
+                             "error",
+                             "z",
+                             "se",
+                             "z.central",
+                             "precision",
+                             "std.estimate",
+                             "std.estimate.plot",
+                             "weights")
+  }
+
+  ## re-calculate standardised estimate for plotting
+  for(i in 1:length(data)) {
+    data[[i]][,8] <- (data[[i]][,3] - z.central.global) / data[[i]][,4]
+  }
+
+  data.global.plot <- data[[1]][,8]
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      data.global.plot <- c(data.global.plot, data[[i]][,8])
+    }
+  }
+  data.global[,8] <- data.global.plot
+
+  ## print message for too small scatter
+  if(max(abs(1 / data.global[,6])) < 0.02) {
+    small.sigma <- TRUE
+    message("[plot_AbanicoPlot()] Attention, small standardised estimate scatter. Toggle off y.axis?")
+  }
+
+  ## read out additional arguments---------------------------------------------
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {
+    extraArgs$main
+  } else {
+    expression(paste(D[e], " distribution"))
+  }
+
+  sub <- if("sub" %in% names(extraArgs)) {
+    extraArgs$sub
+  } else {
+    ""
+  }
+
+  if("xlab" %in% names(extraArgs)) {
+    if(length(extraArgs$xlab) != 2) {
+      if (length(extraArgs$xlab) == 3) {
+        xlab <- c(extraArgs$xlab[1:2], "Density")
+      } else {
+        stop("Argmuent xlab is not of length 2!")
+      }
+    } else {xlab <- c(extraArgs$xlab, "Density")}
+  } else {
+    xlab <- c(if(log.z == TRUE) {
+      "Relative standard error (%)"
+    } else {
+      "Standard error"
+    },
+    "Precision",
+    "Density")
+  }
+
+  ylab <- if("ylab" %in% names(extraArgs)) {
+    extraArgs$ylab
+  } else {
+    "Standardised estimate"
+  }
+
+  zlab <- if("zlab" %in% names(extraArgs)) {
+    extraArgs$zlab
+  } else {
+    expression(paste(D[e], " [Gy]"))
+  }
+
+  if("zlim" %in% names(extraArgs)) {
+    limits.z <- extraArgs$zlim
+  } else {
+    z.span <- (mean(data.global[,1]) * 0.5) / (sd(data.global[,1]) * 100)
+    z.span <- ifelse(z.span > 1, 0.9, z.span)
+    limits.z <- c((0.9 - z.span) * min(data.global[[1]]),
+                  (1.1 + z.span) * max(data.global[[1]]))
+  }
+
+  if("xlim" %in% names(extraArgs)) {
+    limits.x <- extraArgs$xlim
+  } else {
+    limits.x <- c(0, max(data.global[,6]) * 1.05)
+  }
+
+  if(limits.x[1] != 0) {
+    limits.x[1] <- 0
+    warning("Lower x-axis limit not set to zero, issue corrected!")
+  }
+
+  if("ylim" %in% names(extraArgs)) {
+    limits.y <- extraArgs$ylim
+  } else {
+    y.span <- (mean(data.global[,1]) * 10) / (sd(data.global[,1]) * 100)
+    y.span <- ifelse(y.span > 1, 0.98, y.span)
+    limits.y <- c(-(1 + y.span) * max(abs(data.global[,7])),
+                  (1 + y.span) * max(abs(data.global[,7])))
+  }
+
+  cex <- if("cex" %in% names(extraArgs)) {
+    extraArgs$cex
+  } else {
+    1
+  }
+
+  lty <- if("lty" %in% names(extraArgs)) {
+    extraArgs$lty
+  } else {
+    rep(rep(2, length(data)), length(bar))
+  }
+
+  lwd <- if("lwd" %in% names(extraArgs)) {
+    extraArgs$lwd
+  } else {
+    rep(rep(1, length(data)), length(bar))
+  }
+
+  pch <- if("pch" %in% names(extraArgs)) {
+    extraArgs$pch
+  } else {
+    rep(20, length(data))
+  }
+
+  if("col" %in% names(extraArgs)) {
+    bar.col <- extraArgs$col
+    kde.line <- extraArgs$col
+    kde.fill <- NA
+    value.dot <- extraArgs$col
+    value.bar <- extraArgs$col
+    value.rug <- extraArgs$col
+    summary.col <- extraArgs$col
+    centrality.col <- extraArgs$col
+  } else {
+    if(length(layout$abanico$colour$bar) == 1) {
+      bar.col <- 1:length(data)
+    } else {
+      bar.col <- layout$abanico$colour$bar.col
+    }
+
+    if(length(layout$abanico$colour$kde.line) == 1) {
+      kde.line <- 1:length(data)
+    } else {
+      kde.line <- layout$abanico$colour$kde.line
+    }
+
+    if(length(layout$abanico$colour$kde.fill) == 1) {
+      kde.fill <- rep(layout$abanico$colour$kde.fill, length(data))
+    } else {
+      kde.fill <- layout$abanico$colour$kde.fill
+    }
+
+    if(length(layout$abanico$colour$value.dot) == 1) {
+      value.dot <- 1:length(data)
+    } else {
+      value.dot <- layout$abanico$colour$value.dot
+    }
+
+    if(length(layout$abanico$colour$value.bar) == 1) {
+      value.bar <- 1:length(data)
+    } else {
+      value.bar <- layout$abanico$colour$value.bar
+    }
+
+    if(length(layout$abanico$colour$value.rug) == 1) {
+      value.rug <- 1:length(data)
+    } else {
+      value.rug <- layout$abanico$colour$value.rug
+    }
+
+    if(length(layout$abanico$colour$summary) == 1) {
+      summary.col <- 1:length(data)
+    } else {
+      summary.col <- layout$abanico$colour$summary
+    }
+
+    if(length(layout$abanico$colour$centrality) == 1) {
+      centrality.col <- rep(x = 1:length(data), times = length(bar))
+    } else {
+      centrality.col <- rep(x = layout$abanico$colour$centrality,
+                            times = length(bar))
+    }
+  }
+
+  ## update central line colour
+  centrality.col <- rep(centrality.col, length(bar))
+
+  tck <- if("tck" %in% names(extraArgs)) {
+    extraArgs$tck
+  } else {
+    NA
+  }
+
+  tcl <- if("tcl" %in% names(extraArgs)) {
+    extraArgs$tcl
+  } else {
+    -0.5
+  }
+
+  ## define auxiliary plot parameters -----------------------------------------
+
+  ## set space between z-axis and baseline of cartesian part
+  if(boxplot == TRUE) {
+
+    lostintranslation <- 1.03
+  } else {
+
+    lostintranslation <- 1.03
+    plot.ratio <- plot.ratio * 1.05
+  }
+
+
+  ## create empty plot to update plot parameters
+  if(rotate == FALSE) {
+    plot(NA,
+         xlim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)),
+         ylim = limits.y,
+         main = "",
+         sub = "",
+         xlab = "",
+         ylab = "",
+         xaxs = "i",
+         yaxs = "i",
+         frame.plot = FALSE,
+         axes = FALSE)
+  } else {
+    plot(NA,
+         xlim = limits.y,
+         ylim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)),
+         main = "",
+         sub = "",
+         xlab = "",
+         ylab = "",
+         xaxs = "i",
+         yaxs = "i",
+         frame.plot = FALSE,
+         axes = FALSE)
+  }
+
+  ## calculate conversion factor for plot coordinates
+  f <- 0
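+  ## (with f = 0 the z-axis arc collapses to a straight line, i.e. the
+  ## "bent" semi-circle described in the details section)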
+
+  ## calculate major and minor z-tick values
+  if("at" %in% names(extraArgs)) {
+    tick.values.major <- extraArgs$at
+    tick.values.minor <- extraArgs$at
+  } else {
+    tick.values.major <- signif(pretty(limits.z, n = 5), 3)
+    tick.values.minor <- signif(pretty(limits.z, n = 25), 3)
+  }
+
+  tick.values.major <- tick.values.major[tick.values.major >=
+                                           min(tick.values.minor)]
+  tick.values.major <- tick.values.major[tick.values.major <=
+                                           max(tick.values.minor)]
+  tick.values.major <- tick.values.major[tick.values.major >=
+                                           limits.z[1]]
+  tick.values.major <- tick.values.major[tick.values.major <=
+                                           limits.z[2]]
+  tick.values.minor <- tick.values.minor[tick.values.minor >=
+                                           limits.z[1]]
+  tick.values.minor <- tick.values.minor[tick.values.minor <=
+                                           limits.z[2]]
+
+
+  if(log.z == TRUE) {
+
+    tick.values.major[which(tick.values.major==0)] <- 1
+    tick.values.minor[which(tick.values.minor==0)] <- 1
+
+    tick.values.major <- log(tick.values.major)
+    tick.values.minor <- log(tick.values.minor)
+  }
+
+  ## calculate z-axis radius
+  r <- max(sqrt((limits.x[2])^2 + (data.global[,7] * f)^2))
+
+
+  ## create z-axes labels
+  if(log.z == TRUE) {
+    label.z.text <- signif(exp(tick.values.major), 3)
+  } else {
+    label.z.text <- signif(tick.values.major, 3)
+  }
+
+  ## calculate node coordinates for semi-circle
+  ellipse.values <- c(min(ifelse(log.z == TRUE,
+                                 log(limits.z[1]),
+                                 limits.z[1]),
+                          tick.values.major,
+                          tick.values.minor),
+                      max(ifelse(log.z == TRUE,
+                                 log(limits.z[2]),
+                                 limits.z[2]),
+                          tick.values.major,
+                          tick.values.minor))
+
+  ## correct for unpleasant value
+  ellipse.values[ellipse.values == -Inf] <- 0
+
+  if(rotate == FALSE) {
+    ellipse.x <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2)
+    ellipse.y <- (ellipse.values - z.central.global) * ellipse.x
+  } else {
+    ellipse.y <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2)
+    ellipse.x <- (ellipse.values - z.central.global) * ellipse.y
+  }
+
+  ellipse <- cbind(ellipse.x, ellipse.y)
+
+  ## calculate statistical labels
+  if(length(stats) == 1) {stats <- rep(stats, 2)}
+  stats.data <- matrix(nrow = 3, ncol = 3)
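+  ## columns: precision (x), standardised estimate (y) and De value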
+  data.stats <- as.numeric(data.global[,1])
+
+  if("min" %in% stats == TRUE) {
+    stats.data[1, 3] <- data.stats[data.stats == min(data.stats)][1]
+    stats.data[1, 1] <- data.global[data.stats == stats.data[1, 3], 6][1]
+    stats.data[1, 2] <- data.global[data.stats == stats.data[1, 3], 8][1]
+  }
+
+  if("max" %in% stats == TRUE) {
+    stats.data[2, 3] <- data.stats[data.stats == max(data.stats)][1]
+    stats.data[2, 1] <- data.global[data.stats == stats.data[2, 3], 6][1]
+    stats.data[2, 2] <- data.global[data.stats == stats.data[2, 3], 8][1]
+  }
+
+  if("median" %in% stats == TRUE) {
+    stats.data[3, 3] <- data.stats[data.stats == quantile(data.stats, 0.5, type = 3)]
+    stats.data[3, 1] <- data.global[data.stats == stats.data[3, 3], 6][1]
+    stats.data[3, 2] <- data.global[data.stats == stats.data[3, 3], 8][1]
+  }
+
+  ## re-calculate axes limits if necessary
+  if(rotate == FALSE) {
+    limits.z.x <- range(ellipse[,1])
+    limits.z.y <- range(ellipse[,2])
+  } else {
+    limits.z.x <- range(ellipse[,2])
+    limits.z.y <- range(ellipse[,1])
+  }
+
+  if(!("ylim" %in% names(extraArgs))) {
+    if(limits.z.y[1] < 0.66 * limits.y[1]) {
+      limits.y[1] <- 1.8 * limits.z.y[1]
+    }
+    if(limits.z.y[2] > 0.77 * limits.y[2]) {
+      limits.y[2] <- 1.3 * limits.z.y[2]
+    }
+
+    if(rotate == TRUE) {
+      limits.y <- c(-max(abs(limits.y)), max(abs(limits.y)))
+    }
+
+  }
+  if(!("xlim" %in% names(extraArgs))) {
+    if(limits.z.x[2] > 1.1 * limits.x[2]) {
+      limits.x[2] <- limits.z.x[2]
+    }
+  }
+
+  ## calculate and paste statistical summary
+  De.stats <- matrix(nrow = length(data), ncol = 12)
+  colnames(De.stats) <- c("n",
+                          "mean",
+                          "median",
+                          "kde.max",
+                          "sd.abs",
+                          "sd.rel",
+                          "se.abs",
+                          "se.rel",
+                          "q.25",
+                          "q.75",
+                          "skewness",
+                          "kurtosis")
+
+  for(i in 1:length(data)) {
+    statistics <- calc_Statistics(data[[i]])[[summary.method]]
+    statistics.2 <- calc_Statistics(data[[i]][,3:4])[[summary.method]]
+
+    De.stats[i,1] <- statistics$n
+    De.stats[i,2] <- statistics.2$mean
+    De.stats[i,3] <- statistics.2$median
+    De.stats[i,5] <- statistics$sd.abs
+    De.stats[i,6] <- statistics$sd.rel
+    De.stats[i,7] <- statistics$se.abs
+    De.stats[i,8] <- statistics$se.rel
+    De.stats[i,9] <- quantile(data[[i]][,1], 0.25)
+    De.stats[i,10] <- quantile(data[[i]][,1], 0.75)
+    De.stats[i,11] <- statistics$skewness
+    De.stats[i,12] <- statistics$kurtosis
+
+    ## account for log.z-option
+    if(log.z == TRUE) {
+      De.stats[i,2:4] <- exp(De.stats[i,2:4])
+    }
+
+    ##kdemax - here a little doubled as it appears below again
+    De.density <-density(x = data[[i]][,1],
+                         kernel = "gaussian",
+                         bw = bw,
+                         from = limits.z[1],
+                         to = limits.z[2])
+
+    De.stats[i,4] <- De.density$x[which.max(De.density$y)]
+  }
+
+  label.text <- list(NA)
+
+  if(summary.pos[1] != "sub") {
+    n.rows <- length(summary)
+
+    for(i in 1:length(data)) {
+      stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "")
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          paste(
+                            "",
+                            ifelse("n" %in% summary[j] == TRUE,
+                                   paste("n = ",
+                                         De.stats[i,1],
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean" %in% summary[j] == TRUE,
+                                   paste("mean = ",
+                                         round(De.stats[i,2], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median" %in% summary[j] == TRUE,
+                                   paste("median = ",
+                                         round(De.stats[i,3], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kde.max" %in% summary[j] == TRUE,
+                                   paste("kdemax = ",
+                                         round(De.stats[i,4], 2),
+                                         " \n ",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sd.abs" %in% summary[j] == TRUE,
+                                   paste("abs. sd = ",
+                                         round(De.stats[i,5], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sd.rel" %in% summary[j] == TRUE,
+                                   paste("rel. sd = ",
+                                         round(De.stats[i,6], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("se.abs" %in% summary[j] == TRUE,
+                                   paste("se = ",
+                                         round(De.stats[i,7], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("se.rel" %in% summary[j] == TRUE,
+                                   paste("rel. se = ",
+                                         round(De.stats[i,8], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("skewness" %in% summary[j] == TRUE,
+                                   paste("skewness = ",
+                                         round(De.stats[i,11], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kurtosis" %in% summary[j] == TRUE,
+                                   paste("kurtosis = ",
+                                         round(De.stats[i,12], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("in.2s" %in% summary[j] == TRUE,
+                                   paste("in 2 sigma = ",
+                                         round(sum(data[[i]][,7] > -2 &
+                                                     data[[i]][,7] < 2) /
+                                                 nrow(data[[i]]) * 100 , 1),
+                                         " %",
+                                         sep = ""),
+                                   ""),
+                            sep = ""))
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]] <- paste(stops,
+                                                    summary.text,
+                                                    stops,
+                                                    sep = "")
+    }
+  } else {
+    for(i in 1:length(data)) {
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          ifelse("n" %in% summary[j] == TRUE,
+                                 paste("n = ",
+                                       De.stats[i,1],
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean" %in% summary[j] == TRUE,
+                                 paste("mean = ",
+                                       round(De.stats[i,2], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median" %in% summary[j] == TRUE,
+                                 paste("median = ",
+                                       round(De.stats[i,3], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kde.max" %in% summary[j] == TRUE,
+                                 paste("kdemax = ",
+                                       round(De.stats[i,4], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sd.abs" %in% summary[j] == TRUE,
+                                 paste("abs. sd = ",
+                                       round(De.stats[i,5], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sd.rel" %in% summary[j] == TRUE,
+                                 paste("rel. sd = ",
+                                       round(De.stats[i,6], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("se.rel" %in% summary[j] == TRUE,
+                                 paste("rel. se = ",
+                                       round(De.stats[i,7], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("se.abs" %in% summary[j] == TRUE,
+                                 paste("abs. se = ",
+                                       round(De.stats[i,8], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("skewness" %in% summary[j] == TRUE,
+                                 paste("skewness = ",
+                                       round(De.stats[i,11], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kurtosis" %in% summary[j] == TRUE,
+                                 paste("kurtosis = ",
+                                       round(De.stats[i,12], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("in.2s" %in% summary[j] == TRUE,
+                                 paste("in 2 sigma = ",
+                                       round(sum(data[[i]][,7] > -2 &
+                                                   data[[i]][,7] < 2) /
+                                               nrow(data[[i]]) * 100 , 1),
+                                       " % |  ",
+                                       sep = ""),
+                                 "")
+        )
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]]  <- paste(
+        "  ",
+        summary.text,
+        sep = "")
+    }
+
+    ## remove outer vertical lines from string
+    for(i in 2:length(label.text)) {
+      label.text[[i]] <- substr(x = label.text[[i]],
+                                start = 3,
+                                stop = nchar(label.text[[i]]) - 3)
+    }
+  }
+
+  ## remove dummy list element
+  label.text[[1]] <- NULL
+
+  if(rotate == FALSE) {
+    ## convert keywords into summary placement coordinates
+    if(missing(summary.pos) == TRUE) {
+      summary.pos <- c(limits.x[1], limits.y[2])
+      summary.adj <- c(0, 1)
+    } else if(length(summary.pos) == 2) {
+      summary.pos <- summary.pos
+      summary.adj <- c(0, 1)
+    } else if(summary.pos[1] == "topleft") {
+      summary.pos <- c(limits.x[1], limits.y[2] - par()$cxy[2] * 1)
+      summary.adj <- c(0, 1)
+    } else if(summary.pos[1] == "top") {
+      summary.pos <- c(mean(limits.x), limits.y[2] - par()$cxy[2] * 1)
+      summary.adj <- c(0.5, 1)
+    } else if(summary.pos[1] == "topright") {
+      summary.pos <- c(limits.x[2], limits.y[2] - par()$cxy[2] * 1)
+      summary.adj <- c(1, 1)
+    }  else if(summary.pos[1] == "left") {
+      summary.pos <- c(limits.x[1], mean(limits.y))
+      summary.adj <- c(0, 0.5)
+    } else if(summary.pos[1] == "center") {
+      summary.pos <- c(mean(limits.x), mean(limits.y))
+      summary.adj <- c(0.5, 0.5)
+    } else if(summary.pos[1] == "right") {
+      summary.pos <- c(limits.x[2], mean(limits.y))
+      summary.adj <- c(1, 0.5)
+    }else if(summary.pos[1] == "bottomleft") {
+      summary.pos <- c(limits.x[1], limits.y[1] + par()$cxy[2] * 3.5)
+      summary.adj <- c(0, 0)
+    } else if(summary.pos[1] == "bottom") {
+      summary.pos <- c(mean(limits.x), limits.y[1] + par()$cxy[2] * 3.5)
+      summary.adj <- c(0.5, 0)
+    } else if(summary.pos[1] == "bottomright") {
+      summary.pos <- c(limits.x[2], limits.y[1] + par()$cxy[2] * 3.5)
+      summary.adj <- c(1, 0)
+    }
+
+    ## convert keywords into legend placement coordinates
+    if(missing(legend.pos) == TRUE) {
+      legend.pos <- c(limits.x[1], limits.y[2])
+      legend.adj <- c(0, 1)
+    } else if(length(legend.pos) == 2) {
+      legend.pos <- legend.pos
+      legend.adj <- c(0, 1)
+    } else if(legend.pos[1] == "topleft") {
+      legend.pos <- c(limits.x[1], limits.y[2])
+      legend.adj <- c(0, 1)
+    } else if(legend.pos[1] == "top") {
+      legend.pos <- c(mean(limits.x), limits.y[2])
+      legend.adj <- c(0.5, 1)
+    } else if(legend.pos[1] == "topright") {
+      legend.pos <- c(limits.x[2], limits.y[2])
+      legend.adj <- c(1, 1)
+    } else if(legend.pos[1] == "left") {
+      legend.pos <- c(limits.x[1], mean(limits.y))
+      legend.adj <- c(0, 0.5)
+    } else if(legend.pos[1] == "center") {
+      legend.pos <- c(mean(limits.x), mean(limits.y))
+      legend.adj <- c(0.5, 0.5)
+    } else if(legend.pos[1] == "right") {
+      legend.pos <- c(limits.x[2], mean(limits.y))
+      legend.adj <- c(1, 0.5)
+    } else if(legend.pos[1] == "bottomleft") {
+      legend.pos <- c(limits.x[1], limits.y[1])
+      legend.adj <- c(0, 0)
+    } else if(legend.pos[1] == "bottom") {
+      legend.pos <- c(mean(limits.x), limits.y[1])
+      legend.adj <- c(0.5, 0)
+    } else if(legend.pos[1] == "bottomright") {
+      legend.pos <- c(limits.x[2], limits.y[1])
+      legend.adj <- c(1, 0)
+    }
+  } else {
+    ## convert keywords into summary placement coordinates
+    if(missing(summary.pos) == TRUE) {
+      summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1])
+      summary.adj <- c(0, 0)
+    } else if(length(summary.pos) == 2) {
+      summary.adj <- c(0, 1)
+    } else if(summary.pos[1] == "topleft") {
+      summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[2])
+      summary.adj <- c(0, 1)
+    } else if(summary.pos[1] == "top") {
+      summary.pos <- c(mean(limits.y), limits.x[2])
+      summary.adj <- c(0.5, 1)
+    } else if(summary.pos[1] == "topright") {
+      summary.pos <- c(limits.y[2], limits.x[2])
+      summary.adj <- c(1, 1)
+    }  else if(summary.pos[1] == "left") {
+      summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, mean(limits.x))
+      summary.adj <- c(0, 0.5)
+    } else if(summary.pos[1] == "center") {
+      summary.pos <- c(mean(limits.y), mean(limits.x))
+      summary.adj <- c(0.5, 0.5)
+    } else if(summary.pos[1] == "right") {
+      summary.pos <- c(limits.y[2], mean(limits.x))
+      summary.adj <- c(1, 0.5)
+    }else if(summary.pos[1] == "bottomleft") {
+      summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1])
+      summary.adj <- c(0, 0)
+    } else if(summary.pos[1] == "bottom") {
+      summary.pos <- c(mean(limits.y), limits.x[1])
+      summary.adj <- c(0.5, 0)
+    } else if(summary.pos[1] == "bottomright") {
+      summary.pos <- c(limits.y[2], limits.x[1])
+      summary.adj <- c(1, 0)
+    }
+
+    ## convert keywords into legend placement coordinates
+    if(missing(legend.pos) == TRUE) {
+      legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1])
+      legend.adj <- c(0, 0)
+    } else if(length(legend.pos) == 2) {
+      legend.adj <- c(1, 0)
+    } else if(legend.pos[1] == "topleft") {
+      legend.pos <- c(limits.y[1] + par()$cxy[1] * 11, limits.x[2])
+      legend.adj <- c(1, 0)
+    } else if(legend.pos[1] == "top") {
+      legend.pos <- c(mean(limits.y), limits.x[2])
+      legend.adj <- c(1, 0.5)
+    } else if(legend.pos[1] == "topright") {
+      legend.pos <- c(limits.y[2], limits.x[2])
+      legend.adj <- c(1, 1)
+    } else if(legend.pos[1] == "left") {
+      legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, mean(limits.x))
+      legend.adj <- c(0.5, 0)
+    } else if(legend.pos[1] == "center") {
+      legend.pos <- c(mean(limits.y), mean(limits.x))
+      legend.adj <- c(0.5, 0.5)
+    } else if(legend.pos[1] == "right") {
+      legend.pos <- c(limits.y[2], mean(limits.x))
+      legend.adj <- c(0.5, 1)
+    } else if(legend.pos[1] == "bottomleft") {
+      legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1])
+      legend.adj <- c(0, 0)
+    } else if(legend.pos[1] == "bottom") {
+      legend.pos <- c(mean(limits.y), limits.x[1])
+      legend.adj <- c(0, 0.5)
+    } else if(legend.pos[1] == "bottomright") {
+      legend.pos <- c(limits.y[2], limits.x[1])
+      legend.adj <- c(0, 1)
+    }
+  }
+
+  ## define cartesian plot origins
+  if(rotate == FALSE) {
+    xy.0 <- c(min(ellipse[,1]) * lostintranslation, min(ellipse[,2]))
+  } else {
+    xy.0 <- c(min(ellipse[,1]), min(ellipse[,2]) * lostintranslation)
+  }
+
+  ## calculate coordinates for dispersion polygon overlay
+  y.max.x <- 2 * limits.x[2] / max(data.global[6])
+
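+  ## mapping rule of the abanico plot: a value z plots at
+  ## y = (z - z.central.global) * x, with x being the precision, so lines
+  ## of equal z radiate from the plot origin; the polygon vertices below
+  ## apply this rule at the plot borders (limits.x, xy.0, par()$usr)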
+  polygons <- matrix(nrow = length(data), ncol = 14)
+  for(i in 1:length(data)) {
+
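+    ## supported dispersion measures: "qr" (interquartile range), "pXX"
+    ## (quantile range from (100 - XX)/100 to 1 - (100 - XX)/100, e.g.
+    ## "p95") and "sd"/"2sd" (one or two standard deviations, calculated
+    ## in log space if log.z = TRUE)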
+    if(dispersion == "qr") {
+      ci.lower <- quantile(data[[i]][,1], 0.25)
+      ci.upper <- quantile(data[[i]][,1], 0.75)
+    } else if(grepl(x = dispersion, pattern = "p") == TRUE) {
+      ci.plot <- as.numeric(strsplit(x = dispersion,
+                                     split = "p")[[1]][2])
+      ci.plot <- (100 - ci.plot) / 100
+      ci.lower <- quantile(data[[i]][,1], ci.plot)
+      ci.upper <- quantile(data[[i]][,1], 1 - ci.plot)
+    } else if(dispersion == "sd") {
+      if(log.z == TRUE) {
+        ci.lower <- exp(mean(log(data[[i]][,1])) - sd(log(data[[i]][,1])))
+        ci.upper <- exp(mean(log(data[[i]][,1])) + sd(log(data[[i]][,1])))
+      } else {
+        ci.lower <- mean(data[[i]][,1]) - sd(data[[i]][,1])
+        ci.upper <- mean(data[[i]][,1]) + sd(data[[i]][,1])
+      }
+    } else if(dispersion == "2sd") {
+      if(log.z == TRUE) {
+        ci.lower <- exp(mean(log(data[[i]][,1])) - 2 * sd(log(data[[i]][,1])))
+        ci.upper <- exp(mean(log(data[[i]][,1])) + 2 * sd(log(data[[i]][,1])))
+      } else {
+        ci.lower <- mean(data[[i]][,1]) - 2 * sd(data[[i]][,1])
+        ci.upper <- mean(data[[i]][,1]) + 2 * sd(data[[i]][,1])
+      }
+    } else {
+      stop("Measure of dispersion not supported.")
+    }
+
+    if(log.z == TRUE) {
+      ci.lower[which(ci.lower < 0)] <- 1
+      y.lower <- log(ci.lower)
+      y.upper <- log(ci.upper)
+    } else {
+      y.lower <- ci.lower
+      y.upper <- ci.upper
+    }
+
+    if(rotate == FALSE) {
+      polygons[i,1:7] <- c(limits.x[1],
+                           limits.x[2],
+                           xy.0[1],
+                           par()$usr[2],
+                           par()$usr[2],
+                           xy.0[1],
+                           limits.x[2])
+      polygons[i,8:14] <- c(0,
+                            (y.upper - z.central.global) *
+                              limits.x[2],
+                            (y.upper - z.central.global) *
+                              xy.0[1],
+                            (y.upper - z.central.global) *
+                              xy.0[1],
+                            (y.lower - z.central.global) *
+                              xy.0[1],
+                            (y.lower - z.central.global) *
+                              xy.0[1],
+                            (y.lower - z.central.global) *
+                              limits.x[2]
+      )
+    } else {
+      y.max <- par()$usr[4]
+      polygons[i,1:7] <- c(limits.x[1],
+                           limits.x[2],
+                           xy.0[2],
+                           y.max,
+                           y.max,
+                           xy.0[2],
+                           limits.x[2])
+      polygons[i,8:14] <- c(0,
+                            (y.upper - z.central.global) *
+                              limits.x[2],
+                            (y.upper - z.central.global) *
+                              xy.0[2],
+                            (y.upper - z.central.global) *
+                              xy.0[2],
+                            (y.lower - z.central.global) *
+                              xy.0[2],
+                            (y.lower - z.central.global) *
+                              xy.0[2],
+                            (y.lower - z.central.global) *
+                              limits.x[2]
+      )
+    }
+  }
+
+  ## append flag indicating whether a value lies within the 2-sigma band
+  for(i in 1:length(data)) {
+    data.in.2s <- rep(x = FALSE, times = nrow(data[[i]]))
+    data.in.2s[data[[i]][,8] > -2 & data[[i]][,8] < 2] <- TRUE
+    data[[i]] <- cbind(data[[i]], data.in.2s)
+  }
+
+  ## calculate coordinates for 2-sigma bar overlay
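+  ## each bar is a parallelogram: at x = limits.x[1] it spans the
+  ## standardised interval [-2, 2], at the outer x-position this interval
+  ## is shifted by (z - z.central.global) * x, i.e. the bar follows the
+  ## ray of its central value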
+  if(bar[1] == TRUE) {
+    bars <- matrix(nrow = length(data), ncol = 8)
+
+    for(i in 1:length(data)) {
+      bars[i,1:4] <- c(limits.x[1],
+                       limits.x[1],
+                       ifelse("xlim" %in% names(extraArgs),
+                              extraArgs$xlim[2] * 0.95,
+                              max(data.global$precision)),
+                       ifelse("xlim" %in% names(extraArgs),
+                              extraArgs$xlim[2] * 0.95,
+                              max(data.global$precision)))
+
+      bars[i,5:8] <- c(-2,
+                       2,
+                       (data[[i]][1,5] - z.central.global) *
+                         bars[i,3] + 2,
+                       (data[[i]][1,5] - z.central.global) *
+                         bars[i,3] - 2)
+
+    }
+  } else {
+    bars <- matrix(nrow = length(bar), ncol = 8)
+
+    if(is.numeric(bar) == TRUE & log.z == TRUE) {
+      bar <- log(bar)
+    }
+
+    for(i in 1:length(bar)) {
+      bars[i,1:4] <- c(limits.x[1],
+                       limits.x[1],
+                       ifelse("xlim" %in% names(extraArgs),
+                              extraArgs$xlim[2] * 0.95,
+                              max(data.global$precision)),
+                       ifelse("xlim" %in% names(extraArgs),
+                              extraArgs$xlim[2] * 0.95,
+                              max(data.global$precision)))
+
+      bars[i,5:8] <- c(-2,
+                       2,
+                       (bar[i] - z.central.global) *
+                         bars[i,3] + 2,
+                       (bar[i] - z.central.global) *
+                         bars[i,3] - 2)
+    }
+  }
+  if (rotate == TRUE) {
+    bars <- matrix(bars[, rev(seq_len(ncol(bars)))], ncol = 8)
+  }
+
+  ## calculate error bar coordinates
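+  ## error bars are segments at x = precision (column 6) whose extent
+  ## corresponds to De +/- De error (columns 1 and 2), transformed with
+  ## the same (z - z.central.global) * x rule as the data points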
+  if(error.bars == TRUE) {
+    arrow.coords <- list(NA)
+    for(i in 1:length(data)) {
+      arrow.x1 <- data[[i]][,6]
+      arrow.x2 <- data[[i]][,6]
+      arrow.y1 <- data[[i]][,1] - data[[i]][,2]
+      arrow.y2 <- data[[i]][,1] + data[[i]][,2]
+
+      if(log.z == TRUE) {
+        arrow.y1 <- log(arrow.y1)
+        arrow.y2 <- log(arrow.y2)
+      }
+
+      arrow.coords[[length(arrow.coords) + 1]] <- cbind(
+        arrow.x1,
+        arrow.x2,
+        (arrow.y1 - z.central.global) * arrow.x1,
+        (arrow.y2 - z.central.global) * arrow.x1)
+    }
+    arrow.coords[[1]] <- NULL
+  }
+
+  ## calculate KDE
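+  ## one weighted Gaussian KDE per data set, evaluated over the z-axis
+  ## range (ellipse.values); the curves are later rescaled (KDE.scale)
+  ## to fit the plot area beyond the z-axis arc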
+  KDE <- list(NA)
+  KDE.ext <- 0
+  KDE.bw <- numeric(0)
+
+  for(i in 1:length(data)) {
+    KDE.i <- density(x = data[[i]][,3],
+                     kernel = "gaussian",
+                     bw = bw,
+                     from = ellipse.values[1],
+                     to = ellipse.values[2],
+                     weights = data[[i]]$weights)
+    KDE.xy <- cbind(KDE.i$x, KDE.i$y)
+    KDE.bw <- c(KDE.bw, KDE.i$bw)
+    KDE.ext <- ifelse(max(KDE.xy[,2]) < KDE.ext, KDE.ext, max(KDE.xy[,2]))
+    KDE.xy <- rbind(c(min(KDE.xy[,1]), 0), KDE.xy, c(max(KDE.xy[,1]), 0))
+    KDE[[length(KDE) + 1]] <- cbind(KDE.xy[,1], KDE.xy[,2])
+  }
+  KDE[1] <- NULL
+
+  ## calculate mean KDE bandwidth
+  KDE.bw <- mean(KDE.bw, na.rm = TRUE)
+
+  ## calculate max KDE value for labelling
+  KDE.max.plot <- numeric(length(data))
+
+  for(i in 1:length(data)) {
+    KDE.plot <- density(x = data[[i]][,1],
+                        kernel = "gaussian",
+                        bw = bw,
+                        from = limits.z[1],
+                        to = limits.z[2])
+    KDE.max.plot[i] <- max(KDE.plot$y)
+  }
+
+  KDE.max.plot <- max(KDE.max.plot, na.rm = TRUE)
+
+  ## calculate histogram data without plotting
+
+  ## create dummy list
+  hist.data <- list(NA)
+
+  for(i in 1:length(data)) {
+    hist.i <- hist(x = data[[i]][,3],
+                   plot = FALSE,
+                   breaks = breaks)
+    hist.data[[length(hist.data) + 1]] <- hist.i
+  }
+
+  ## remove dummy list object
+  hist.data[[1]] <- NULL
+
+  ## calculate maximum histogram bar height for normalisation
+  hist.max.plot <- numeric(length(data))
+  for(i in 1:length(data)) {
+    hist.max.plot <- ifelse(max(hist.data[[i]]$counts, na.rm = TRUE) >
+                              hist.max.plot, max(hist.data[[i]]$counts,
+                                                 na.rm = TRUE), hist.max.plot)
+  }
+  hist.max.plot <- max(hist.max.plot, na.rm = TRUE)
+
+  ## normalise histogram bar height to KDE dimensions
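+  ## counts are rescaled so that the highest bar over all data sets
+  ## matches the maximum KDE value, letting histogram and KDE share the
+  ## same axis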
+  for(i in 1:length(data)) {
+    hist.data[[i]]$density <- hist.data[[i]]$counts / hist.max.plot *
+      KDE.max.plot
+  }
+
+  ## calculate boxplot data without plotting
+
+  ## create dummy list
+  boxplot.data <- list(NA)
+
+  for(i in 1:length(data)) {
+    boxplot.i <- boxplot(x = data[[i]][,3],
+                         plot = FALSE)
+    boxplot.data[[length(boxplot.data) + 1]] <- boxplot.i
+  }
+
+  ## remove dummy list object
+  boxplot.data[[1]] <- NULL
+
+  ## calculate line coordinates and further parameters
+  if(missing(line) == FALSE) {
+
+    ## check if line parameters are RLum.Results objects
+    for(i in 1:length(line)) {
+      if(is.list(line) == TRUE) {
+        if(is(line[[i]], "RLum.Results")) {
+          line[[i]] <- as.numeric(get_RLum(object = line[[i]],
+                                           data.object = "summary")$de)
+        }
+      } else if(is(object = line, class2 = "RLum.Results")) {
+        line <- as.numeric(get_RLum(object = line,
+                                    data.object = "summary")$de)
+      }
+    }
+
+    ## convert list to vector
+    if(is.list(line) == TRUE) {
+      line <- unlist(line)
+    }
+
+    if(log.z == TRUE) {
+      line <- log(line)
+    }
+
+    line.coords <- list(NA)
+
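+    ## each line is defined by three points: the plot origin, the point
+    ## where its ray reaches the left edge of the z-axis ellipse and the
+    ## outer plot border; beyond the ellipse the line runs parallel to
+    ## the precision axis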
+    if(rotate == FALSE) {
+      for(i in 1:length(line)) {
+        line.x <- c(limits.x[1], min(ellipse[,1]), par()$usr[2])
+        line.y <- c(0,
+                    (line[i] - z.central.global) * min(ellipse[,1]),
+                    (line[i] - z.central.global) * min(ellipse[,1]))
+        line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y)
+      }
+    } else {
+      for(i in 1:length(line)) {
+        line.x <- c(limits.x[1], min(ellipse[,2]), y.max)
+        line.y <- c(0,
+                    (line[i] - z.central.global) * min(ellipse[,2]),
+                    (line[i] - z.central.global) * min(ellipse[,2]))
+        line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y)
+      }
+    }
+
+    line.coords[1] <- NULL
+
+    if(missing(line.col) == TRUE) {
+      line.col <- seq(from = 1, to = length(line.coords))
+    }
+
+    if(missing(line.lty) == TRUE) {
+      line.lty <- rep(1, length(line.coords))
+    }
+
+    if(missing(line.label) == TRUE) {
+      line.label <- rep("", length(line.coords))
+    }
+  }
+
+  ## calculate rug coordinates
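+  ## rug ticks are short segments ending at the KDE base line (xy.0);
+  ## their length is set by layout$abanico$dimension$rugl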
+  if(missing(rug) == FALSE) {
+    if(log.z == TRUE) {
+      rug.values <- log(De.global)
+    } else {
+      rug.values <- De.global
+    }
+
+    rug.coords <- list(NA)
+
+    if(rotate == FALSE) {
+      for(i in 1:length(rug.values)) {
+        rug.x <- c(xy.0[1] * (1 - 0.013 * (layout$abanico$dimension$rugl / 100)),
+                   xy.0[1])
+        rug.y <- c((rug.values[i] - z.central.global) * min(ellipse[,1]),
+                   (rug.values[i] - z.central.global) * min(ellipse[,1]))
+        rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y)
+      }
+    } else {
+      for(i in 1:length(rug.values)) {
+        rug.x <- c(xy.0[2] * (1 - 0.013 * (layout$abanico$dimension$rugl / 100)),
+                   xy.0[2])
+        rug.y <- c((rug.values[i] - z.central.global) * min(ellipse[,2]),
+                   (rug.values[i] - z.central.global) * min(ellipse[,2]))
+        rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y)
+      }
+    }
+
+    rug.coords[1] <- NULL
+  }
+
+  ## Generate plot ------------------------------------------------------------
+
+  ## determine number of subheader lines to shift the plot
+  if(length(summary) > 0 & summary.pos[1] == "sub") {
+    shift.lines <- (length(data) + 1) * layout$abanico$dimension$summary.line/100
+  } else {
+    shift.lines <- 1
+  }
+
+  ## set background colour and store it for later use
+  par(bg = layout$abanico$colour$background)
+  bg.original <- par()$bg
+
+  if(rotate == FALSE) {
+    ## setup plot area
+    par(mar = c(4.5, 4.5, shift.lines + 1.5, 7),
+        xpd = TRUE,
+        cex = cex)
+
+    if(layout$abanico$dimension$figure.width != "auto" |
+       layout$abanico$dimension$figure.height != "auto") {
+      par(mai = layout$abanico$dimension$margin / 25.4,
+          pin = c(layout$abanico$dimension$figure.width / 25.4 -
+                    layout$abanico$dimension$margin[2] / 25.4 -
+                    layout$abanico$dimension$margin[4] / 25.4,
+                  layout$abanico$dimension$figure.height / 25.4 -
+                    layout$abanico$dimension$margin[1] / 25.4 -
+                    layout$abanico$dimension$margin[3]/25.4))
+    }
+
+    ## create empty plot
+    par(new = TRUE)
+    plot(NA,
+         xlim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)),
+         ylim = limits.y,
+         main = "",
+         sub = sub,
+         xlab = "",
+         ylab = "",
+         xaxs = "i",
+         yaxs = "i",
+         frame.plot = FALSE,
+         axes = FALSE)
+
+    ## add y-axis label
+    mtext(text = ylab,
+          at = mean(x = c(min(ellipse[,2]),
+                          max(ellipse[,2])),
+                    na.rm = TRUE),
+          #        at = 0, ## BUG FROM VERSION 0.4.0, maybe removed in future
+          adj = 0.5,
+          side = 2,
+          line = 3 * layout$abanico$dimension$ylab.line / 100,
+          col = layout$abanico$colour$ylab,
+          family = layout$abanico$font.type$ylab,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$ylab],
+          cex = cex * layout$abanico$font.size$ylab/12)
+
+    ## calculate upper x-axis label values
+    label.x.upper <- if(log.z == TRUE) {
+      as.character(round(1/axTicks(side = 1)[-1] * 100, 1))
+    } else {
+      as.character(round(1/axTicks(side = 1)[-1], 1))
+    }
+
+    # optionally, plot 2-sigma-bar
+    if(bar[1] != FALSE) {
+      for(i in 1:length(bar)) {
+        polygon(x = bars[i,1:4],
+                y = bars[i,5:8],
+                col = bar.fill[i],
+                border = bar.line[i])
+      }
+    }
+
+    ## mask parts drawn beyond the plot border with the background colour
+    polygon(x = c(par()$usr[2],
+                  par()$usr[2],
+                  par()$usr[2] * 2,
+                  par()$usr[2] * 2),
+            y = c(min(ellipse[,2]) * 2,
+                  max(ellipse[,2]) * 2,
+                  max(ellipse[,2]) * 2,
+                  min(ellipse[,2]) * 2),
+            col = bg.original,
+            lty = 0)
+
+    ## optionally, plot dispersion polygon
+    if(polygon.fill[1] != "none") {
+      for(i in 1:length(data)) {
+        polygon(x = polygons[i,1:7],
+                y = polygons[i,8:14],
+                col = polygon.fill[i],
+                border = polygon.line[i])
+      }
+    }
+
+    ## optionally, add minor grid lines
+    if(grid.minor != "none") {
+
+      for(i in 1:length(tick.values.minor)) {
+        lines(x = c(limits.x[1], min(ellipse[,1])),
+              y = c(0, (tick.values.minor[i] - z.central.global) *
+                      min(ellipse[,1])),
+              col = grid.minor,
+              lwd = 1)
+      }
+
+      for(i in 1:length(tick.values.minor)) {
+        lines(x = c(xy.0[1], par()$usr[2]),
+              y = c((tick.values.minor[i] - z.central.global) *
+                      min(ellipse[,1]),
+                    (tick.values.minor[i] - z.central.global) *
+                      min(ellipse[,1])),
+              col = grid.minor,
+              lwd = 1)
+      }
+    }
+
+    ## optionally, add major grid lines
+    if(grid.major != "none") {
+      for(i in 1:length(tick.values.major)) {
+        lines(x = c(limits.x[1], min(ellipse[,1])),
+              y = c(0, (tick.values.major[i] - z.central.global) *
+                      min(ellipse[,1])),
+              col = grid.major,
+              lwd = 1)
+      }
+      for(i in 1:length(tick.values.major)) {
+        lines(x = c(xy.0[1], par()$usr[2]),
+              y = c((tick.values.major[i] - z.central.global) *
+                      min(ellipse[,1]),
+                    (tick.values.major[i] - z.central.global) *
+                      min(ellipse[,1])),
+              col = grid.major,
+              lwd = 1)
+      }
+    }
+
+    ## optionally, plot lines for each bar
+    if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE & length(data) == 1) {
+      if(bar[1] == TRUE & length(bar) == 1) {
+        bar[1] <- z.central.global
+      }
+      for(i in 1:length(bar)) {
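+        ## (x2, y2) is the point where the ray of slope
+        ## (bar[i] - z.central.global) meets the z-axis arc
+        ## x^2 + (f * y)^2 = r^2, with r and f defined earlier from the
+        ## plot geometry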
+        x2 <- r / sqrt(1 + f^2 * (
+          bar[i] - z.central.global)^2)
+        y2 <- (bar[i] - z.central.global) * x2
+        lines(x = c(limits.x[1], x2, xy.0[1], par()$usr[2]),
+              y = c(0, y2, y2, y2),
+              lty = lty[i],
+              lwd = lwd[i],
+              col = centrality.col[i])
+      }
+    } else if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE) {
+      for(i in 1:length(data)) {
+
+        z.line <- ifelse(test = is.numeric(bar[i]) == TRUE,
+                         yes = bar[i],
+                         no = data[[i]][1,5])
+
+        x2 <- r / sqrt(1 + f^2 * (
+          z.line - z.central.global)^2)
+        y2 <- (z.line - z.central.global) * x2
+        lines(x = c(limits.x[1], x2, xy.0[1], par()$usr[2]),
+              y = c(0, y2, y2, y2),
+              lty = lty[i],
+              lwd = lwd[i],
+              col = centrality.col[i])
+      }
+    }
+
+    ## optionally add further lines
+    if(missing(line) == FALSE) {
+      for(i in 1:length(line)) {
+        lines(x = line.coords[[i]][1,1:3],
+              y = line.coords[[i]][2,1:3],
+              col = line.col[i],
+              lty = line.lty[i]
+              )
+        text(x = line.coords[[i]][1,3],
+             y = line.coords[[i]][2,3] + par()$cxy[2] * 0.3,
+             labels = line.label[i],
+             pos = 2,
+             col = line.col[i],
+             cex = cex * 0.9)
+      }
+    }
+
+    ## add plot title
+    cex.old <- par()$cex
+    par(cex = layout$abanico$font.size$main / 12)
+    title(main = main,
+          family = layout$abanico$font.type$main,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$main],
+          col.main = layout$abanico$colour$main,
+          line = shift.lines * layout$abanico$dimension$main / 100)
+    par(cex = cex.old)
+
+    ## calculate lower x-axis (precision)
+    x.axis.ticks <- axTicks(side = 1)
+    x.axis.ticks <- x.axis.ticks[c(TRUE, x.axis.ticks <= limits.x[2])]
+    x.axis.ticks <- x.axis.ticks[x.axis.ticks <= max(ellipse[,1])]
+
+    ## x-axis with labels and ticks
+    axis(side = 1,
+         at = x.axis.ticks,
+         col = layout$abanico$colour$xtck1,
+         col.axis = layout$abanico$colour$xtck1,
+         labels = NA,
+         tcl = -layout$abanico$dimension$xtcl1 / 200,
+         cex = cex)
+    axis(side = 1,
+         at = x.axis.ticks,
+         line = 2 * layout$abanico$dimension$xtck1.line / 100 - 2,
+         lwd = 0,
+         col = layout$abanico$colour$xtck1,
+         family = layout$abanico$font.type$xtck1,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$xtck1],
+         col.axis = layout$abanico$colour$xtck1,
+         cex.axis = layout$abanico$font.size$xlab1/12)
+
+    ## extend axis line to right side of the plot
+    lines(x = c(max(x.axis.ticks), max(ellipse[,1])),
+          y = c(limits.y[1], limits.y[1]),
+          col = layout$abanico$colour$xtck1)
+
+    ## draw closing tick on right hand side
+    axis(side = 1,
+         tcl = -layout$abanico$dimension$xtcl1 / 200,
+         lwd = 0,
+         lwd.ticks = 1,
+         at = limits.x[2],
+         labels = FALSE,
+         col = layout$abanico$colour$xtck1)
+
+    axis(side = 1,
+         tcl = layout$abanico$dimension$xtcl2 / 200,
+         lwd = 0,
+         lwd.ticks = 1,
+         at = limits.x[2],
+         labels = FALSE,
+         col = layout$abanico$colour$xtck2)
+
+    ## add lower axis label
+    mtext(xlab[2],
+          at = (limits.x[1] + max(ellipse[,1])) / 2,
+          side = 1,
+          line = 2.5 * layout$abanico$dimension$xlab1.line / 100,
+          col = layout$abanico$colour$xlab1,
+          family = layout$abanico$font.type$xlab1,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$xlab1],
+          cex = cex * layout$abanico$font.size$xlab1/12)
+
+    ## add upper axis label
+    mtext(xlab[1],
+          at = (limits.x[1] + max(ellipse[,1])) / 2,
+          side = 1,
+          line = -3.5 * layout$abanico$dimension$xlab2.line / 100,
+          col = layout$abanico$colour$xlab2,
+          family = layout$abanico$font.type$xlab2,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$xlab2],
+          cex = cex * layout$abanico$font.size$xlab2/12)
+
+    ## plot upper x-axis
+    axis(side = 1,
+         at = x.axis.ticks[-1],
+         col = layout$abanico$colour$xtck2,
+         col.axis = layout$abanico$colour$xtck2,
+         labels = NA,
+         tcl = layout$abanico$dimension$xtcl2 / 200,
+         cex = cex)
+
+    ## remove first tick label (infinity)
+    label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)]
+
+    axis(side = 1,
+         at = x.axis.ticks[-1],
+         labels = label.x.upper,
+         line = -1 * layout$abanico$dimension$xtck2.line / 100 - 2,
+         lwd = 0,
+         col = layout$abanico$colour$xtck2,
+         family = layout$abanico$font.type$xtck2,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$xtck2],
+         col.axis = layout$abanico$colour$xtck2,
+         cex.axis = layout$abanico$font.size$xlab2/12)
+
+    ## plot y-axis
+    if(y.axis == TRUE) {
+      char.height <- par()$cxy[2]
+      tick.space <- axisTicks(usr = limits.y, log = FALSE)
+      tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space)
+      if(tick.space < char.height * 1.7) {
+        axis(side = 2,
+             tcl = -layout$abanico$dimension$ytcl / 200,
+             lwd = 1,
+             lwd.ticks = 1,
+             at = c(-2, 2),
+             labels = c("", ""),
+             las = 1,
+             col = layout$abanico$colour$ytck)
+
+        axis(side = 2,
+             at = 0,
+             tcl = 0,
+             line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+             labels = paste("\u00B1", "2"),
+             las = 1,
+             family = layout$abanico$font.type$ytck,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$ytck],
+             col.axis = layout$abanico$colour$ytck,
+             cex.axis = layout$abanico$font.size$ylab/12)
+      } else {
+        axis(side = 2,
+             at = seq(-2, 2, by = 2),
+             col = layout$abanico$colour$ytck,
+             col.axis = layout$abanico$colour$ytck,
+             labels = NA,
+             las = 1,
+             tcl = -layout$abanico$dimension$ytcl / 200,
+             cex = cex)
+        axis(side = 2,
+             at = seq(-2, 2, by = 2),
+             line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+             lwd = 0,
+             las = 1,
+             col = layout$abanico$colour$ytck,
+             family = layout$abanico$font.type$ytck,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$ytck],
+             col.axis = layout$abanico$colour$ytck,
+             cex.axis = layout$abanico$font.size$ylab/12)
+      }
+    } else {
+      axis(side = 2,
+           at = 0,
+           col = layout$abanico$colour$ytck,
+           col.axis = layout$abanico$colour$ytck,
+           labels = NA,
+           las = 1,
+           tcl = -layout$abanico$dimension$ytcl / 200,
+           cex = cex)
+      axis(side = 2,
+           at = 0,
+           line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+           lwd = 0,
+           las = 1,
+           col = layout$abanico$colour$ytck,
+           family = layout$abanico$font.type$ytck,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$ytck],
+           col.axis = layout$abanico$colour$ytck,
+           cex.axis = layout$abanico$font.size$ylab/12)
+    }
+
+    ## plot minor z-ticks
+    for(i in 1:length(tick.values.minor)) {
+      lines(x = c(par()$usr[2],
+                  (1 + 0.007 * cex * layout$abanico$dimension$ztcl / 100) *
+                    par()$usr[2]),
+            y = c((tick.values.minor[i] - z.central.global) *
+                    min(ellipse[,1]),
+                  (tick.values.minor[i] - z.central.global) *
+                    min(ellipse[,1])),
+            col = layout$abanico$colour$ztck)
+    }
+
+
+    ## plot major z-ticks
+    for(i in 1:length(tick.values.major)) {
+      lines(x = c(par()$usr[2],
+                  (1 + 0.015 * cex * layout$abanico$dimension$ztcl / 100) *
+                    par()$usr[2]),
+            y = c((tick.values.major[i] - z.central.global) *
+                    min(ellipse[,1]),
+                  (tick.values.major[i] - z.central.global) *
+                    min(ellipse[,1])),
+            col = layout$abanico$colour$ztck)
+    }
+
+    ## plot z-axes
+    lines(ellipse, col = layout$abanico$colour$border)
+    lines(rep(par()$usr[2], nrow(ellipse)), ellipse[,2],
+          col = layout$abanico$colour$ztck)
+
+
+    ## plot z-axis text
+    text(x = (1 + 0.04 * cex * layout$abanico$dimension$ztcl / 100) *
+           par()$usr[2],
+         y = (tick.values.major - z.central.global) * min(ellipse[,1]),
+         labels = label.z.text,
+         adj = 0,
+         family = layout$abanico$font.type$ztck,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$ztck],
+         cex = cex * layout$abanico$font.size$ztck/12)
+
+
+    ## plot z-label
+    mtext(text = zlab,
+          at = mean(x = c(min(ellipse[,2]),
+                          max(ellipse[,2])),
+                    na.rm = TRUE),
+          #        at = 0, ## BUG from version 0.4.0, maybe removed in future
+          side = 4,
+          las = 3,
+          adj = 0.5,
+          line = 5 * layout$abanico$dimension$zlab.line / 100,
+          col = layout$abanico$colour$zlab,
+          family = layout$abanico$font.type$zlab,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$zlab],
+          cex = cex * layout$abanico$font.size$zlab/12)
+
+    ## plot values and optionally error bars
+    if(error.bars == TRUE) {
+      for(i in 1:length(data)) {
+        arrows(x0 = arrow.coords[[i]][,1],
+               x1 = arrow.coords[[i]][,2],
+               y0 = arrow.coords[[i]][,3],
+               y1 = arrow.coords[[i]][,4],
+               length = 0,
+               angle = 90,
+               code = 3,
+               col = value.bar[i])
+      }
+    }
+
+    for(i in 1:length(data)) {
+      points(data[[i]][,6][data[[i]][,6] <= limits.x[2]],
+             data[[i]][,8][data[[i]][,6] <= limits.x[2]],
+             col = value.dot[i],
+             pch = pch[i],
+             cex = layout$abanico$dimension$pch / 100)
+    }
+
+    ## calculate KDE width
+    KDE.max <- 0
+
+    for(i in 1:length(data)) {
+
+      KDE.max <- ifelse(test = KDE.max < max(KDE[[i]][,2]),
+                        yes = max(KDE[[i]][,2]),
+                        no = KDE.max)
+
+    }
+
+    ## optionally adjust KDE width for boxplot option
+    if(boxplot == TRUE) {
+
+      KDE.max <- 1.25 * KDE.max
+    }
+
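+    ## KDE.scale converts density values to the horizontal range between
+    ## the KDE base line (xy.0[1]) and the right plot border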
+    KDE.scale <- (par()$usr[2] - xy.0[1]) / (KDE.max * 1.05)
+
+    ## optionally add KDE plot
+    if(kde == TRUE) {
+
+      ## plot KDE lines
+      for(i in 1:length(data)) {
+        polygon(x = xy.0[1] + KDE[[i]][,2] * KDE.scale,
+                y = (KDE[[i]][,1] - z.central.global) * min(ellipse[,1]),
+                col = kde.fill[i],
+                border = kde.line[i],
+                lwd = 1.7)
+      }
+
+      ## plot KDE x-axis
+      axis(side = 1,
+           at = c(xy.0[1], par()$usr[2]),
+           col = layout$abanico$colour$xtck3,
+           col.axis = layout$abanico$colour$xtck3,
+           labels = NA,
+           tcl = -layout$abanico$dimension$xtcl3 / 200,
+           cex = cex)
+
+      axis(side = 1,
+           at = c(xy.0[1], par()$usr[2]),
+           labels = as.character(round(c(0, KDE.max.plot), 3)),
+           line = 2 * layout$abanico$dimension$xtck3.line / 100 - 2,
+           lwd = 0,
+           col = layout$abanico$colour$xtck3,
+           family = layout$abanico$font.type$xtck3,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$xtck3],
+           col.axis = layout$abanico$colour$xtck3,
+           cex.axis = layout$abanico$font.size$xtck3/12)
+
+      mtext(text = paste(xlab[3],
+                         " (bw ",
+                         round(x = KDE.bw,
+                               digits = 3),
+                         ")",
+                         sep = ""),
+            at = (xy.0[1] + par()$usr[2]) / 2,
+            side = 1,
+            line = 2.5 * layout$abanico$dimension$xlab3.line / 100,
+            col = layout$abanico$colour$xlab3,
+            family = layout$abanico$font.type$xlab3,
+            font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                           layout$abanico$font.deco$xlab3],
+            cex = cex * layout$abanico$font.size$xlab3/12)
+    }
+
+    ## optionally add histogram or dot plot axis
+    if(hist == TRUE) {
+      axis(side = 1,
+           at = c(xy.0[1], par()$usr[2]),
+           labels = as.character(c(0, hist.max.plot)),
+           line = -1 * layout$abanico$dimension$xtck3.line / 100 - 2,
+           lwd = 0,
+           col = layout$abanico$colour$xtck3,
+           family = layout$abanico$font.type$xtck3,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$xtck3],
+           col.axis = layout$abanico$colour$xtck3,
+           cex.axis = layout$abanico$font.size$xtck3/12)
+
+      ## add label
+      mtext(text = "n",
+            at = (xy.0[1] + par()$usr[2]) / 2,
+            side = 1,
+            line = -3.5 * layout$abanico$dimension$xlab2.line / 100,
+            col = layout$abanico$colour$xlab2,
+            family = layout$abanico$font.type$xlab2,
+            font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                           layout$abanico$font.deco$xlab2],
+            cex = cex * layout$abanico$font.size$xlab2/12)
+
+      ## plot ticks
+      axis(side = 1,
+           at = c(xy.0[1], par()$usr[2]),
+           col = layout$abanico$colour$xtck2,
+           col.axis = layout$abanico$colour$xtck2,
+           labels = NA,
+           tcl = layout$abanico$dimension$xtcl2 / 200,
+           cex = cex)
+
+      ## calculate scaling factor for histogram bar heights
+      hist.scale <- (par()$usr[2] - xy.0[1]) / (KDE.max.plot * 1.05)
+
+      ## draw each bar for each data set
+      for(i in 1:length(data)) {
+        for(j in 1:length(hist.data[[i]]$density)) {
+          ## calculate x-coordinates
+          hist.x.i <- c(xy.0[1],
+                        xy.0[1],
+                        xy.0[1] + hist.data[[i]]$density[j] * hist.scale,
+                        xy.0[1] + hist.data[[i]]$density[j] * hist.scale)
+
+          ## calculate y-coordinates
+          hist.y.i <- c((hist.data[[i]]$breaks[j] - z.central.global) *
+                          min(ellipse[,1]),
+                        (hist.data[[i]]$breaks[j + 1] - z.central.global) *
+                          min(ellipse[,1]),
+                        (hist.data[[i]]$breaks[j + 1] - z.central.global) *
+                          min(ellipse[,1]),
+                        (hist.data[[i]]$breaks[j] - z.central.global) *
+                          min(ellipse[,1]))
+
+          ## remove data out of z-axis range
+          hist.y.i <- ifelse(hist.y.i < min(ellipse[,2]),
+                             min(ellipse[,2]),
+                             hist.y.i)
+          hist.y.i <- ifelse(hist.y.i > max(ellipse[,2]),
+                             max(ellipse[,2]),
+                             hist.y.i)
+
+          ## draw the bars
+          polygon(x = hist.x.i,
+                  y = hist.y.i,
+                  col = kde.fill[i],
+                  border = kde.line[i])
+        }
+      }
+    }
+
+    ## optionally add dot plot
+    if(dots == TRUE) {
+      for(i in 1:length(data)) {
+        for(j in 1:length(hist.data[[i]]$counts)) {
+
+          ## calculate horizontal distance between neighbouring dots
+          dots.distance <- (par()$usr[2] - (xy.0[1] + par()$cxy[1] * 0.4)) / hist.max.plot
+
+          dots.x.i <- seq(from = xy.0[1] + par()$cxy[1] * 0.4,
+                          by = dots.distance,
+                          length.out = hist.data[[i]]$counts[j])
+
+          dots.y.i <- rep((hist.data[[i]]$mids[j] - z.central.global) *
+                            min(ellipse[,1]), length(dots.x.i))
+
+          ## remove data out of z-axis range
+          dots.x.i <- dots.x.i[dots.y.i >= min(ellipse[,2]) &
+                                 dots.y.i <= max(ellipse[,2])]
+          dots.y.i <- dots.y.i[dots.y.i >= min(ellipse[,2]) &
+                                 dots.y.i <= max(ellipse[,2])]
+
+          if(max(c(0, dots.x.i), na.rm = TRUE) >= (par()$usr[2] -
+                                                   par()$cxy[1] * 0.4)) {
+            dots.y.i <- dots.y.i[dots.x.i < (par()$usr[2] - par()$cxy[1] * 0.4)]
+            dots.x.i <- dots.x.i[dots.x.i < (par()$usr[2] - par()$cxy[1] * 0.4)]
+            pch.dots <- c(rep(20, length(dots.x.i) - 1), 15)
+          } else {
+            pch.dots <- rep(20, length(dots.x.i))
+          }
+
+          ## plot points; the final symbol (pch 15) marks a truncated row
+          points(x = dots.x.i,
+                 y = dots.y.i,
+                 pch = pch.dots,
+                 cex = 0.7 * cex,
+                 col = kde.line[i])
+
+        }
+      }
+    }
+
+    ## optionally add box plot
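+    ## the box is drawn in the outer part of the KDE area, between 0.85
+    ## and 0.95 of the maximum KDE width: median line, p25-p75 box,
+    ## whiskers at 0.9 and outlier points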
+    if(boxplot == TRUE) {
+
+      for(i in 1:length(data)) {
+
+        ## draw median line
+        lines(x = c(xy.0[1] + KDE.max * 0.85, xy.0[1] + KDE.max * 0.95),
+              y = c((boxplot.data[[i]]$stats[3,1] - z.central.global) *
+                      min(ellipse[,1]),
+                    (boxplot.data[[i]]$stats[3,1] - z.central.global) *
+                      min(ellipse[,1])),
+              lwd = 2,
+              col = kde.line[i])
+
+        ## draw p25-p75-polygon
+        polygon(x = c(xy.0[1] + KDE.max * 0.85,
+                      xy.0[1] + KDE.max * 0.85,
+                      xy.0[1] + KDE.max * 0.95,
+                      xy.0[1] + KDE.max * 0.95),
+                y = c((boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                        min(ellipse[,1]),
+                      (boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                        min(ellipse[,1]),
+                      (boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                        min(ellipse[,1]),
+                      (boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                        min(ellipse[,1])),
+                border = kde.line[i])
+
+        ## draw whiskers
+        lines(x = c(xy.0[1] + KDE.max * 0.9,
+                    xy.0[1] + KDE.max * 0.9),
+              y = c((boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                      min(ellipse[,1]),
+                    (boxplot.data[[i]]$stats[1,1] - z.central.global) *
+                      min(ellipse[,1])),
+              col = kde.line[i])
+
+        lines(x = c(xy.0[1] + KDE.max * 0.87,
+                    xy.0[1] + KDE.max * 0.93),
+              y = rep((boxplot.data[[i]]$stats[1,1] - z.central.global) *
+                        min(ellipse[,1]), 2),
+              col = kde.line[i])
+
+        lines(x = c(xy.0[1] + KDE.max * 0.9,
+                    xy.0[1] + KDE.max * 0.9),
+              y = c((boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                      min(ellipse[,1]),
+                    (boxplot.data[[i]]$stats[5,1] - z.central.global) *
+                      min(ellipse[,1])),
+              col = kde.line[i])
+
+        lines(x = c(xy.0[1] + KDE.max * 0.87,
+                    xy.0[1] + KDE.max * 0.93),
+              y = rep((boxplot.data[[i]]$stats[5,1] - z.central.global) *
+                        min(ellipse[,1]), 2),
+              col = kde.line[i])
+
+        ## draw outlier points
+        points(x = rep(xy.0[1] + KDE.max * 0.9,
+                       length(boxplot.data[[i]]$out)),
+               y = (boxplot.data[[i]]$out - z.central.global) *
+                 min(ellipse[,1]),
+               cex = cex * 0.8,
+               col = kde.line[i])
+      }
+    }
+
+    ## optionally add statistical measures (e.g. min, max, median) as text
+    if(length(stats) > 0) {
+      text(x = stats.data[,1],
+           y = stats.data[,2],
+           pos = 2,
+           labels = round(stats.data[,3], 1),
+           family = layout$abanico$font.type$stats,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$stats],
+           cex = cex * layout$abanico$font.size$stats/12,
+           col = layout$abanico$colour$stats)
+    }
+
+    ## optionally add rug
+    if(rug == TRUE) {
+      for(i in 1:length(rug.coords)) {
+        lines(x = rug.coords[[i]][1,],
+              y = rug.coords[[i]][2,],
+              col = value.rug[data.global[i,10]])
+      }
+    }
+
+    ## plot KDE base line
+    lines(x = c(xy.0[1], xy.0[1]),
+          y = c(min(ellipse[,2]), max(ellipse[,2])),
+          col = layout$abanico$colour$border)
+
+    ## draw border around plot
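+    ## frame = 1: fan-shaped outline starting at the plot origin;
+    ## frame = 2: as 1, but with a vertical left edge from -2 to +2;
+    ## frame = 3: plain rectangular border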
+    if(frame == 1) {
+      polygon(x = c(limits.x[1], min(ellipse[,1]), par()$usr[2],
+                    par()$usr[2], min(ellipse[,1])),
+              y = c(0, max(ellipse[,2]), max(ellipse[,2]),
+                    min(ellipse[,2]), min(ellipse[,2])),
+              border = layout$abanico$colour$border,
+              lwd = 0.8)
+    } else if(frame == 2) {
+      polygon(x = c(limits.x[1], min(ellipse[,1]), par()$usr[2],
+                    par()$usr[2], min(ellipse[,1]), limits.x[1]),
+              y = c(2, max(ellipse[,2]), max(ellipse[,2]),
+                    min(ellipse[,2]), min(ellipse[,2]), -2),
+              border = layout$abanico$colour$border,
+              lwd = 0.8)
+    } else if(frame == 3) {
+      polygon(x = c(limits.x[1], par()$usr[2],
+                    par()$usr[2], limits.x[1]),
+              y = c(max(ellipse[,2]), max(ellipse[,2]),
+                    min(ellipse[,2]), min(ellipse[,2])),
+              border = layout$abanico$colour$border,
+              lwd = 0.8)
+    }
+
+    ## optionally add legend content
+    if(missing(legend) == FALSE) {
+      ## store and change font family
+      par.family <- par()$family
+      par(family = layout$abanico$font.type$legend)
+
+      legend(x = legend.pos[1],
+             y = 0.8 * legend.pos[2],
+             xjust = legend.adj[1],
+             yjust = legend.adj[2],
+             legend = legend,
+             pch = pch,
+             col = value.dot,
+             text.col = value.dot,
+             text.font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                                 layout$abanico$font.deco$legend],
+             cex = cex * layout$abanico$font.size$legend/12,
+             bty = "n")
+
+      ## restore font family
+      par(family = par.family)
+    }
+
+    ## optionally add subheader text
+    mtext(text = mtext,
+          side = 3,
+          line = (shift.lines - 2) * layout$abanico$dimension$mtext / 100,
+          col = layout$abanico$colour$mtext,
+          family = layout$abanico$font.type$mtext,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$mtext],
+          cex = cex * layout$abanico$font.size$mtext / 12)
+
+    ## add summary content
+    for(i in 1:length(data)) {
+      if(summary.pos[1] != "sub") {
+        text(x = summary.pos[1],
+             y = summary.pos[2],
+             adj = summary.adj,
+             labels = label.text[[i]],
+             col = summary.col[i],
+             family = layout$abanico$font.type$summary,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$summary],
+             cex = cex * layout$abanico$font.size$summary / 12)
+      } else {
+        if(mtext == "") {
+          mtext(side = 3,
+                line = (shift.lines - 1 - i) *
+                  layout$abanico$dimension$summary / 100,
+                text = label.text[[i]],
+                col = summary.col[i],
+                family = layout$abanico$font.type$summary,
+                font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                               layout$abanico$font.deco$summary],
+                cex = cex * layout$abanico$font.size$summary / 12)
+        }
+      }
+    }
+  } else {
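+    ## rotated plot version: the fan opens upwards, i.e. the x/y roles
+    ## of the branch above are swapped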
+    ## setup plot area
+    par(mar = c(4, 4, shift.lines + 5, 4),
+        xpd = TRUE,
+        cex = cex)
+
+    if(layout$abanico$dimension$figure.width != "auto" |
+       layout$abanico$dimension$figure.height != "auto") {
+      par(mai = layout$abanico$dimension$margin / 25.4,
+          pin = c(layout$abanico$dimension$figure.width / 25.4 -
+                    layout$abanico$dimension$margin[2] / 25.4 -
+                    layout$abanico$dimension$margin[4] / 25.4,
+                  layout$abanico$dimension$figure.height / 25.4 -
+                    layout$abanico$dimension$margin[1] / 25.4 -
+                    layout$abanico$dimension$margin[3]/25.4))
+    }
+
+    ## create empty plot
+    par(new = TRUE)
+    plot(NA,
+         xlim = limits.y,
+         ylim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)),
+         main = "",
+         sub = sub,
+         xlab = "",
+         ylab = "",
+         xaxs = "i",
+         yaxs = "i",
+         frame.plot = FALSE,
+         axes = FALSE)
+
+    ## add y-axis label
+    mtext(text = ylab,
+          at = 0,
+          adj = 0.5,
+          side = 1,
+          line = 3 * layout$abanico$dimension$ylab.line / 100,
+          col = layout$abanico$colour$ylab,
+          family = layout$abanico$font.type$ylab,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$ylab],
+          cex = cex * layout$abanico$font.size$ylab/12)
+
+    ## calculate upper x-axis label values
+    label.x.upper <- if(log.z == TRUE) {
+      as.character(round(1/axTicks(side = 2)[-1] * 100, 1))
+    } else {
+      as.character(round(1/axTicks(side = 2)[-1], 1))
+    }
+
+    # optionally, plot 2-sigma-bar
+    if(bar[1] != FALSE) {
+      for(i in 1:length(bar)) {
+        polygon(x = bars[i,1:4],
+                y = bars[i,5:8],
+                col = bar.fill[i],
+                border = bar.line[i])
+      }
+    }
+
+    ## mask parts drawn beyond the plot border with the background colour
+    polygon(y = c(par()$usr[2],
+                  par()$usr[2],
+                  par()$usr[2] * 2,
+                  par()$usr[2] * 2),
+            x = c(min(ellipse[,2]) * 2,
+                  max(ellipse[,2]) * 2,
+                  max(ellipse[,2]) * 2,
+                  min(ellipse[,2]) * 2),
+            col = bg.original,
+            lty = 0)
+
+    ## optionally, plot dispersion polygon
+    if(polygon.fill[1] != "none") {
+      for(i in 1:length(data)) {
+        polygon(x = polygons[i,8:14],
+                y = polygons[i,1:7],
+                col = polygon.fill[i],
+                border = polygon.line[i])
+      }
+    }
+
+    ## optionally, add minor grid lines
+    if(grid.minor != "none") {
+      for(i in 1:length(tick.values.minor)) {
+        lines(y = c(limits.x[1], min(ellipse[,1])),
+              x = c(0, (tick.values.minor[i] - z.central.global) * min(ellipse[,1])),
+              col = grid.minor,
+              lwd = 1)
+      }
+      for(i in 1:length(tick.values.minor)) {
+        lines(y = c(xy.0[2], par()$usr[2]),
+              x = c((tick.values.minor[i] - z.central.global) * min(ellipse[,1]),
+                    (tick.values.minor[i] - z.central.global) * min(ellipse[,1])),
+              col = grid.minor,
+              lwd = 1)
+      }
+    }
+
+    ## optionally, add major grid lines
+    if(grid.major != "none") {
+      for(i in 1:length(tick.values.major)) {
+        lines(y = c(limits.x[1], min(ellipse[,2])),
+              x = c(0, (tick.values.major[i] - z.central.global) * min(ellipse[,2])),
+              col = grid.major,
+              lwd = 1)
+      }
+      for(i in 1:length(tick.values.major)) {
+        lines(y = c(xy.0[2], y.max),
+              x = c((tick.values.major[i] - z.central.global) * min(ellipse[,2]),
+                    (tick.values.major[i] - z.central.global) * min(ellipse[,2])),
+              col = grid.major,
+              lwd = 1)
+      }
+    }
+
+    ## optionally, plot lines for each bar
+    if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE & length(data) == 1) {
+      if(bar[1] == TRUE & length(bar) == 1) {
+        bar[1] <- z.central.global
+      }
+      for(i in 1:length(bar)) {
+        x2 <- r / sqrt(1 + f^2 * (
+          bar[i] - z.central.global)^2)
+        y2 <- (bar[i] - z.central.global) * x2
+        lines(x = c(0, y2, y2, y2),
+              y = c(limits.x[1], x2, xy.0[2], par()$usr[4]),
+              lty = lty[i],
+              lwd = lwd[i],
+              col = centrality.col[i])
+      }
+    }
+
+    ## optionally add further lines
+    if(missing(line) == FALSE) {
+      for(i in 1:length(line)) {
+        lines(y = line.coords[[i]][1,1:3],
+              x = line.coords[[i]][2,1:3],
+              col = line.col[i],
+              lty = line.lty[i]
+              )
+        text(y = line.coords[[i]][1,3],
+             x = line.coords[[i]][2,3] + par()$cxy[2] * 0.3,
+             labels = line.label[i],
+             pos = 2,
+             col = line.col[i],
+             cex = cex * 0.9)
+      }
+    }
+
+    ## add plot title
+    cex.old <- par()$cex
+    par(cex = layout$abanico$font.size$main / 12)
+    title(main = main,
+          family = layout$abanico$font.type$main,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$main],
+          col.main = layout$abanico$colour$main,
+          line = (shift.lines + 3.5) * layout$abanico$dimension$main / 100)
+    par(cex = cex.old)
+
+    ## calculate lower x-axis (precision)
+    x.axis.ticks <- axTicks(side = 2)
+    x.axis.ticks <- x.axis.ticks[c(TRUE, x.axis.ticks <= limits.x[2])]
+    x.axis.ticks <- x.axis.ticks[x.axis.ticks <= max(ellipse[,2])]
+
+    ## x-axis with labels and ticks
+    axis(side = 2,
+         at = x.axis.ticks,
+         col = layout$abanico$colour$xtck1,
+         col.axis = layout$abanico$colour$xtck1,
+         labels = NA,
+         tcl = -layout$abanico$dimension$xtcl1 / 200,
+         cex = cex)
+    axis(side = 2,
+         at = x.axis.ticks,
+         line = 2 * layout$abanico$dimension$xtck1.line / 100 - 2,
+         lwd = 0,
+         col = layout$abanico$colour$xtck1,
+         family = layout$abanico$font.type$xtck1,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$xtck1],
+         col.axis = layout$abanico$colour$xtck1,
+         cex.axis = layout$abanico$font.size$xlab1/12)
+
+    ## extend axis line to right side of the plot
+    lines(y = c(max(x.axis.ticks), max(ellipse[,2])),
+          x = c(limits.y[1], limits.y[1]),
+          col = layout$abanico$colour$xtck1)
+
+    ## draw closing tick on right hand side
+    axis(side = 2,
+         tcl = -layout$abanico$dimension$xtcl1 / 200,
+         lwd = 0,
+         lwd.ticks = 1,
+         at = limits.x[2],
+         labels = FALSE,
+         col = layout$abanico$colour$xtck1)
+
+    axis(side = 2,
+         tcl = layout$abanico$dimension$xtcl2 / 200,
+         lwd = 0,
+         lwd.ticks = 1,
+         at = limits.x[2],
+         labels = FALSE,
+         col = layout$abanico$colour$xtck2)
+
+    ## add lower axis label
+    mtext(xlab[2],
+          at = (limits.x[1] + max(ellipse[,2])) / 2,
+          side = 2,
+          line = 2.5 * layout$abanico$dimension$xlab1.line / 100,
+          col = layout$abanico$colour$xlab1,
+          family = layout$abanico$font.type$xlab1,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$xlab1],
+          cex = cex * layout$abanico$font.size$xlab1/12)
+
+    ## add upper axis label
+    mtext(xlab[1],
+          at = (limits.x[1] + max(ellipse[,2])) / 2,
+          side = 2,
+          line = -3.5 * layout$abanico$dimension$xlab2.line / 100,
+          col = layout$abanico$colour$xlab2,
+          family = layout$abanico$font.type$xlab2,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$xlab2],
+          cex = cex * layout$abanico$font.size$xlab2/12)
+
+    ## plot upper x-axis
+    axis(side = 2,
+         at = x.axis.ticks[-1],
+         col = layout$abanico$colour$xtck2,
+         col.axis = layout$abanico$colour$xtck2,
+         labels = NA,
+         tcl = layout$abanico$dimension$xtcl2 / 200,
+         cex = cex)
+
+    ## remove first tick label (infinity)
+    label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)]
+
+    axis(side = 2,
+         at = x.axis.ticks[-1],
+         labels = label.x.upper,
+         line = -1 * layout$abanico$dimension$xtck2.line / 100 - 2,
+         lwd = 0,
+         col = layout$abanico$colour$xtck2,
+         family = layout$abanico$font.type$xtck2,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$xtck2],
+         col.axis = layout$abanico$colour$xtck2,
+         cex.axis = layout$abanico$font.size$xlab2/12)
+
+    ## plot y-axis
+    if(y.axis == TRUE) {
+      char.height <- par()$cxy[2]
+      tick.space <- axisTicks(usr = limits.y, log = FALSE)
+      tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space)
+      if(tick.space < char.height * 1.7) {
+        axis(side = 1,
+             tcl = -layout$abanico$dimension$ytcl / 200,
+             lwd = 1,
+             lwd.ticks = 1,
+             at = c(-2, 2),
+             labels = c("", ""),
+             las = 1,
+             col = layout$abanico$colour$ytck)
+
+        axis(side = 1,
+             at = 0,
+             tcl = 0,
+             line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+             labels = paste("\u00B1", "2"),
+             las = 1,
+             family = layout$abanico$font.type$ytck,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$ytck],
+             col.axis = layout$abanico$colour$ytck,
+             cex.axis = layout$abanico$font.size$ylab/12)
+      } else {
+        axis(side = 1,
+             at = seq(-2, 2, by = 2),
+             col = layout$abanico$colour$ytck,
+             col.axis = layout$abanico$colour$ytck,
+             labels = NA,
+             las = 1,
+             tcl = -layout$abanico$dimension$ytcl / 200,
+             cex = cex)
+        axis(side = 1,
+             at = seq(-2, 2, by = 2),
+             line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+             lwd = 0,
+             las = 1,
+             col = layout$abanico$colour$ytck,
+             family = layout$abanico$font.type$ytck,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$ytck],
+             col.axis = layout$abanico$colour$ytck,
+             cex.axis = layout$abanico$font.size$ylab/12)
+      }
+    } else {
+      axis(side = 1,
+           at = 0,
+           col = layout$abanico$colour$ytck,
+           col.axis = layout$abanico$colour$ytck,
+           labels = NA,
+           las = 1,
+           tcl = -layout$abanico$dimension$ytcl / 200,
+           cex = cex)
+      axis(side = 1,
+           at = 0,
+           line = 2 * layout$abanico$dimension$ytck.line / 100 - 2,
+           lwd = 0,
+           las = 1,
+           col = layout$abanico$colour$ytck,
+           family = layout$abanico$font.type$ytck,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$ytck],
+           col.axis = layout$abanico$colour$ytck,
+           cex.axis = layout$abanico$font.size$ylab/12)
+    }
+
+    ## plot minor z-ticks
+    for(i in 1:length(tick.values.minor)) {
+      lines(y = c(par()$usr[4],
+                  (1 + 0.015 * cex * layout$abanico$dimension$ztcl / 100) *
+                    y.max),
+            x = c((tick.values.minor[i] - z.central.global) *
+                    min(ellipse[,2]),
+                  (tick.values.minor[i] - z.central.global) *
+                    min(ellipse[,2])),
+            col = layout$abanico$colour$ztck)
+    }
+
+    ## plot major z-ticks
+    for(i in 1:length(tick.values.major)) {
+      lines(y = c(par()$usr[4],
+                  (1 + 0.03 * cex * layout$abanico$dimension$ztcl / 100) *
+                    y.max),
+            x = c((tick.values.major[i] - z.central.global) *
+                    min(ellipse[,2]),
+                  (tick.values.major[i] - z.central.global) *
+                    min(ellipse[,2])),
+            col = layout$abanico$colour$ztck)
+    }
+
+    ## plot z-axes
+    lines(ellipse, col = layout$abanico$colour$border)
+    lines(y = rep(par()$usr[4], nrow(ellipse)),
+          x = ellipse[,1],
+          col = layout$abanico$colour$ztck)
+
+    ## plot z-axis text
+    text(y = (1 + 0.06 * cex * layout$abanico$dimension$ztcl / 100) *
+           y.max,
+         x = (tick.values.major - z.central.global) * min(ellipse[,2]),
+         labels = label.z.text,
+         adj = 0.5,
+         family = layout$abanico$font.type$ztck,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$abanico$font.deco$ztck],
+         cex = cex * layout$abanico$font.size$ztck/12)
+
+    ## plot z-label
+    mtext(text = zlab,
+          at = 0,
+          side = 3,
+          las = 1,
+          adj = 0.5,
+          line = 2.5 * layout$abanico$dimension$zlab.line / 100,
+          col = layout$abanico$colour$zlab,
+          family = layout$abanico$font.type$zlab,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$zlab],
+          cex = cex * layout$abanico$font.size$zlab/12)
+
+    ## plot values and optionally error bars
+    if(error.bars == TRUE) {
+      for(i in 1:length(data)) {
+        arrows(y0 = arrow.coords[[i]][,1],
+               y1 = arrow.coords[[i]][,2],
+               x0 = arrow.coords[[i]][,3],
+               x1 = arrow.coords[[i]][,4],
+               length = 0,
+               angle = 90,
+               code = 3,
+               col = value.bar[i])
+      }
+    }
+
+    for(i in 1:length(data)) {
+      points(y = data[[i]][,6][data[[i]][,6] <= limits.x[2]],
+             x = data[[i]][,8][data[[i]][,6] <= limits.x[2]],
+             col = value.dot[i],
+             pch = pch[i],
+             cex = layout$abanico$dimension$pch / 100)
+    }
+
+    ## calculate KDE width
+    KDE.max <- 0
+
+    for(i in 1:length(data)) {
+      KDE.max <- ifelse(test = KDE.max < max(KDE[[i]][,2]),
+                        yes = max(KDE[[i]][,2]),
+                        no = KDE.max)
+    }
+
+    ## optionally adjust KDE width for boxplot option
+    if(boxplot == TRUE) {
+
+      KDE.max <- 1.3 * KDE.max
+    }
+
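+    ## scaling factor that maps the highest KDE density to ca. 95 % of the
+    ## space between the KDE base line and the upper plot margin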
+    KDE.scale <- (par()$usr[4] - xy.0[2]) / (KDE.max * 1.05)
+
+    ## optionally add KDE plot
+    if(kde == TRUE) {
+
+      ## plot KDE lines
+      for(i in 1:length(data)) {
+        polygon(y = xy.0[2] + KDE[[i]][,2] * KDE.scale,
+                x = (KDE[[i]][,1] - z.central.global) * min(ellipse[,2]),
+                col = kde.fill[i],
+                border = kde.line[i],
+                lwd = 1.7)
+      }
+
+      ## plot KDE x-axis
+      axis(side = 2,
+           at = c(xy.0[2], y.max),
+           col = layout$abanico$colour$xtck3,
+           col.axis = layout$abanico$colour$xtck3,
+           labels = NA,
+           tcl = -layout$abanico$dimension$xtcl3 / 200,
+           cex = cex)
+
+      axis(side = 2,
+           at = c(xy.0[2], y.max),
+           labels = as.character(round(c(0, KDE.max.plot), 3)),
+           line = 2 * layout$abanico$dimension$xtck3.line / 100 - 2,
+           lwd = 0,
+           col = layout$abanico$colour$xtck3,
+           family = layout$abanico$font.type$xtck3,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$xtck3],
+           col.axis = layout$abanico$colour$xtck3,
+           cex.axis = layout$abanico$font.size$xtck3/12)
+
+      mtext(text = paste(xlab[3],
+                         " (bw ",
+                         round(x = KDE.bw,
+                               digits = 3),
+                         ")",
+                         sep = ""),
+            at = (xy.0[2] + y.max) / 2,
+            side = 2,
+            line = 2.5 * layout$abanico$dimension$xlab3.line / 100,
+            col = layout$abanico$colour$xlab3,
+            family = layout$abanico$font.type$xlab3,
+            font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                           layout$abanico$font.deco$xlab3],
+            cex = cex * layout$abanico$font.size$xlab3/12)
+    }
+
+    ## optionally add histogram or dot plot axis
+    if(hist == TRUE) {
+      axis(side = 2,
+           at = c(xy.0[2], y.max),
+           labels = as.character(c(0, hist.max.plot)),
+           line = -1 * layout$abanico$dimension$xtck3.line / 100 - 2,
+           lwd = 0,
+           col = layout$abanico$colour$xtck3,
+           family = layout$abanico$font.type$xtck3,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$xtck3],
+           col.axis = layout$abanico$colour$xtck3,
+           cex.axis = layout$abanico$font.size$xtck3/12)
+
+      ## add label
+      mtext(text = "n",
+            at = (xy.0[2] + y.max) / 2,
+            side = 2,
+            line = -3.5 * layout$abanico$dimension$xlab2.line / 100,
+            col = layout$abanico$colour$xlab2,
+            family = layout$abanico$font.type$xlab2,
+            font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                           layout$abanico$font.deco$xlab2],
+            cex = cex * layout$abanico$font.size$xlab2/12)
+
+      ## plot ticks
+      axis(side = 2,
+           at = c(xy.0[2], y.max),
+           col = layout$abanico$colour$xtck2,
+           col.axis = layout$abanico$colour$xtck2,
+           labels = NA,
+           tcl = layout$abanico$dimension$xtcl2 / 200,
+           cex = cex)
+
+      ## calculate scaling factor for histogram bar heights
+      hist.scale <- (par()$usr[4] - xy.0[2]) / (KDE.max.plot * 1.05)
+
+      ## draw each bar for each data set
+      for(i in 1:length(data)) {
+        for(j in 1:length(hist.data[[i]]$density)) {
+          ## calculate x-coordinates
+          hist.x.i <- c(xy.0[2],
+                        xy.0[2],
+                        xy.0[2] + hist.data[[i]]$density[j] * hist.scale,
+                        xy.0[2] + hist.data[[i]]$density[j] * hist.scale)
+
+          ## calculate y-coordinates
+          hist.y.i <- c((hist.data[[i]]$breaks[j] - z.central.global) *
+                          min(ellipse[,2]),
+                        (hist.data[[i]]$breaks[j + 1] - z.central.global) *
+                          min(ellipse[,2]),
+                        (hist.data[[i]]$breaks[j + 1] - z.central.global) *
+                          min(ellipse[,2]),
+                        (hist.data[[i]]$breaks[j] - z.central.global) *
+                          min(ellipse[,2]))
+
+          ## remove data out of z-axis range
+          hist.y.i <- ifelse(hist.y.i < min(ellipse[,1]),
+                             min(ellipse[,1]),
+                             hist.y.i)
+          hist.y.i <- ifelse(hist.y.i > max(ellipse[,1]),
+                             max(ellipse[,1]),
+                             hist.y.i)
+
+          ## draw the bars
+          polygon(y = hist.x.i,
+                  x = hist.y.i,
+                  col = kde.fill[i],
+                  border = kde.line[i])
+        }
+      }
+    }
+
+    ## optionally add dot plot
+    if(dots == TRUE) {
+      for(i in 1:length(data)) {
+        for(j in 1:length(hist.data[[i]]$counts)) {
+
+          ## calculate the spacing between stacked dots
+          dots.distance <- (par()$usr[4] - (xy.0[2] + par()$cxy[1] * 0.4)) / hist.max.plot
+
+          dots.x.i <- seq(from = xy.0[2] + par()$cxy[2] * 0.4,
+                          by = dots.distance,
+                          length.out = hist.data[[i]]$counts[j])
+
+          dots.y.i <- rep((hist.data[[i]]$mids[j] - z.central.global) *
+                            min(ellipse[,2]), length(dots.x.i))
+
+          ## remove data out of z-axis range
+          dots.x.i <- dots.x.i[dots.y.i >= min(ellipse[,1]) &
+                                 dots.y.i <= max(ellipse[,1])]
+          dots.y.i <- dots.y.i[dots.y.i >= min(ellipse[,1]) &
+                                 dots.y.i <= max(ellipse[,1])]
+
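+          ## truncate dot stacks that would exceed the upper plot margin;
+          ## pch.dots flags a truncated stack with a filled square (pch = 15)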
+          if(max(c(0, dots.x.i), na.rm = TRUE) >= (par()$usr[4] -
+                                                   par()$cxy[2] * 0.4)) {
+            dots.y.i <- dots.y.i[dots.x.i < (par()$usr[4] - par()$cxy[2] * 0.4)]
+            dots.x.i <- dots.x.i[dots.x.i < (par()$usr[4] - par()$cxy[2] * 0.4)]
+            pch.dots <- c(rep(20, length(dots.x.i) - 1), 15)
+          } else {
+            pch.dots <- rep(20, length(dots.x.i))
+          }
+
+          ## plot points
+          points(y = dots.x.i,
+                 x = dots.y.i,
+                 pch = "-",
+                 cex = 0.7 * cex,
+                 col = kde.line[i])
+        }
+      }
+    }
+
+    ## optionally add box plot
+    if(boxplot == TRUE) {
+
+      for(i in 1:length(data)) {
+
+        ## draw median line
+        lines(x = c((boxplot.data[[i]]$stats[3,1] - z.central.global) *
+                      min(ellipse[,2]),
+                    (boxplot.data[[i]]$stats[3,1] - z.central.global) *
+                      min(ellipse[,2])),
+              y = c(min(ellipse[,2]) + KDE.max * 0.91,
+                    xy.0[2] + KDE.max * 0.96),
+              lwd = 2,
+              col = kde.line[i])
+
+        ## draw p25-p75-polygon
+        polygon(y = c(min(ellipse[,2]) + KDE.max * 0.91,
+                      min(ellipse[,2]) + KDE.max * 0.91,
+                      xy.0[2] + KDE.max * 0.96,
+                      xy.0[2] + KDE.max * 0.96),
+                x = c((boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                        min(ellipse[,2]),
+                      (boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                        min(ellipse[,2]),
+                      (boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                        min(ellipse[,2]),
+                      (boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                        min(ellipse[,2])),
+                border = kde.line[i])
+
+        ## draw whiskers
+        lines(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91,
+                             xy.0[2] + KDE.max * 0.96)), 2),
+              x = c((boxplot.data[[i]]$stats[2,1] - z.central.global) *
+                      min(ellipse[,2]),
+                    (boxplot.data[[i]]$stats[1,1] - z.central.global) *
+                      min(ellipse[,2])),
+              col = kde.line[i])
+
+        lines(y = c(min(ellipse[,2]) + KDE.max * 0.91,
+                    xy.0[2] + KDE.max * 0.96),
+              x = rep((boxplot.data[[i]]$stats[1,1] - z.central.global) *
+                        min(ellipse[,2]), 2),
+              col = kde.line[i])
+
+        lines(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91,
+                             xy.0[2] + KDE.max * 0.96)), 2),
+              x = c((boxplot.data[[i]]$stats[4,1] - z.central.global) *
+                      min(ellipse[,2]),
+                    (boxplot.data[[i]]$stats[5,1] - z.central.global) *
+                      min(ellipse[,2])),
+              col = kde.line[i])
+
+        lines(y = c(min(ellipse[,2]) + KDE.max * 0.91,
+                    xy.0[2] + KDE.max * 0.96),
+              x = rep((boxplot.data[[i]]$stats[5,1] - z.central.global) *
+                        min(ellipse[,2]), 2),
+              col = kde.line[i])
+
+        ## draw outlier points
+        points(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91,
+                              xy.0[2] + KDE.max * 0.96)),
+                       length(boxplot.data[[i]]$out)),
+               x = (boxplot.data[[i]]$out - z.central.global) *
+                 min(ellipse[,2]),
+               cex = cex * 0.8,
+               col = kde.line[i])
+      }
+    }
+
+    ## optionally add statistics, i.e. min, max, median, as sample text
+    if(length(stats) > 0) {
+      text(y = stats.data[,1],
+           x = stats.data[,2],
+           pos = 2,
+           labels = round(stats.data[,3], 1),
+           family = layout$abanico$font.type$stats,
+           font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                          layout$abanico$font.deco$stats],
+           cex = cex * layout$abanico$font.size$stats/12,
+           col = layout$abanico$colour$stats)
+    }
+
+    ## optionally add rug
+    if(rug == TRUE) {
+      for(i in 1:length(rug.coords)) {
+        lines(y = rug.coords[[i]][1,],
+              x = rug.coords[[i]][2,],
+              col = value.rug[data.global[i,10]])
+      }
+    }
+
+    ## plot KDE base line
+    lines(y = c(xy.0[2], xy.0[2]),
+          x = c(min(ellipse[,1]), max(ellipse[,1])),
+          col = layout$abanico$colour$border)
+
+    ## draw border around plot
+    polygon(y = c(limits.x[1], min(ellipse[,2]), y.max,
+                  y.max, min(ellipse[,2])),
+            x = c(0, max(ellipse[,1]), max(ellipse[,1]),
+                  min(ellipse[,1]), min(ellipse[,1])),
+            border = layout$abanico$colour$border,
+            lwd = 0.8)
+
+    ## optionally add legend content
+    if(missing(legend) == FALSE) {
+      ## store and change font family
+      par.family <- par()$family
+      par(family = layout$abanico$font.type$legend)
+
+      legend(y = legend.pos[2],
+             x = 0.8 * legend.pos[1],
+             xjust = legend.adj[2],
+             yjust = legend.adj[1],
+             legend = legend,
+             pch = pch,
+             col = value.dot,
+             text.col = value.dot,
+             text.font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                                 layout$abanico$font.deco$legend],
+             cex = cex * layout$abanico$font.size$legend/12,
+             bty = "n")
+
+      ## restore font family
+      par(family = par.family)
+    }
+
+    ## optionally add subheader text
+    mtext(text = mtext,
+          side = 3,
+          line = (shift.lines - 2 + 3.5) * layout$abanico$dimension$mtext / 100,
+          col = layout$abanico$colour$mtext,
+          family = layout$abanico$font.type$mtext,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$abanico$font.deco$mtext],
+          cex = cex * layout$abanico$font.size$mtext / 12)
+
+    ## add summary content
+    for(i in 1:length(data)) {
+      if(summary.pos[1] != "sub") {
+        text(x = summary.pos[1],
+             y = summary.pos[2],
+             adj = summary.adj,
+             labels = label.text[[i]],
+             col = summary.col[i],
+             family = layout$abanico$font.type$summary,
+             font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                            layout$abanico$font.deco$summary],
+             cex = cex * layout$abanico$font.size$summary / 12)
+      } else {
+        if(mtext == "") {
+          mtext(side = 3,
+                line = (shift.lines - 1 + 3.5 - i) *
+                  layout$abanico$dimension$summary / 100 ,
+                text = label.text[[i]],
+                col = summary.col[i],
+                family = layout$abanico$font.type$summary,
+                font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                               layout$abanico$font.deco$summary],
+                cex = cex * layout$abanico$font.size$summary / 12)
+        }
+      }
+    }
+  }
+
+  ##sTeve
+  if(fun & !interactive){sTeve()}
+
+  ## create numeric output
+  plot.output <- list(xlim = limits.x,
+                      ylim = limits.y,
+                      zlim = limits.z,
+                      polar.box = c(limits.x[1],
+                                    limits.x[2],
+                                    min(ellipse[,2]),
+                                    max(ellipse[,2])),
+                      cartesian.box = c(xy.0[1],
+                                        par()$usr[2],
+                                        xy.0[2],
+                                        max(ellipse[,2])),
+                      plot.ratio = plot.ratio,
+                      data = data,
+                      data.global = data.global,
+                      KDE = KDE,
+                      par = par(no.readonly = TRUE))
+
+  ## INTERACTIVE PLOT ----------------------------------------------------------
+  if (interactive) {
+    if (!requireNamespace("plotly", quietly = TRUE))
+      stop("The interactive abanico plot requires the 'plotly' package. To install",
+           " this package run 'install.packages('plotly')' in your R console.",
+           call. = FALSE)
+
+    ##cheat R check (avoids 'no visible binding for global variable' notes)
+    x <- NA
+    y <- NA
+
+
+    ## tidy data ----
+    data <- plot.output
+    kde <- data.frame(x = data$KDE[[1]][ ,2], y = data$KDE[[1]][ ,1])
+
+    # radial scatter plot ----
+    point.text <- paste0("Measured value:</br>",
+                         data$data.global$De, " ± ", data$data.global$error,"</br>",
+                         "P(",format(data$data.global$precision,  digits = 2, nsmall = 1),", ",
+                         format(data$data.global$std.estimate,  digits = 2, nsmall = 1),")")
+
+    IAP <- plotly::plot_ly(data = data$data.global, x = precision, y = std.estimate,
+                           type = "scatter", mode = "markers",
+                           hoverinfo = "text", text = point.text,
+                           name = "Points",
+                           yaxis = "y")
+
+    ellipse <- as.data.frame(ellipse)
+    IAP <- plotly::add_trace(IAP, data = ellipse,
+                             x = ellipse.x, y = ellipse.y,
+                             hoverinfo = "none",
+                             name = "z-axis (left)",
+                             type = "scatter", mode = "lines",
+                             line = list(color = "black",
+                                         width = 1),
+                             yaxis = "y")
+
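+    ## add a second z-axis on the right-hand side by stretching the ellipse
+    ## x-coordinates by the hard-coded plot ratio of 1/0.75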
+    ellipse.right <- ellipse
+    ellipse.right$ellipse.x <- ellipse.right$ellipse.x * 1/0.75
+
+    IAP <- plotly::add_trace(IAP, data = ellipse.right,
+                             x = ellipse.x, y = ellipse.y,
+                             hoverinfo = "none",
+                             name = "z-axis (right)",
+                             type = "scatter", mode = "lines",
+                             line = list(color = "black",
+                                         width = 1),
+                             yaxis = "y")
+
+    # z-axis ticks
+    major.ticks.x <- c(data$xlim[2] * 1/0.75,
+                       (1 + 0.015 * layout$abanico$dimension$ztcl / 100) *
+                         data$xlim[2] * 1/0.75)
+    minor.ticks.x <- c(data$xlim[2] * 1/0.75,
+                       (1 + 0.01 * layout$abanico$dimension$ztcl / 100) *
+                         data$xlim[2] * 1/0.75)
+    major.ticks.y <- (tick.values.major - z.central.global) *  min(ellipse[ ,1])
+    minor.ticks.y <- (tick.values.minor - z.central.global) *  min(ellipse[ ,1])
+
+    # major z-tick lines
+    for (i in 1:length(major.ticks.y)) {
+      major.tick <- data.frame(x = major.ticks.x, y = rep(major.ticks.y[i], 2))
+      IAP <- plotly::add_trace(IAP, data = major.tick,
+                               x = x, y = y, showlegend = FALSE,
+                               hoverinfo = "none",
+                               type = "scatter", mode = "lines",
+                               line = list(color = "black",
+                                           width = 1),
+                               yaxis = "y")
+    }
+
+    # minor z-tick lines
+    for (i in 1:length(minor.ticks.y)) {
+      minor.tick <- data.frame(x = minor.ticks.x, y = rep(minor.ticks.y[i], 2))
+      IAP <- plotly::add_trace(IAP, data = minor.tick,
+                               hoverinfo = "none",
+                               x = x, y = y, showlegend = FALSE,
+                               type = "scatter", mode = "lines",
+                               line = list(color = "black",
+                                           width = 1),
+                               yaxis = "y")
+    }
+
+
+    # z-tick label
+    tick.text <- paste(" ", exp(tick.values.major))
+    tick.pos <- data.frame(x = major.ticks.x[2],
+                           y = major.ticks.y)
+
+    IAP <- plotly::add_trace(IAP, data = tick.pos,
+                             x = x, y = y, showlegend = FALSE,
+                             text = tick.text, textposition = "right",
+                             hoverinfo = "none",
+                             type = "scatter", mode = "text",
+                             yaxis = "y")
+
+    # Central Line ----
+    central.line <- data.frame(x = c(-100, data$xlim[2]*1/0.75), y = c(0, 0))
+    central.line.text <- paste0("Central value: ",
+                                format(exp(z.central.global), digits = 2, nsmall = 1))
+
+    IAP <- plotly::add_trace(IAP, data = central.line,
+                             x = x, y = y, name = "Central line",
+                             type = "scatter", mode = "lines",
+                             hoverinfo = "text", text = central.line.text,
+                             yaxis = "y",
+                             line = list(color = "black",
+                                         width = 0.5,
+                                         dash = 2))
+
+    # KDE plot ----
+    KDE.x <- xy.0[1] + KDE[[1]][ ,2] * KDE.scale
+    KDE.y <- (KDE[[1]][ ,1] - z.central.global) * min(ellipse[,1])
+    KDE.curve <- data.frame(x = KDE.x, y = KDE.y)
+    KDE.curve <- KDE.curve[KDE.curve$x != xy.0[1], ]
+
+    KDE.text <- paste0("Value:",
+                       format(exp(KDE[[1]][ ,1]), digits = 2, nsmall = 1), "</br>",
+                       "Density:",
+                       format(KDE[[1]][ ,2], digits = 2, nsmall = 1))
+
+    IAP <- plotly::add_trace(IAP, data = KDE.curve,
+                             hoverinfo = "text",
+                             text = KDE.text,
+                             x = x, y = y, name = "KDE",
+                             type = "scatter", mode = "lines",
+                             line = list(color = "red"),
+                             yaxis = "y")
+
+    # set layout ----
+    IAP <- plotly::layout(IAP,
+                          hovermode = "closest",
+                          dragmode = "pan",
+                          xaxis = list(range = c(data$xlim[1], data$xlim[2] * 1/0.65),
+                                       zeroline = FALSE,
+                                       showgrid = FALSE,
+                                       tickmode = "array",
+                                       tickvals = x.axis.ticks),
+                          yaxis = list(range = data$ylim,
+                                       zeroline = FALSE,
+                                       showline = FALSE,
+                                       showgrid = FALSE,
+                                       tickmode = "array",
+                                       tickvals = c(-2, 0, 2)),
+                          shapes = list(list(type = "rect", # 2 sigma bar
+                                             x0 = 0, y0 = -2,
+                                             x1 = bars[1,3], y1 = 2,
+                                             xref = "x", yref = "y",
+                                             fillcolor = "grey",
+                                             opacity = 0.2))
+
+    )
+
+    # show interactive plot ----
+    #print(plotly::subplot(IAP, IAP.kde))
+    print(IAP)
+  }
+
+  ## restore initial cex
+  par(cex = cex_old)
+
+  ## create and return numeric output
+  if(output == TRUE) {
+    return(plot.output)
+  }
+}
diff --git a/R/plot_DRTResults.R b/R/plot_DRTResults.R
new file mode 100644
index 0000000..658eb43
--- /dev/null
+++ b/R/plot_DRTResults.R
@@ -0,0 +1,826 @@
+#' Visualise dose recovery test results
+#'
+#' The function provides a standardised plot output for dose recovery test
+#' measurements.
+#'
+#' Procedure to test the accuracy of a measurement protocol to reliably
+#' determine the dose of a specific sample. Here, the natural signal is erased
+#' and a known laboratory dose is administered, which is treated as unknown. Then
+#' the De measurement is carried out and the degree of congruence between
+#' administered and recovered dose is a measure of the protocol's accuracy for
+#' this sample.\cr In the plot, the normalised De is shown on the y-axis, i.e.
+#' obtained De/given dose.
+#'
+#' @param values \code{\linkS4class{RLum.Results}} or \code{\link{data.frame}},
+#' (\bold{required}): input values containing at least De and De error. To plot
+#' more than one data set in one figure, a \code{list} of the individual data
+#' sets must be provided (e.g. \code{list(dataset.1, dataset.2)}).
+#'
+#' @param given.dose \code{\link{numeric}} (optional): given dose used for the
+#' dose recovery test to normalise data. If only one given dose is provided
+#' this given dose is valid for all input data sets (i.e., \code{values} is a
+#' list).  Otherwise a given dose for each input data set has to be provided
+#' (e.g., \code{given.dose = c(100,200)}). If no \code{given.dose} is provided,
+#' the values are plotted without normalisation (might be useful for preheat
+#' plateau tests). Note: the unit has to be the same as that of the input
+#' values (e.g., seconds or Gray).
+#'
+#' @param error.range \code{\link{numeric}}: symmetric error range in percent,
+#' shown as dashed lines in the plot. Set \code{error.range} to 0 to
+#' suppress plotting of error ranges.
+#'
+#' @param preheat \code{\link{numeric}}: optional vector of preheat
+#' temperatures to be used for grouping the De values. If specified, the
+#' temperatures are assigned to the x-axis.
+#'
+#' @param boxplot \code{\link{logical}}: optionally plot values that are
+#' grouped by preheat temperature as boxplots. Only possible when the
+#' \code{preheat} vector is specified.
+#'
+#' @param mtext \code{\link{character}}: additional text below the plot title.
+#'
+#' @param summary \code{\link{character}} (optional): adds numerical output to
+#' the plot.  Can be one or more out of: \code{"n"} (number of samples),
+#' \code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+#' \code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+#' deviation in percent), \code{"sdabs"} (absolute standard deviation),
+#' \code{"serel"} (relative standard error) and \code{"seabs"} (absolute
+#' standard error).
+#'
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used.
+#'
+#' @param legend \code{\link{character}} vector (optional): legend content to
+#' be added to the plot.
+#'
+#' @param legend.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the legend to be plotted.
+#' @param par.local \code{\link{logical}} (with default): use local graphical
+#' parameters for plotting, e.g. the plot is shown in one column and one row.
+#' If \code{par.local = FALSE}, global parameters are inherited, i.e. parameters
+#' provided via \code{par()} apply.
+#' @param na.rm \code{\link{logical}}: indicates whether \code{NA} values are
+#' removed from the input data set before plotting
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot}}.
+#' @return A plot is returned.
+#'
+#' @note Further data and plot arguments can be added by using the appropriate R
+#' commands.
+#' @section Function version: 0.1.10
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France), Michael Dietze, GFZ Potsdam (Germany)
+#'
+#' @seealso \code{\link{plot}}
+#'
+#' @references Wintle, A.G., Murray, A.S., 2006. A review of quartz optically
+#' stimulated luminescence characteristics and their relevance in
+#' single-aliquot regeneration dating protocols. Radiation Measurements, 41,
+#' 369-391.
+#'
+#' @keywords dplot
+#'
+#' @examples
+#'
+#'
+#' ## read example data set and misapply them for this plot type
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' ## plot values
+#' plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+#' given.dose = 2800, mtext = "Example data")
+#'
+#' ## plot values with legend
+#' plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+#'                 given.dose = 2800,
+#'                 legend = "Test data set")
+#'
+#' ## create and plot two subsets with randomised values
+#' x.1 <- ExampleData.DeValues$BT998[7:11,]
+#' x.2 <- ExampleData.DeValues$BT998[7:11,] * c(runif(5, 0.9, 1.1), 1)
+#'
+#' plot_DRTResults(values = list(x.1, x.2),
+#'                 given.dose = 2800)
+#'
+#' ## some more user-defined plot parameters
+#' plot_DRTResults(values = list(x.1, x.2),
+#'                 given.dose = 2800,
+#'                 pch = c(2, 5),
+#'                 col = c("orange", "blue"),
+#'                 xlim = c(0, 8),
+#'                 ylim = c(0.85, 1.15),
+#'                 xlab = "Sample aliquot")
+#'
+#' ## plot the data with user-defined statistical measures as a text summary
+#' plot_DRTResults(values = list(x.1, x.2),
+#'                 given.dose = 2800,
+#'                 summary = c("n", "mean.weighted", "sdrel"))
+#'
+#' ## plot the data with user-defined statistical measures as sub-header
+#' plot_DRTResults(values = list(x.1, x.2),
+#'                 given.dose = 2800,
+#'                 summary = c("n", "mean.weighted", "sd"),
+#'                 summary.pos = "sub")
+#'
+#' ## plot the data grouped by preheat temperatures
+#' plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+#'                 given.dose = 2800,
+#'                 preheat = c(200, 200, 200, 240, 240))
+#'
+#' ## plot two data sets grouped by preheat temperatures
+#' plot_DRTResults(values = list(x.1, x.2),
+#'                 given.dose = 2800,
+#'                 preheat = c(200, 200, 200, 240, 240))
+#'
+#' ## plot the data grouped by preheat temperatures as boxplots
+#' plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+#'                 given.dose = 2800,
+#'                 preheat = c(200, 200, 200, 240, 240),
+#'                 boxplot = TRUE)
+#'
+#' @export
+plot_DRTResults <- function(
+  values,
+  given.dose = NULL,
+  error.range = 10,
+  preheat,
+  boxplot = FALSE,
+  mtext,
+  summary,
+  summary.pos,
+  legend,
+  legend.pos,
+  par.local = TRUE,
+  na.rm  = FALSE,
+  ...
+){
+
+  ## Validity checks ----------------------------------------------------------
+
+  ##avoid crash for wrongly set boxplot argument
+  if(missing(preheat) & boxplot == TRUE){
+
+    warning("[plot_DRTResults()] Option 'boxplot' not valid without any value in 'preheat'. Reset to FALSE.")
+    boxplot  <- FALSE
+
+  }
+
+  if(missing(summary) == TRUE) {summary <- NULL}
+  if(missing(summary.pos) == TRUE) {summary.pos <- "topleft"}
+  if(missing(legend.pos) == TRUE) {legend.pos <- "topright"}
+  if(missing(mtext) == TRUE) {mtext <- ""}
+
+  ## Homogenise and check input data
+  if(is(values, "list") == FALSE) {values <- list(values)}
+
+  for(i in 1:length(values)) {
+    if(is(values[[i]], "RLum.Results")==FALSE &
+         is(values[[i]], "data.frame")==FALSE){
+      stop(paste("[plot_DRTResults()] Wrong input data format",
+                 "(!= 'data.frame' or 'RLum.Results')"))
+    } else {
+      if(is(values[[i]], "RLum.Results")==TRUE){
+        values[[i]] <- get_RLum(values[[i]])[,1:2]
+      }
+    }
+  }
+
+  ## Check input arguments ----------------------------------------------------
+  for(i in 1:length(values)) {
+
+    ##check for preheat temperature values
+    if(missing(preheat) == FALSE) {
+      if(length(preheat) != nrow(values[[i]])){
+        stop("[plot_DRTResults()] number of preheat temperatures != De values!")
+      }
+    }
+
+    ##remove NA values; yes Micha, it is not that simple
+    if(na.rm  == TRUE){
+
+      ##currently we assume that all input data sets comprise a similar number of data points
+      if(!missing(preheat) & i == length(values)){
+
+        ##find and mark NA value indices
+        temp.NA.values <- unique(c(which(is.na(values[[i]][,1])), which(is.na(values[[i]][,2]))))
+
+        ##remove preheat entries
+        preheat <- preheat[-temp.NA.values]
+
+      }
+
+      values[[i]] <- na.exclude(values[[i]])
+
+    }
+  }
+
+  ## create global data set
+  values.global <- NULL
+  n.values <- NULL
+  for(i in 1:length(values)) {
+    values.global <- rbind(values.global, values[[i]])
+    n.values <- c(n.values, nrow(values[[i]]))
+  }
+
+  ## Set plot format parameters -----------------------------------------------
+  extraArgs <- list(...) # read out additional arguments list
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {"Dose recovery test"}
+
+  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {
+    ifelse(missing(preheat) == TRUE, "# Aliquot", "Preheat temperature [\u00B0C]")
+  }
+
+  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+  {if(!is.null(given.dose)){
+    expression(paste("Normalised ", D[e], sep=""))
+  }else{expression(paste(D[e], " [s]"), sep = "")}}
+
+  xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
+  {c(1, max(n.values) + 1)}
+
+  ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else
+  {c(0.75, 1.25)} #check below for further corrections if boundaries exceed set range
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {1}
+
+  pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else {
+    abs(seq(from = 20, to = -100))
+  }
+
+  fun <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+
+  ## calculations and settings-------------------------------------------------
+
+  ## normalise data if given.dose is given
+  if(!is.null(given.dose)){
+
+    if(length(given.dose) > 1){
+
+      if(length(values) < length(given.dose)){
+
+        stop("[plot_DRTResults()] 'given.dose' > number of input data sets!")
+
+      }
+
+      for(i in 1:length(values)) {
+        values[[i]] <- values[[i]]/given.dose[i]
+      }
+
+    }else{
+
+      for(i in 1:length(values)) {
+        values[[i]] <- values[[i]]/given.dose
+      }
+
+    }
+  }
+
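+  ## worked example (hypothetical values): a recovered De of 2750 s with a
+  ## given.dose of 2800 s yields a normalised De of 2750/2800 ~ 0.98, i.e.
+  ## the administered dose is recovered within ca. 2 %
+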
+  ##correct ylim for data sets which exceed the boundaries
+  if((max(sapply(1:length(values), function(x){max(values[[x]][,1], na.rm = TRUE)}))>1.25 |
+        min(sapply(1:length(values), function(x){min(values[[x]][,1], na.rm = TRUE)}))<0.75) &
+       ("ylim" %in% names(extraArgs)) == FALSE){
+
+    ylim <- c(
+      min(sapply(1:length(values), function(x){
+        min(values[[x]][,1], na.rm = TRUE) - max(values[[x]][,2], na.rm = TRUE)})),
+      max(sapply(1:length(values), function(x){
+        max(values[[x]][,1], na.rm = TRUE) + max(values[[x]][,2], na.rm = TRUE)})))
+
+  }
+
+
+  ## optionally group data by preheat temperature
+  if(missing(preheat) == FALSE) {
+    modes <- as.numeric(rownames(as.matrix(table(preheat))))
+    values.preheat <- list(NA)
+    values.boxplot <- list(NA)
+    for(i in 1:length(modes)) {
+      for(j in 1:length(values)) {
+        values.preheat[[length(values.preheat) + 1]] <-
+          cbind(values[[j]][preheat == modes[i],],
+                preheat[preheat == modes[i]])
+        values.boxplot[[length(values.boxplot) + 1]] <-
+          values[[j]][preheat == modes[i],1]
+      }
+      j <- 1
+    }
+    values.preheat[[1]] <- NULL
+    values.boxplot[[1]] <- NULL
+    modes.plot <- rep(modes, each = length(values))
+  } else {modes <- 1}
+
+  ## assign colour indices
+  col <- if("col" %in% names(extraArgs)) {extraArgs$col} else {
+    if(missing(preheat) == TRUE) {
+      rep(seq(from = 1, to = length(values)), each = length(modes))
+    } else {
+      rep(seq(from = 1, to = length(values)), length(modes))
+    }
+  }
+
+  ## calculate and paste statistical summary
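+  ## e.g., summary = c("n", "mean") with summary.pos = "sub" produces a label
+  ## such as "| n = 5 | mean = 0.98 | " below the plot title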
+  label.text <- list(NA)
+
+  if(summary.pos[1] != "sub") {
+    n.rows <- length(summary)
+
+    for(i in 1:length(values)) {
+      stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "")
+      label.text[[length(label.text) + 1]] <- paste(stops, paste(
+        ifelse("n" %in% summary == TRUE,
+               paste("n = ",
+                     nrow(values[[i]]),
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("mean" %in% summary == TRUE,
+               paste("mean = ",
+                     round(mean(values[[i]][,1]), 2),
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("mean.weighted" %in% summary == TRUE,
+               paste("weighted mean = ",
+                     round(weighted.mean(x = values[[i]][,1],
+                                         w = 1 / values[[i]][,2]), 2),
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("median" %in% summary == TRUE,
+               paste("median = ",
+                     round(median(values[[i]][,1]), 2),
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("sdrel" %in% summary == TRUE,
+               paste("sd = ",
+                     round(sd(values[[i]][,1]) / mean(values[[i]][,1]) * 100,
+                           2), " %",
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("sdabs" %in% summary == TRUE,
+               paste("sd = ",
+                     round(sd(values[[i]][,1]), 2),
+                     "\n",
+                     sep = ""),
+               ""),
+        ifelse("serel" %in% summary == TRUE,
+               paste("se = ",
+                     round(calc_Statistics(values[[i]])$unweighted$se.rel, 2),
+                     " %\n",
+                     sep = ""),
+               ""),
+        ifelse("seabs" %in% summary == TRUE,
+               paste("se = ",
+                     round(calc_Statistics(values[[i]])$unweighted$se.abs, 2),
+                     "\n",
+                     sep = ""),
+               ""),
+        sep = ""), stops, sep = "")
+
+    }
+  } else {
+    for(i in 1:length(values)) {
+      label.text[[length(label.text) + 1]]  <- paste(
+        "| ",
+        ifelse("n" %in% summary == TRUE,
+               paste("n = ",
+                     nrow(values[[i]]),
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("mean" %in% summary == TRUE,
+               paste("mean = ",
+                     round(mean(values[[i]][,1]), 2),
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("mean.weighted" %in% summary == TRUE,
+               paste("weighted mean = ",
+                     round(weighted.mean(x = values[[i]][,1],
+                                         w = 1 / values[[i]][,2]), 2),
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("median" %in% summary == TRUE,
+               paste("median = ",
+                     round(median(values[[i]][,1]), 2),
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("sdrel" %in% summary == TRUE,
+               paste("sd = ",
+                     round(sd(values[[i]][,1]) / mean(values[[i]][,1]) * 100,
+                           2), " %",
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("sdabs" %in% summary == TRUE,
+               paste("sd = ",
+                     round(sd(values[[i]][,1]), 2),
+                     " | ",
+                     sep = ""),
+               ""),
+        ifelse("serel" %in% summary == TRUE,
+               paste("se = ",
+                     round(calc_Statistics(values[[i]])$unweighted$se.rel, 2),
+                     " % | ",
+                     sep = ""),
+               ""),
+        ifelse("seabs" %in% summary == TRUE,
+               paste("se = ",
+                     round(calc_Statistics(values[[i]])$unweighted$se.abs, 2),
+                     " | ",
+                     sep = ""),
+               ""),
+        sep = "")
+
+    }
+  }
+
+  ## remove dummy list element
+  label.text[[1]] <- NULL
+
+  ## convert keywords into summary placement coordinates
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- c(xlim[1], ylim[2])
+    summary.adj <- c(0, 1)
+  } else if(length(summary.pos) == 2) {
+    summary.pos <- summary.pos
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "topleft") {
+    summary.pos <- c(xlim[1], ylim[2])
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "top") {
+    summary.pos <- c(mean(xlim), ylim[2])
+    summary.adj <- c(0.5, 1)
+  } else if(summary.pos[1] == "topright") {
+    summary.pos <- c(xlim[2], ylim[2])
+    summary.adj <- c(1, 1)
+  }  else if(summary.pos[1] == "left") {
+    summary.pos <- c(xlim[1], mean(ylim))
+    summary.adj <- c(0, 0.5)
+  } else if(summary.pos[1] == "center") {
+    summary.pos <- c(mean(xlim), mean(ylim))
+    summary.adj <- c(0.5, 0.5)
+  } else if(summary.pos[1] == "right") {
+    summary.pos <- c(xlim[2], mean(ylim))
+    summary.adj <- c(1, 0.5)
+  }else if(summary.pos[1] == "bottomleft") {
+    summary.pos <- c(xlim[1], ylim[1])
+    summary.adj <- c(0, 0)
+  } else if(summary.pos[1] == "bottom") {
+    summary.pos <- c(mean(xlim), ylim[1])
+    summary.adj <- c(0.5, 0)
+  } else if(summary.pos[1] == "bottomright") {
+    summary.pos <- c(xlim[2], ylim[1])
+    summary.adj <- c(1, 0)
+  }
+
+  ## convert keywords into legend placement coordinates
+  if(missing(legend.pos) == TRUE) {
+    legend.pos <- c(xlim[2], ylim[2])
+    legend.adj <- c(1, 1)
+  } else if(length(legend.pos) == 2) {
+    legend.pos <- legend.pos
+    legend.adj <- c(0, 1)
+  } else if(legend.pos[1] == "topleft") {
+    legend.pos <- c(xlim[1], ylim[2])
+    legend.adj <- c(0, 1)
+  } else if(legend.pos[1] == "top") {
+    legend.pos <- c(mean(xlim), ylim[2])
+    legend.adj <- c(0.5, 1)
+  } else if(legend.pos[1] == "topright") {
+    legend.pos <- c(xlim[2], ylim[2])
+    legend.adj <- c(1, 1)
+  } else if(legend.pos[1] == "left") {
+    legend.pos <- c(xlim[1], mean(ylim))
+    legend.adj <- c(0, 0.5)
+  } else if(legend.pos[1] == "center") {
+    legend.pos <- c(mean(xlim), mean(ylim))
+    legend.adj <- c(0.5, 0.5)
+  } else if(legend.pos[1] == "right") {
+    legend.pos <- c(xlim[2], mean(ylim))
+    legend.adj <- c(1, 0.5)
+  } else if(legend.pos[1] == "bottomleft") {
+    legend.pos <- c(xlim[1], ylim[1])
+    legend.adj <- c(0, 0)
+  } else if(legend.pos[1] == "bottom") {
+    legend.pos <- c(mean(xlim), ylim[1])
+    legend.adj <- c(0.5, 0)
+  } else if(legend.pos[1] == "bottomright") {
+    legend.pos <- c(xlim[2], ylim[1])
+    legend.adj <- c(1, 0)
+  }
+
+  ## Plot output --------------------------------------------------------------
+
+  ## determine number of subheader lines to shift the plot
+  shift.lines <- if(summary.pos[1] == "sub" & mtext == "") {
+    length(label.text) - 1
+  } else {1}
+
+  ## setup plot area
+  if(par.local){
+
+    if (shift.lines <= 0)
+      shift.lines <- 1
+    par.default <- par()[c("mfrow", "cex", "oma")]
+    par(mfrow = c(1, 1), cex = cex, oma = c(0, 1, shift.lines - 1, 1))
+  }
+
+  ## optionally plot values and error bars
+  if(boxplot == FALSE) {
+    ## plot data and error
+    if(missing(preheat) == TRUE) {
+      ## create empty plot
+      plot(NA,NA,
+           xlim = xlim,
+           ylim = ylim,
+           xlab = xlab,
+           ylab = ylab,
+           xaxt = "n",
+           main = "")
+
+      ##add x-axis manually ... this avoids fractional values in the axis labelling
+      axis(side = 1, at = 1:(nrow(values[[1]])+1), labels = 1:(nrow(values[[1]])+1))
+
+      ## add title
+      title(main = main,
+            line = shift.lines + 2)
+
+      ## add additional lines
+      if (!is.null(given.dose)) {
+        abline(h = 1)
+
+        if (error.range > 0) {
+          ## error range lines
+          abline(h = 1 * (1 + error.range / 100), lty = 2)
+          abline(h = 1 * (1 - error.range / 100), lty = 2)
+
+          ## error range labels
+          text(
+            par()$usr[2],
+            (1 + error.range / 100) + 0.02,
+            paste("+", error.range , " %", sep = ""),
+            pos = 2,
+            cex = 0.8
+          )
+          text(
+            par()$usr[2],
+            (1 - error.range / 100) - 0.02,
+            paste("-", error.range , "%", sep = ""),
+            pos = 2,
+            cex = 0.8
+          )
+        }
+
+      }
+
+      ## add data and error bars
+      for(i in 1:length(values)) {
+
+        points(x = c(1:nrow(values[[i]])),
+               y = values[[i]][,1],
+               pch = if(nrow(values[[i]]) == length(pch)){ pch } else { pch[i] },
+               col = if(nrow(values[[i]]) == length(col)){ col } else { col[i] },
+               cex = 1.2 * cex)
+
+        arrows(c(1:nrow(values[[i]])),
+               values[[i]][,1] + values[[i]][,2],
+               c(1:nrow(values[[i]])),
+               values[[i]][,1] - values[[i]][,2],
+               angle = 90,
+               length = 0.075,
+               code = 3,
+               col = if(nrow(values[[i]]) == length(col)){ col } else { col[i] })
+
+        ## add summary content
+        if(summary.pos[1] != "sub") {
+          text(x = summary.pos[1],
+               y = summary.pos[2],
+               adj = summary.adj,
+               labels = label.text[[i]],
+               cex = 0.8 * cex,
+               col = col[i])
+        } else {
+          if(mtext == "") {
+            mtext(side = 3,
+                  line = - i + 2.5,
+                  text = label.text[[i]],
+                  col = col[i],
+                  cex = cex * 0.8)
+          }
+        }
+      }
+    } else {
+
+      ## option for provided preheat data
+      ## create empty plot
+      plot(NA,NA,
+           xlim = c(min(modes.plot) * 0.9, max(modes.plot) * 1.1),
+           ylim = ylim,
+           xlab = xlab,
+           ylab = ylab,
+           main = "",
+           axes = FALSE,
+           frame.plot = TRUE)
+
+      ## add axes
+      axis(1,
+           at = modes.plot,
+           labels = modes.plot)
+      axis(2)
+
+      ## add title
+      title(main = main,
+            line = shift.lines + 2)
+
+      ## add additional lines
+      if (!is.null(given.dose)) {
+        abline(h = 1)
+
+        if (error.range > 0) {
+          ## error range lines
+          abline(h = 1 * (1 + error.range / 100), lty = 2)
+          abline(h = 1 * (1 - error.range / 100), lty = 2)
+
+          ## error range labels
+          text(
+            par()$usr[2],
+            (1 + error.range / 100) + 0.02,
+            paste("+", error.range , " %", sep = ""),
+            pos = 2,
+            cex = 0.8
+          )
+          text(
+            par()$usr[2],
+            (1 - error.range / 100) - 0.02,
+            paste("-", error.range , "%", sep = ""),
+            pos = 2,
+            cex = 0.8
+          )
+        }
+      }
+
+      ## plot values
+      for(i in 1:length(values.preheat)) {
+        points(x = values.preheat[[i]][,3],
+               y = values.preheat[[i]][,1],
+               pch = pch[i],
+               col = col[i],
+               cex = 1.2 * cex)
+
+        arrows(values.preheat[[i]][,3],
+               values.preheat[[i]][,1] + values.preheat[[i]][,2],
+               values.preheat[[i]][,3],
+               values.preheat[[i]][,1] - values.preheat[[i]][,2],
+               angle = 90,
+               length = 0.075,
+               code = 3,
+               col = col[i])
+      }
+    }
+  }
+
+  ## optionally, plot boxplot
+  if(boxplot == TRUE) {
+    ## create empty plot
+    boxplot(values.boxplot,
+            names = modes.plot,
+            ylim = ylim,
+            xlab = xlab,
+            ylab = ylab,
+            xaxt = "n",
+            main = "",
+            border = col)
+
+    ## add axis label, if necessary
+    if (length(modes.plot) == 1) {
+      axis(side = 1, at = 1, labels = modes.plot)
+
+    } else if (length(modes.plot) > length(unique(modes.plot))){
+
+      ticks <- seq(from = 1 + ((length(values.boxplot)/length(unique(modes.plot)) - 1)/2),
+                   to = length(values.boxplot),
+                   by = length(values.boxplot)/length(unique(modes.plot)))
+
+      axis(
+        side = 1,
+        at = ticks,
+        labels = unique(modes.plot)
+      )
+
+      ##polygon for a better graphical representation of the groups
+      polygon.x <- seq(
+        1,length(values.boxplot),
+        by = length(values.boxplot) / length(unique(modes.plot))
+      )
+
+      polygon.step <- unique(diff(polygon.x) - 1)
+
+      for (x.plyg in polygon.x) {
+        polygon(
+          x = c(x.plyg,x.plyg,x.plyg + polygon.step, x.plyg + polygon.step),
+          y = c(
+            par()$usr[3],
+            ylim[1] - (ylim[1] - par()$usr[3]) / 2,
+            ylim[1] - (ylim[1] - par()$usr[3]) / 2,
+            par()$usr[3]
+          ),
+          col = "grey",
+          border = "grey"
+
+        )
+
+      }
+
+    }else{
+
+      axis(side = 1, at = 1:length(unique(modes.plot)), labels = unique(modes.plot))
+
+    }
+
+    ## add title
+    title(main = main,
+          line = shift.lines + 2)
+
+    ## add additional lines
+    abline(h = 1)
+
+    if(error.range > 0){
+      ## error range lines
+      abline(h = 1 * (1 + error.range / 100), lty = 2)
+      abline(h = 1 * (1 - error.range / 100), lty = 2)
+
+      ## error range labels
+      text(par()$usr[2], (1 + error.range / 100) + 0.02,
+           paste("+", error.range ," %", sep = ""), pos = 2, cex = 0.8)
+      text(par()$usr[2], (1 - error.range / 100) - 0.02,
+           paste("-", error.range ,"%", sep = ""), pos = 2, cex = 0.8)
+    }
+
+    ## plot data and error
+    for(i in 1:length(values)) {
+      ## add summary content
+      if(summary.pos[1] != "sub") {
+        text(x = summary.pos[1],
+             y = summary.pos[2],
+             adj = summary.adj,
+             labels = label.text[[i]],
+             cex = 0.8 * cex,
+             col = col[i])
+      } else {
+        if(mtext == "") {
+          mtext(side = 3,
+                line = - i + 2.5,
+                text = label.text[[i]],
+                col = col[i],
+                cex = cex * 0.8)
+        }
+      }
+    }
+  }
+
+  ## optionally add legend content
+  if(missing(legend) == FALSE) {
+    legend(x = legend.pos[1],
+           y = legend.pos[2],
+           xjust = legend.adj[1],
+           yjust = legend.adj[2],
+           legend = legend,
+           col = col,
+           pch = pch,
+           lty = 1,
+           cex = cex * 0.8)
+  }
+
+  ## optionally add subheader text
+  mtext(side = 3,
+        line = shift.lines,
+        text = mtext,
+        cex = 0.8 * cex)
+
+  ##reset par()
+  if(par.local){
+    par(par.default)
+    rm(par.default)
+  }
+
+  ##FUN by R Luminescence Team
+  if(fun == TRUE) {sTeve()}
+
+}
diff --git a/R/plot_DetPlot.R b/R/plot_DetPlot.R
new file mode 100644
index 0000000..040dece
--- /dev/null
+++ b/R/plot_DetPlot.R
@@ -0,0 +1,347 @@
+#' Create De(t) plot
+#'
+#' Plots the equivalent dose (De) as a function of the chosen signal integral (cf. Bailey et al., 2003).
+#' The function simply passes several arguments to the function \code{\link{plot}} and to the used
+#' analysis functions and runs them in a loop. Example: \code{legend.pos} for legend position,
+#' \code{legend} for legend text.\cr
+#'
+#' \bold{method}\cr
+#'
+#' The original method presented by Bailey et al. (2003) shifted the signal integrals and slightly
+#' extended them to account for changes in the counting statistics. Example: \code{c(1:3, 3:5, 5:7)}.
+#' However, another method is provided here as well, which expands the signal integral by
+#' consecutively extending the integral by its chosen length. Example: \code{c(1:3, 1:5, 1:7)}
+#'
+#' Note that in both cases the integral limits overlap. The finally applied limits are part
+#' of the function output.\cr
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}): input
+#' object containing data for analysis
+#'
+#' @param signal.integral.min \code{\link{integer}} (\bold{required}): lower
+#' bound of the signal integral.
+#'
+#' @param signal.integral.max \code{\link{integer}} (\bold{required}): upper
+#' bound of the signal integral.
+#'
+#' @param background.integral.min \code{\link{integer}} (\bold{required}):
+#' lower bound of the background integral.
+#'
+#' @param background.integral.max \code{\link{integer}} (\bold{required}):
+#' upper bound of the background integral.
+#'
+#' @param method \code{\link{character}} (with default): method applied for constructing the De(t) plot.
+#' \code{shift} (the default): the chosen signal integral is shifted along the shine-down curve,
+#' \code{expansion}: the chosen signal integral is expanded each time by its length
+#'
+#' @param signal_integral.seq \code{\link{numeric}} (optional): argument to provide a custom
+#' signal integral sequence for constructing the De(t) plot
+#'
+#' @param analyse_function \code{\link{character}} (with default): name of the analyse function
+#' to be called. Supported functions are: \code{'analyse_SAR.CWOSL'}, \code{'analyse_pIRIRSequence'}
+#'
+#' @param analyse_function.control \code{\link{list}} (optional): arguments to be passed to the
+#' supported analyse functions (\code{'analyse_SAR.CWOSL'}, \code{'analyse_pIRIRSequence'})
+#'
+#' @param n.channels \code{\link{integer}} (optional): number of channels used for the De(t) plot.
+#' If nothing is provided, all De values are calculated and plotted up to the start of the
+#' background integral.
+#'
+#' @param show_ShineDownCurve \code{\link{logical}} (with default): enables or disables the
+#' shine-down curve in the plot output
+#'
+#' @param respect_RC.Status \code{\link{logical}} (with default): remove De values with 'FAILED' RC.Status
+#' from the plot (cf. \code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}})
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback
+#'
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot.default}}, \code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}}.
+#' See details for further information.
+#'
+#' @return A plot and an \code{\linkS4class{RLum.Results}} object with the produced De values
+#'
+#' \code{@data}:
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Description}\cr
+#' De.values \tab \code{data.frame} \tab table with De values \cr
+#' signal_integral.seq \tab \code{numeric} \tab integral sequence used for the calculation
+#' }
+#'
+#' \code{@info}:
+#'
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Description}\cr
+#' call \tab \code{call} \tab the original function call
+#' }
+#'
+#'
+#'
+#' @note The entire analysis is based on the used analysis functions, namely
+#' \code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}}. However, the integrity
+#' checks of this function are not as thorough as in those functions themselves. This means that
+#' every sequence should be checked carefully before running long calculations using several
+#' hundred channels.
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @references
+#'
+#' Bailey, R.M., Singarayer, J.S., Ward, S., Stokes, S., 2003. Identification of partial resetting
+#' using De as a function of illumination time. Radiation Measurements 37, 511-518.
+#' doi:10.1016/S1350-4487(03)00063-5
+#'
+#' @seealso \code{\link{plot}}, \code{\link{analyse_SAR.CWOSL}}, \code{\link{analyse_pIRIRSequence}}
+#'
+#' @examples
+#'
+#' \dontrun{
+#' ##load data
+#' ##ExampleData.BINfileData contains two BINfileData objects
+#' ##CWOSL.SAR.Data and TL.SAR.Data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##transform the values from the first position in a RLum.Analysis object
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+#'
+#' plot_DetPlot(object,
+#'              signal.integral.min = 1,
+#'              signal.integral.max = 3,
+#'              background.integral.min = 900,
+#'              background.integral.max = 1000,
+#'              n.channels = 5)
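+#'
+#' ##the De values can be accessed afterwards via get_RLum() (a sketch, assuming
+#' ##the call above is assigned to a hypothetical object 'det'):
+#' ##det <- plot_DetPlot(object, ...)
+#' ##get_RLum(det)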
+#' }
+#'
+#' @export
+plot_DetPlot <- function(
+  object,
+  signal.integral.min,
+  signal.integral.max,
+  background.integral.min,
+  background.integral.max,
+  method = "shift",
+  signal_integral.seq = NULL,
+  analyse_function = "analyse_SAR.CWOSL",
+  analyse_function.control = list(),
+  n.channels = NULL,
+  show_ShineDownCurve = TRUE,
+  respect_RC.Status = FALSE,
+  verbose = TRUE,
+  ...
+) {
+
+
+# Integrity Tests -----------------------------------------------------------------------------
+
+  ##get structure
+  object.structure <- structure_RLum(object)
+
+
+# Set parameters ------------------------------------------------------------------------------
+
+  ##set n.channels
+  if(is.null(n.channels)){
+
+    n.channels <- ceiling(
+      (background.integral.min - 1 - signal.integral.max) / (signal.integral.max - signal.integral.min)
+    )
+
+  }
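+  ## e.g. (illustrative values): signal.integral.min = 1, signal.integral.max = 3
+  ## and background.integral.min = 900 give ceiling((900 - 1 - 3)/(3 - 1)) = 448 channels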
+
+  analyse_function.settings <- list(
+     sequence.structure = c("TL", "IR50", "pIRIR225"),
+     dose.points = NULL,
+     mtext.outer = "",
+     plot = FALSE,
+     plot.single = FALSE
+  )
+
+  analyse_function.settings <- modifyList(analyse_function.settings, analyse_function.control)
+
+
+# Analyse -------------------------------------------------------------------------------------
+
+  ##set integral sequence
+  if (is.null(signal_integral.seq)) {
+    signal_integral.seq <-
+      seq(signal.integral.min,
+          background.integral.min - 1,
+          by = signal.integral.max - signal.integral.min)
+  }
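+  ## e.g. (same illustrative values): seq(1, 899, by = 2), i.e. the integral
+  ## bounds 1, 3, 5, ... used for the shifted (or expanded) signal integrals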
+
+
+  if(analyse_function  == "analyse_SAR.CWOSL"){
+
+    results <- merge_RLum(lapply(1:n.channels, function(x){
+      analyse_SAR.CWOSL(
+        object = object,
+        signal.integral.min = if(method == "shift"){signal_integral.seq[x]}else{signal_integral.seq[1]},
+        signal.integral.max =  signal_integral.seq[x+1],
+        background.integral.min = background.integral.min,
+        background.integral.max = background.integral.max,
+        dose.points = analyse_function.settings$dose.points,
+        mtext.outer = analyse_function.settings$mtext.outer,
+        plot = analyse_function.settings$plot,
+        plot.single = analyse_function.settings$plot.single,
+        verbose = verbose
+
+      )
+
+    }))
+
+
+  }
+  else if(analyse_function  == "analyse_pIRIRSequence"){
+
+    results <- merge_RLum(lapply(1:n.channels, function(x){
+      analyse_pIRIRSequence(
+        object = object,
+        signal.integral.min = if(method == "shift"){signal_integral.seq[x]}else{signal_integral.seq[1]},
+        signal.integral.max = signal_integral.seq[x+1],
+        background.integral.min = background.integral.min,
+        background.integral.max = background.integral.max,
+        dose.points = analyse_function.settings$dose.points,
+        mtext.outer = analyse_function.settings$mtext.outer,
+        plot = analyse_function.settings$plot,
+        plot.single = analyse_function.settings$plot.single,
+        sequence.structure = analyse_function.settings$sequence.structure,
+        verbose = verbose
+
+      )
+
+    }))
+
+
+
+  }
+  else{
+   stop("[plot_DetPlot()] 'analyse_function' unknown!")
+
+  }
+
+
+# Plot ----------------------------------------------------------------------------------------
+
+  ##get De results
+  if(analyse_function == "analyse_pIRIRSequence"){
+    pIRIR_signals <- unique(get_RLum(results)$Signal)
+
+  }else{
+    pIRIR_signals <- NA
+
+  }
+
+  ##run this in a loop to account for pIRIR data
+  df_final <- lapply(1:length(pIRIR_signals), function(i){
+
+    ##get data.frame
+    df <- get_RLum(results)
+
+    ##further limit
+    if(!is.na(pIRIR_signals[1])){
+      df <- df[df$Signal == pIRIR_signals[i],]
+
+    }
+
+    ##add the shine-down curve, which is by definition the first IRSL/OSL curve,
+    ##and normalise it to the De value range
+    OSL_curve <-
+      as(get_RLum(object, recordType = "SL")[[i]], "matrix")
+
+    ##limit to what we see
+    OSL_curve <- OSL_curve[1:signal_integral.seq[n.channels + 1],]
+
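+    ## linear transformation y' = m * y + n mapping the curve's count range onto
+    ## the De plotting range (curve maximum -> upper ylim, curve minimum -> lower ylim)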
+    m <-
+      ((min(df$De, na.rm = TRUE) - max(df$De.Error, na.rm = TRUE)) -
+         (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))) /
+      (min(OSL_curve[, 2], na.rm = TRUE) - max(OSL_curve[, 2], na.rm = TRUE))
+    n <- (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE)) -
+      m * max(OSL_curve[, 2])
+
+    OSL_curve[, 2] <- m * OSL_curve[, 2] + n
+    rm(n, m)
+
+    ##set plot settings
+    plot.settings <- list(
+      ylim = c((min(df$De, na.rm = TRUE) - max(df$De.Error, na.rm = TRUE)),
+               (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))),
+      xlim = c(min(OSL_curve[, 1]), max(OSL_curve[, 1])),
+      ylab = expression(paste(D[e] / s, " and ", L[n]/(a.u.))),
+      xlab = "Stimulation time [s]",
+      main = "De(t) plot",
+      pch = 1,
+      mtext = ifelse(is.na(pIRIR_signals[1]), "", paste0("Signal: ",pIRIR_signals[i])),
+      cex = 1,
+      legend = c(expression(L[n]-signal), expression(D[e])),
+      legend.pos = "bottomleft"
+    )
+    plot.settings <- modifyList(plot.settings, list(...))
+
+    ##general settings
+    par(cex = plot.settings$cex)
+
+    ##open plot area
+    plot(
+      NA,
+      NA,
+      xlim = plot.settings$xlim,
+      ylim = plot.settings$ylim,
+      xlab = plot.settings$xlab,
+      ylab = plot.settings$ylab,
+      main = plot.settings$main
+    )
+
+    if (show_ShineDownCurve) {
+      lines(OSL_curve, type = "b", pch = 20)
+    }
+
+    ##set x-axis
+    df_x <-
+      OSL_curve[seq(signal.integral.max, signal_integral.seq[n.channels+1], length.out = nrow(df)),1]
+
+    #combine everything to allow excluding unwanted values
+    df_final <- cbind(df, df_x)
+
+    if (respect_RC.Status) {
+      df_final <- df_final[df_final$RC.Status != "FAILED", ]
+
+    }
+
+
+    ##TODO: colour failed points red
+    ##plot points and error bars
+    points(df_final[, c("df_x", "De")], pch = plot.settings$pch)
+    segments(
+      x0 = df_final$df_x,
+      y0 = df_final$De + df_final$De.Error,
+      x1 = df_final$df_x,
+      y1 = df_final$De - df_final$De.Error
+    )
+
+    ##set mtext
+    mtext(side = 3, plot.settings$mtext)
+
+    ##legend
+    legend(
+      plot.settings$legend.pos,
+      legend = plot.settings$legend,
+      pch = c(plot.settings$pch, 20),
+      bty = "n"
+    )
+
+    ##set return
+    return(df_final)
+
+  })
+
+  ##merge results
+  return(set_RLum(
+    class = "RLum.Results",
+    data = list(
+      De.values = as.data.frame(data.table::rbindlist(df_final)),
+      signal_integral.seq = signal_integral.seq
+      ),
+    info = list(call = sys.call())
+  ))
+
+}
diff --git a/R/plot_FilterCombinations.R b/R/plot_FilterCombinations.R
new file mode 100644
index 0000000..029caba
--- /dev/null
+++ b/R/plot_FilterCombinations.R
@@ -0,0 +1,297 @@
+#' Plot filter combinations along with the (optional) net transmission window
+#'
+#' The function allows plotting transmission windows for different filters. Missing data for specific
+#' wavelengths are automatically interpolated for the given filter data using the function \code{\link{approx}}.
+#' With that, a standardised output is reached and a net transmission window can be shown.\cr
+#'
+#' \bold{How to provide input data?}\cr
+#'
+#' CASE 1\cr
+#'
+#' The function expects that all filter values are either of type \code{matrix} or \code{data.frame}
+#' with two columns. The first column contains the wavelength, the second the relative transmission
+#' (but not in percentage, i.e. the maximum transmission can only be 1).
+#'
+#' In this case only the transmission window is shown as provided. Changes in filter thickness and
+#' reflection factor are not considered. \cr
+#'
+#' CASE 2\cr
+#'
+#' The filter data itself are provided as list element containing a \code{matrix} or \code{data.frame}
+#' and additional information on the thickness of the filter, e.g., \code{list(filter1 = list(filter_matrix, d = 2))}.
+#' The given filter data are always considered as standard input and the filter thickness value
+#' is taken into account by
+#'
+#' \deqn{Transmission = Transmission^(d)}
+#'
+#' with d given in the same dimension as the original filter data.\cr
+#'
+#' CASE 3\cr
+#'
+#' Same as CASE 2 but additionally a reflection factor P is provided, e.g.,
+#' \code{list(filter1 = list(filter_matrix, d = 2, P = 0.9))}. The final transmission
+#' becomes:
+#'
+#' \deqn{Transmission = Transmission^(d) * P}\cr
+#'
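+#' For example (illustrative numbers only): a transmission of 0.9 combined with
+#' d = 2 and P = 0.9 yields a final transmission of 0.9^2 * 0.9 = 0.729.
+#'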
+#' \bold{Advanced plotting parameters}\cr
+#'
+#' The following additional, non-standard plotting parameters can be passed to the function:\cr
+#'
+#' \tabular{lll}{
+#' \bold{Argument} \tab \bold{Datatype} \tab \bold{Description}\cr
+#' \code{legend} \tab \code{logical} \tab enable/disable legend \cr
+#' \code{legend.pos} \tab \code{character} \tab change legend position (\code{\link[graphics]{legend}}) \cr
+#' \code{legend.text} \tab \code{character} \tab same as the argument \code{legend} in (\code{\link[graphics]{legend}}) \cr
+#' \code{net_transmission.col} \tab \code{col} \tab colour of net transmission window polygon \cr
+#' \code{grid} \tab \code{list} \tab full list of arguments that can be passed to the function \code{\link[graphics]{grid}}
+#' }
+#'
+#' For further modifications standard additional R plot functions are recommended, e.g., the legend
+#' can be fully customised by disabling the standard legend and using the function \code{\link[graphics]{legend}}
+#' instead.
+#'
+#'
+#' @param filters \code{\link{list}} (\bold{required}): a named list of filter data for each filter to be shown.
+#' The filter data itself should be either provided as \code{\link{data.frame}} or \code{\link{matrix}}.
+#' (for more options see details)
+#'
+#' @param wavelength_range \code{\link{numeric}} (with default): wavelength range used for the interpolation
+#'
+#' @param show_net_transmission \code{\link{logical}} (with default): show net transmission window
+#' as polygon.
+#'
+#' @param plot \code{\link{logical}} (with default): enables or disables the plot output
+#'
+#' @param \dots further arguments that can be passed to control the plot output. Supported are \code{main},
+#' \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{type}, \code{lty}, \code{lwd}.
+#' For non-common plotting parameters see the details section.
+#'
+#' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+#'
+#' \bold{@data}
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+#'  net_transmission_window \tab \code{matrix} \tab the resulting net transmission window \cr
+#'  filter_matrix \tab \code{matrix} \tab the filter matrix used for plotting
+#'
+#' }
+#'
+#' \bold{@info}
+#' \tabular{lll}{
+#' \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+#' call \tab \code{call} \tab the original function call
+#'
+#' }
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
+#'
+#' @seealso \code{\linkS4class{RLum.Results}}, \code{\link{approx}}
+#'
+#' @keywords datagen aplot
+#'
+#' @examples
+#'
+#' ## (For legal reasons no real filter data are provided)
+#'
+#' ## Create filter sets
+#' filter1 <- density(rnorm(100, mean = 450, sd = 20))
+#' filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2)
+#' filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2)
+#'
+#' ## Example 1 (standard)
+#' plot_FilterCombinations(filters = list(filter1, filter2))
+#'
+#' ## Example 2 (with d and P value and name for filter 2)
+#' results <- plot_FilterCombinations(
+#' filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6)))
+#' results
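+#'
+#' ## Example 3 (a sketch: disable the built-in legend and draw a custom one,
+#' ## using the advanced parameters described in the details section)
+#' plot_FilterCombinations(
+#' filters = list(filter_1 = filter1, filter_2 = filter2),
+#' legend = FALSE)
+#' legend("topright", legend = c("Filter 1", "Filter 2"),
+#' lty = 1, col = 1:2, bty = "n")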
+#'
+#'
+#' @export
+plot_FilterCombinations <- function(
+  filters,
+  wavelength_range = 200:1000,
+  show_net_transmission = TRUE,
+  plot = TRUE,
+  ...) {
+  # Integrity tests -----------------------------------------------------------------------------
+
+  #check filters
+  if (!is(filters, "list")) {
+    stop("[plot_FilterCombinations()] 'filters' should be of type 'list'")
+
+  }
+
+  #input should either data.frame or matrix
+  lapply(filters, function(x) {
+    if (!is(x, "data.frame") & !is(x, "matrix") & !is(x, "list")) {
+      stop(
+        paste(
+          "[plot_FilterCombinations()] input for filter",
+          x,
+          "is not of type 'matrix', 'data.frame' or 'list'!"
+        )
+      )
+
+    }
+
+
+  })
+
+  #check for named list, if not set names
+  if (is.null(names(filters))) {
+    names(filters) <- paste("Filter", seq_along(filters))
+
+  }
+
+
+  # Data Preparation ----------------------------------------------------------------------------
+
+  ##check if filters are provided with their thickness; if so, correct the
+  ##transmission for it ... relevant for glass filters
+  filters <- lapply(filters, function(x) {
+    if (is(x, "list")) {
+
+      ##correct the transmission for the filter thickness; the tabulated
+      ##transmission values are assumed to correspond to a thickness of 1
+      if(length(x) > 1){
+        x[[1]][, 2] <- x[[1]][, 2] ^ (x[[2]])
+
+      }else{
+        return(x[[1]])
+
+      }
+
+      ##account for a potentially provided reflection factor
+      if(length(x) > 2){
+       x[[1]][,2] <-  x[[1]][,2] * x[[3]]
+       return(x[[1]])
+
+      }else{
+       return(x[[1]])
+
+      }
+
+    } else{
+      return(x)
+
+    }
+
+  })
+
+  #check for transmission values greater than one; these are physically not possible
+  lapply(filters, function(x) {
+    if (max(x[, 2], na.rm = TRUE) > 1.01) {
+      stop("[plot_FilterCombinations()] transmission values > 1 found. Check your data.")
+
+    }
+
+  })
+
+  ##combine everything in a matrix using approx for interpolation
+  filter_matrix <- vapply(filters, function(x) {
+    approx(x = x[, 1], y = x[, 2], xout = wavelength_range)$y
+
+  }, FUN.VALUE = vector(mode = "numeric", length = length(wavelength_range)))
+
+  ##calculate transmission window
+  filter_matrix <- cbind(filter_matrix)
+  net_transmission_window <- matrix(
+    c(wavelength_range, matrixStats::rowMins(filter_matrix)),
+    ncol = 2)
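+  ## note: the net transmission window is taken as the row-wise (per wavelength)
+  ## minimum across all filters, i.e. the lowest transmission in the stack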
+
+  ##set rownames of filter matrix
+  rownames(filter_matrix) <- wavelength_range
+
+  ##set column names for filter matrix
+  colnames(filter_matrix) <- names(filters)
+
+  # Plotting ------------------------------------------------------------------------------------
+
+  if (plot) {
+    ##set plot settings
+    plot_settings <- list(
+      main = "Filter Combination",
+      xlab = "Wavelength [nm]",
+      ylab = "Transmission [a.u.]",
+      xlim = range(wavelength_range),
+      ylim = c(0, 1),
+      legend.pos = "topleft",
+      lty = 1,
+      lwd = 1,
+      col = 1:length(filters),
+      grid = expression(nx = 10, ny = 10),
+      legend = TRUE,
+      legend.text = colnames(filter_matrix),
+      net_transmission.col = "grey"
+
+    )
+
+    ##modify settings on request
+    plot_settings <- modifyList(plot_settings, list(...))
+
+    ##plot individual filters
+    graphics::matplot(
+      x = wavelength_range,
+      y = filter_matrix,
+      type = "l",
+      main = plot_settings$main,
+      xlab = plot_settings$xlab,
+      ylab = plot_settings$ylab,
+      xlim = plot_settings$xlim,
+      ylim = plot_settings$ylim,
+      lty = plot_settings$lty,
+      lwd = plot_settings$lwd,
+      col = plot_settings$col
+
+    )
+
+    if (!is.null(plot_settings$grid)) {
+      graphics::grid(eval(plot_settings$grid))
+
+    }
+
+    ##show effective transmission, which is the minimum for each row
+    if (show_net_transmission) {
+      polygon(
+        x = c(wavelength_range, rev(wavelength_range)),
+        y = c(net_transmission_window[, 2],
+              rep(0, length(wavelength_range))),
+        col = plot_settings$net_transmission.col,
+        border = NA
+      )
+
+    }
+
+    #legend
+    if (plot_settings$legend) {
+      legend(
+        plot_settings$legend.pos,
+        legend = plot_settings$legend.text,
+        col = plot_settings$col,
+        lty = plot_settings$lty,
+        bty = "n"
+      )
+    }
+
+
+  }
+
+
+  # Produce output object -----------------------------------------------------------------------
+  return(set_RLum(
+    class = "RLum.Results",
+    data = list(
+      net_transmission_window = net_transmission_window,
+      filter_matrix = filter_matrix
+
+    ),
+    info = list(call = sys.call())
+  ))
+
+
+
+}
diff --git a/R/plot_GrowthCurve.R b/R/plot_GrowthCurve.R
new file mode 100644
index 0000000..571ae64
--- /dev/null
+++ b/R/plot_GrowthCurve.R
@@ -0,0 +1,1686 @@
+#' Fit and plot a growth curve for luminescence data (Lx/Tx against dose)
+#'
+#' A dose response curve is produced for luminescence measurements using a
+#' regenerative protocol.
+#'
+#' \bold{Fitting methods} \cr\cr For all options (except for \code{LIN}, \code{QDR} and
+#' \code{EXP OR LIN}), the \code{\link[minpack.lm]{nlsLM}} function with the
+#' \code{LM} (Levenberg-Marquardt) algorithm is used. Note: For historical reasons,
+#' the Monte Carlo simulations partly use the function \code{\link{nls}} with the \code{port} algorithm.
+#'
+#' The solution is found by transforming the function or using \code{\link{uniroot}}. \cr
+#'
+#' \code{LIN}: fits a linear function to the data using
+#' \link{lm}: \deqn{y = m*x+n}
+#'
+#' \code{QDR}: fits a quadratic function to the data using
+#' \link{lm}: \deqn{y = a + b * x + c * x^2}
+#'
+#' \code{EXP}: tries to fit a function of the form
+#' \deqn{y = a*(1-exp(-(x+c)/b))} Parameters b and c are approximated by a
+#' linear fit using \link{lm}. Note: b = D0\cr
+#'
+#' \code{EXP OR LIN}: works for some cases where an \code{EXP} fit fails. If
+#' the \code{EXP} fit fails, a \code{LIN} fit is done instead. \cr
+#'
+#' \code{EXP+LIN}: tries to fit an exponential plus linear function of the
+#' form: \deqn{y = a*(1-exp(-(x+c)/b)+(g*x))} The De is calculated by
+#' iteration.\cr \bold{Note:} In the context of luminescence dating, this
+#' function has no physical meaning. Therefore, no D0 value is returned.\cr
+#'
+#' \code{EXP+EXP}: tries to fit a double exponential function of the form
+#' \deqn{y = (a1*(1-exp(-(x)/b1)))+(a2*(1-exp(-(x)/b2)))} This fitting
+#' procedure is not robust against wrong start parameters and should be further
+#' improved.\cr\cr
+#'
+#' \bold{Fit weighting}\cr
+#'
+#' If the option \code{fit.weights =  TRUE} is chosen, weights are calculated using
+#' provided signal errors (Lx/Tx error): \deqn{fit.weights = 1/error/(sum(1/error))}\cr
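+#'
+#' For example (illustrative numbers only): for errors \code{c(0.1, 0.2, 0.4)} the
+#' weights become \code{(1/c(0.1, 0.2, 0.4))/sum(1/c(0.1, 0.2, 0.4))},
+#' i.e. c. 0.57, 0.29 and 0.14.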
+#'
+#' \bold{Error estimation using Monte Carlo simulation}\cr
+#'
+#' Error estimation is done using a Monte Carlo (MC) simulation approach. A set of Lx/Tx values is
+#' constructed by randomly drawing curve data sampled from normal
+#' distributions. The normal distribution is defined by the input values (mean
+#' = value, sd = value.error). Then, a growth curve fit is attempted for each
+#' dataset, resulting in a new distribution of single De values. The \link{sd}
+#' of this distribution then becomes the error of the De. With increasing
+#' iterations, the error value becomes more stable. \bold{Note:} It may take
+#' some calculation time with increasing MC runs, especially for the composed
+#' functions (\code{EXP+LIN} and \code{EXP+EXP}).\cr Each error estimation is
+#' done with the function of the chosen fitting method. \cr
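+#'
+#' A minimal sketch of the sampling step (illustrative names and values only):
+#' \preformatted{LxTx.MC <- rnorm(n = 100, mean = LxTx, sd = LxTx.Error)}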
+#'
+#' \bold{Subtitle information}\cr
+#'
+#' To avoid plotting the subtitle information, provide an empty user mtext \code{mtext = ""}.
+#' To plot any other subtitle text, use \code{mtext}.
+#'
+#' @param sample \code{\link{data.frame}} (\bold{required}): data frame with
+#' three columns (x = Dose, y = LxTx, z = LxTx.Error) and an optional fourth column (y1 = TnTx).
+#' The column for the test dose response is optional, but requires 'TnTx' as column name if used.
+#' For exponential fits at least three dose points (including the natural) should be provided.
+#'
+#' @param na.rm \code{\link{logical}} (with default): excludes \code{NA} values
+#' from the data set prior to any further operations.
+#'
+#' @param fit.method \code{\link{character}} (with default): function used for
+#' fitting. Possible options are: \code{LIN}, \code{QDR}, \code{EXP}, \code{EXP OR LIN},
+#' \code{EXP+LIN} or \code{EXP+EXP}. See details.
+#'
+#' @param fit.force_through_origin \code{\link{logical}} (with default): allows forcing
+#' the fitted function through the origin. For \code{method = "EXP+EXP"} the function
+#' goes through the origin in either case, so this option has no effect.
+#'
+#' @param fit.weights \code{\link{logical}} (with default): option whether the
+#' fitting is done with or without weights. See details.
+#'
+#' @param fit.includingRepeatedRegPoints \code{\link{logical}} (with default):
+#' includes repeated points for fitting (\code{TRUE}/\code{FALSE}).
+#'
+#' @param fit.NumberRegPoints \code{\link{integer}} (optional): set number of
+#' regeneration points manually. By default the number of all (!) regeneration
+#' points is used automatically.
+#'
+#' @param fit.NumberRegPointsReal \code{\link{integer}} (optional): if the
+#' number of regeneration points is provided manually, the number of real
+#' regeneration points (i.e. all points excluding repeated points and reg 0)
+#' has to be inserted as well.
+#'
+#' @param fit.bounds \code{\link{logical}} (with default): set lower fit bounds
+#' for all fitting parameters to 0. Limited to the fit methods
+#' \code{EXP}, \code{EXP+LIN} and \code{EXP OR LIN}. This argument is intended
+#' for experimental applications only!
+#'
+#' @param NumberIterations.MC \code{\link{integer}} (with default): number of
+#' Monte Carlo simulations for error estimation. See details.
+#'
+#' @param output.plot \code{\link{logical}} (with default): plot output
+#' (\code{TRUE/FALSE}).
+#'
+#' @param output.plotExtended \code{\link{logical}} (with default): If
+#' \code{TRUE}, 3 plots on one plot area are provided: (1) growth curve, (2)
+#' histogram from Monte Carlo error simulation and (3) a test dose response
+#' plot. If \code{FALSE}, just the growth curve will be plotted.
+#' \bold{Requires:} \code{output.plot = TRUE}.
+#'
+#' @param output.plotExtended.single \code{\link{logical}} (with default):
+#' single plot output (\code{TRUE/FALSE}) to allow for plotting the results in
+#' single plot windows. Requires \code{output.plot = TRUE} and
+#' \code{output.plotExtended = TRUE}.
+#'
+#' @param cex.global \code{\link{numeric}} (with default): global scaling
+#' factor.
+#'
+#' @param txtProgressBar \code{\link{logical}} (with default): enables or disables the txtProgressBar.
+#' If \code{verbose = FALSE}, no progress bar is shown either.
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback.
+#'
+#' @param \dots Further arguments and graphical parameters to be passed. Note:
+#' Standard arguments will only be passed to the growth curve plot. Supported:
+#' \code{xlim}, \code{ylim}, \code{main}, \code{xlab}, \code{ylab}
+#'
+#' @return Along with a plot (if wanted) an \code{RLum.Results} object is returned;
+#' the slot \code{data} contains the following elements:\cr
+#'
+#' \tabular{lll}{
+#' \bold{DATA.OBJECT} \tab \bold{TYPE} \tab \bold{DESCRIPTION} \cr
+#' \code{..$De} : \tab  \code{data.frame} \tab Table with De values \cr
+#' \code{..$De.MC} : \tab \code{numeric} \tab Table with De values from MC runs \cr
+#' \code{..$Fit} : \tab \code{\link{nls}} or \code{\link{lm}} \tab object from the fitting for \code{EXP},
+#' \code{EXP+LIN} and \code{EXP+EXP}, or from the resulting linear fit when using \code{LIN}, \code{QDR} or
+#' \code{EXP OR LIN} \cr
+#' \code{..$Formula} : \tab \code{\link{expression}} \tab Fitting formula as R expression \cr
+#' \code{..$call} : \tab \code{call} \tab The original function call\cr
+#' }
+#'
+#' @section Function version: 1.8.16
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France), \cr Michael Dietze, GFZ Potsdam (Germany)
+#'
+#' @seealso \code{\link{nls}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link{lm}}, \code{uniroot}
+#'
+#' @examples
+#'
+#' ##(1) plot growth curve for a dummy data set and show De value
+#' data(ExampleData.LxTxData, envir = environment())
+#' temp <- plot_GrowthCurve(LxTxData)
+#' get_RLum(temp)
+#'
+#' ##(1a) to access the fitting value try
+#' get_RLum(temp, data.object = "Fit")
+#'
+#' ##(2) plot the growth curve only - uncomment to use
+#' ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special")
+#' plot_GrowthCurve(LxTxData)
+#' ##dev.off()
+#'
+#' ##(3) plot growth curve with pdf output - uncomment to use, single output
+#' ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special")
+#' plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE)
+#' ##dev.off()
+#'
+#' ##(4) plot resulting function for a given interval x
+#' x <- seq(1,10000, by = 100)
+#' plot(
+#'  x = x,
+#'  y = eval(temp$Formula),
+#'  type = "l"
+#' )
+#'
+#' @export
+plot_GrowthCurve <- function(
+  sample,
+  na.rm = TRUE,
+  fit.method = "EXP",
+  fit.force_through_origin = FALSE,
+  fit.weights = TRUE,
+  fit.includingRepeatedRegPoints = TRUE,
+  fit.NumberRegPoints = NULL,
+  fit.NumberRegPointsReal = NULL,
+  fit.bounds = TRUE,
+  NumberIterations.MC = 100,
+  output.plot = TRUE,
+  output.plotExtended = TRUE,
+  output.plotExtended.single = FALSE,
+  cex.global = 1,
+  txtProgressBar = TRUE,
+  verbose = TRUE,
+  ...
+) {
+
+  ##1. check if sample is data.frame
+  if(is.data.frame(sample)==FALSE){
+    stop("\n [plot_GrowthCurve()] Sample has to be of type data.frame!")
+  }
+
+  ##2. check if sample contains at least three rows
+  if(length(sample[,1])<3 & fit.method != "LIN"){
+    stop("\n [plot_GrowthCurve()] At least two regeneration points are needed!")
+  }
+
+  ##2.1 check for inf data in the data.frame
+  if(any(is.infinite(unlist(sample)))){
+    warning("[plot_GrowthCurve()] the input data contain at least one Inf value. NULL returned!")
+    return(NULL)
+  }
+
+  ## optionally, count and exclude NA values and print result
+  if(na.rm == TRUE) {
+    n.NA <- sum(!complete.cases(sample))
+
+    if (n.NA == 1) {
+      warning("[plot_GrowthCurve()] 1 NA value excluded.")
+    } else if (n.NA > 1) {
+      warning(paste(" [plot_GrowthCurve()]", n.NA, "NA values excluded."))
+    }
+
+    sample <- na.exclude(sample)
+
+    ##Check if anything is left after removal
+    if(nrow(sample) == 0){
+
+      warning("[plot_GrowthCurve()] Sorry, after NA removal nothing is left from the data set! NULL returned")
+      return(NULL)
+    }
+
+  }
+
+
+  ##3. verbose mode
+  if(!verbose){
+    txtProgressBar <- FALSE
+  }
+
+
+  ##remove rownames from the data.frame, as these could cause errors in the reg point calculation
+  rownames(sample) <- NULL
+
+
+  ##0 values in the y-column (Lx/Tx) of the data.frame are not allowed
+  if(length(sample[sample[,2]==0,2])>0){
+    n.zero <- length(sample[sample[,2]==0,2])
+    sample[sample[,2]==0,2]<-0.0001
+    warning(paste("[plot_GrowthCurve()]", n.zero, "values with 0 for Lx/Tx detected; replaced by 0.0001."))
+  }
+
+  ##1. INPUT
+
+  #1.0.1 calculate number of reg points if not set
+  if(is.null(fit.NumberRegPoints)){
+    fit.NumberRegPoints<-length(sample[-1,1])
+  }
+  if(is.null(fit.NumberRegPointsReal)){
+
+    fit.RegPointsReal <- as.integer(
+      rownames(sample[-which(duplicated(sample[,1]) | sample[,1]==0),]))
+
+    fit.NumberRegPointsReal <- length(fit.RegPointsReal)
+
+  }
+
+  #1.1 Produce dataframe from input values
+  xy<-data.frame(x=sample[2:(fit.NumberRegPoints+1),1],y=sample[2:(fit.NumberRegPoints+1),2])
+  y.Error<-sample[2:(fit.NumberRegPoints+1),3]
+
+  ##1.1.1 produce weights for weighted fitting
+  if(fit.weights){
+
+    fit.weights <- 1 / abs(y.Error) / sum(1 / abs(y.Error))
+
+    if(is.na(fit.weights[1])){
+
+      fit.weights <- NULL
+      warning("fit.weights set to NULL as the error column is invalid or 0.")
+
+    }
+
+  }else{
+    fit.weights <- rep(1, length(abs(y.Error)))
+
+  }
+
+
+  # Deal with extra arguments -----------------------------------------------
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {"Growth curve"}
+
+  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
+  {"Dose [s]"}
+
+  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+  {expression(L[x]/T[x])}
+
+  if("cex" %in% names(extraArgs)) {cex.global <- extraArgs$cex}
+
+  ylim <- if("ylim" %in% names(extraArgs)) {
+    extraArgs$ylim
+  } else {
+    if(fit.force_through_origin){
+      c(0-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
+
+    }else{
+      c(min(xy$y)-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
+    }
+
+ }
+
+
+  xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
+  {c(0,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4}))}
+
+  fun   <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+
+  #1.2 prepare MC data sets for the regeneration points
+  data.MC<-t(vapply(
+    X = seq(2,fit.NumberRegPoints+1,by=1),
+    FUN = function(x){
+      sample(
+        rnorm(n = 10000,
+              mean = sample[x,2], sd = abs(sample[x,3])),
+              size = NumberIterations.MC, replace=TRUE)
+      },
+    FUN.VALUE = vector("numeric", length = NumberIterations.MC)))
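+  ## after the transpose, each column of data.MC holds one complete MC replicate
+  ## of the regeneration points, drawn from N(mean = Lx/Tx, sd = Lx/Tx error)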
+
+  #1.3 do the same for the natural signal
+  data.MC.De <- sample(rnorm(10000, mean = sample[1,2], sd = abs(sample[1,3])),
+                       NumberIterations.MC, replace = TRUE)
+
+  #1.4 initialise x.natural
+  x.natural <- rep(NA_real_, NumberIterations.MC)
+
+  ##1.5 initialise further variables
+  De <- NA
+  De.Error <- NA
+
+
+  ##============================================================================##
+  # FITTING ----------------------------------------------------------------------
+  ##============================================================================##
+  ##3. Fitting values with nonlinear least-squares estimation of the parameters
+
+  ##set functions for fitting
+
+  #EXP
+  fit.functionEXP <- function(a,b,c,x) {a*(1-exp(-(x+c)/b))}
+  fit.formulaEXP <- y ~ a * (1 - exp(-(x+c)/b))
+
+  #EXP+LIN
+  fit.functionEXPLIN<-function(a,b,c,g,x) {a*(1-exp(-(x+c)/b)+(g*x))}
+  fit.formulaEXPLIN <- y ~ a*(1-exp(-(x+c)/b)+(g*x))
+
+  #EXP+EXP
+  fit.functionEXPEXP<-function(a1,a2,b1,b2,x){(a1*(1-exp(-(x)/b1)))+(a2*(1-exp(-(x)/b2)))}
+  fit.formulaEXPEXP <- y ~ (a1*(1-exp(-(x)/b1)))+(a2*(1-exp(-(x)/b2)))
+
+  ##input data for fitting; exclude repeated RegPoints
+  if (fit.includingRepeatedRegPoints == FALSE) {
+    data <-
+      data.frame(x = xy[[1]][!duplicated(xy[[1]])], y = xy[[2]][!duplicated(xy[[1]])])
+    fit.weights <- fit.weights[!duplicated(xy[[1]])]
+    data.MC <- data.MC[!duplicated(xy[[1]]),]
+    xy <- xy[!duplicated(xy[[1]]),]
+
+  }else{
+    data <- data.frame(xy)
+  }
+
+
+  ## for unknown reasons, with only two points the nls() function is trapped in
+  ## an endless loop; therefore the minimum length for data is 3
+  ## (2016-05-17)
+  if((fit.method == "EXP" | fit.method == "EXP+LIN" | fit.method == "EXP+EXP" | fit.method == "EXP OR LIN")
+     && length(data[,1])<=2){
+
+    ##set to LIN
+    fit.method <- "LIN"
+
+    warning("[plot_GrowthCurve()] fitting using an exponential term requires at least 3 dose points! fit.method set to 'LIN'")
+
+    if(verbose){
+      message("[plot_GrowthCurve()] fit.method set to 'LIN', see warnings()")
+    }
+
+
+  }
+
+
+  ##START PARAMETER ESTIMATION
+  ##--------------------------------------------------------------------------##
+  ##general setting of start parameters for fitting
+
+  ##a - estimate a as the maximum of the y-values (Lx/Tx)
+  a <- max(data[,2])
+
+  ##b - get start parameters from a linear fit of the log(y) data
+  ##    (suppress the warning in case one parameter is negative)
+
+  fit.lm <- lm(suppressWarnings(log(data$y))~data$x)
+  b <- as.numeric(1/fit.lm$coefficients[2])
+
+  ##c - get start parameters from a linear fit - offset on x-axis
+  fit.lm<-lm(data$y~data$x)
+  c <- as.numeric(abs(fit.lm$coefficients[1]/fit.lm$coefficients[2]))
+
+  #take slope from x - y scaling
+  g <- max(data[,2]/max(data[,1]))
+
+  #set D01 and D02 (in case of EXP+EXP)
+  D01 <- NA
+  D01.ERROR <- NA
+  D02 <- NA
+  D02.ERROR <- NA
+
+  ##--------------------------------------------------------------------------##
+  ##to be a little bit more flexible, the start parameters vary within a normal distribution
+
+  ##draw 50 start values from a normal distribution around each start value
+  if (fit.method != "LIN") {
+    a.MC <- rnorm(50, mean = a, sd = a / 100)
+
+    if (!is.na(b)) {
+      b.MC <- rnorm(50, mean = b, sd = b / 100)
+    } else{
+      b.MC <- NA
+
+    }
+
+    c.MC <- rnorm(50, mean = c, sd = c / 100)
+    g.MC <- rnorm(50, mean = g, sd = g / 1)
+
+    ##set start vectors (to avoid errors within the loop)
+    a.start <- NA
+    b.start <- NA
+    c.start <- NA
+    g.start <- NA
+  }
+
+  ##--------------------------------------------------------------------------##
+  #===========================================================================##
+  #QDR#
+  if (fit.method == "QDR"){
+
+    ##Do fitting with option to force curve through the origin
+    if(fit.force_through_origin){
+
+      ##linear fitting ... polynomial
+      fit  <- lm(data$y ~  0 + I(data$x) + I(data$x^2), weights = fit.weights)
+
+      ##give function for uniroot
+      De.fs <- function(x, y) {
+        0 + coef(fit)[1] * x + coef(fit)[2] * x ^ 2 - y
+
+      }
+
+
+    }else{
+
+
+      ##linear fitting ... polynomial
+      fit  <- lm(data$y ~  I(data$x) + I(data$x^2), weights = fit.weights)
+
+      ##give function for uniroot
+      De.fs <- function(x, y) {
+        coef(fit)[1] + coef(fit)[2] * x + coef(fit)[3] * x ^ 2 - y
+
+      }
+
+    }
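+
+    ## uniroot() searches for the dose x at which the fitted curve equals the
+    ## natural Lx/Tx, i.e. the root of De.fs, within 0 and 1.5 * the maximum dose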
+
+    ##solve and get De
+    De.uniroot <- try(
+      uniroot(De.fs, y = sample[1,2], lower = 0, upper = max(sample[,1]) * 1.5), silent = TRUE)
+
+    if(!inherits(De.uniroot, "try-error")){
+      De <- round(De.uniroot$root, digits = 2)
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
+      }
+
+    }else{
+      if(verbose) writeLines("[plot_GrowthCurve()] no solution found for QDR fit")
+      De <- NA
+
+    }
+
+
+    # +++++++++++++++++++++++++++++++++++++++++
+
+    ##set progressbar
+    if(txtProgressBar){
+      cat("\n\t Run Monte Carlo loops for error estimation of the QDR fit\n")
+      pb<-txtProgressBar(min=0,max=NumberIterations.MC, char="=", style=3)
+    }
+
+
+    #start loop for Monte Carlo Error estimation
+    fit.MC <- sapply(1:NumberIterations.MC, function(i){
+
+      data <- data.frame(x=xy$x, y=data.MC[,i])
+
+      if(fit.force_through_origin){
+
+        ##linear fitting ... polynomial
+        fit.MC  <- lm(data$y ~  0 + I(data$x) + I(data$x^2), weights = fit.weights)
+
+        ##give function for uniroot
+        De.fs.MC <- function(x, y) {
+          0 + coef(fit.MC)[1] * x + coef(fit.MC)[2] * x ^ 2 - y
+
+        }
+
+
+      }else{
+
+
+        ##linear fitting ... polynomial
+        fit.MC  <- lm(data$y ~  I(data$x) + I(data$x^2), weights = fit.weights)
+
+        ##give function for uniroot
+        De.fs.MC <- function(x, y) {
+          coef(fit.MC)[1] + coef(fit.MC)[2] * x + coef(fit.MC)[3] * x ^ 2 - y
+
+        }
+
+      }
+
+      ##solve and get De
+      De.uniroot.MC <- try(uniroot(
+        De.fs.MC,
+        y = data.MC.De[i],
+        lower = 0,
+        upper = max(sample[, 1]) * 1.5
+      ), silent = TRUE)
+
+      if(!inherits(De.uniroot.MC, "try-error")){
+        De.MC <- round(De.uniroot.MC$root, digits = 2)
+
+      }else{
+        De.MC <- NA
+
+      }
+
+      ##update progress bar
+      if(txtProgressBar) setTxtProgressBar(pb, i)
+
+      return(De.MC)
+
+    })
+
+    if(txtProgressBar) close(pb)
+
+    x.natural<- fit.MC
+  }
+  #===========================================================================##
+  #EXP#
+
+  if (fit.method=="EXP" | fit.method=="EXP OR LIN" | fit.method=="LIN"){
+
+    if((is.na(a) | is.na(b) | is.na(c)) && fit.method != "LIN"){
+
+      warning("[plot_GrowthCurve()] Fit could not be applied to this data set. NULL returned!")
+      return(NULL)
+
+    }
+
+    if(fit.method!="LIN"){
+
+      ##FITTING on GIVEN VALUES##
+      #	--use classic R fitting routine to fit the curve
+
+      ##try to create some start parameters from the input values to make
+      ## the fitting more stable
+      for(i in 1:50){
+
+        a<-a.MC[i];b<-b.MC[i];c<-c.MC[i]
+
+        fit.initial <- try(nls(
+          y ~ fit.functionEXP(a, b, c, x),
+          data = data,
+          start = c(a = a, b = b, c = c),
+          trace = FALSE,
+          algorithm = "port",
+          lower = c(a = 0, b > 0, c = 0),
+          nls.control(
+            maxiter = 100,
+            warnOnly = TRUE,
+            minFactor = 1 / 2048
+          )
+        ),
+        silent = TRUE
+        )
+
+        if(class(fit.initial)!="try-error"){
+          #get parameters out of it
+          parameters<-(coef(fit.initial))
+          b.start[i]<-as.vector((parameters["b"]))
+          a.start[i]<-as.vector((parameters["a"]))
+          c.start[i]<-as.vector((parameters["c"]))
+        }
+      }
+
+      ##use the medians as start parameters for the final fitting
+      a <- median(na.exclude(a.start))
+      b <- median(na.exclude(b.start))
+      c <- median(na.exclude(c.start))
+
+      #FINAL Fit curve on given values
+      fit <- try(minpack.lm::nlsLM(
+        formula = fit.formulaEXP,
+        data = data,
+        start = list(a = a, b = b,c = c),
+        weights = fit.weights,
+        trace = FALSE,
+        algorithm = "LM",
+        lower = if (fit.bounds) {
+          c(0,0,0)
+        }else{
+          c(-Inf,-Inf,-Inf)
+        },
+        upper = if (fit.force_through_origin) {
+          c(Inf, Inf, 0)
+        }else{
+          c(Inf, Inf, Inf)
+        },
+        control = minpack.lm::nls.lm.control(maxiter = 500)
+      ), silent = TRUE
+      )
+
+      if (inherits(fit, "try-error") & inherits(fit.initial, "try-error")){
+
+        if(verbose) writeLines("[plot_GrowthCurve()] try-error for EXP fit")
+
+      }else{
+
+        ##this is to avoid the singular convergence failure due to a perfect fit at the beginning
+        ##this may happen especially for simulated data
+        if(inherits(fit, "try-error") & !inherits(fit.initial, "try-error")){
+          fit <- fit.initial
+          rm(fit.initial)
+
+        }
+
+        #get parameters out of it
+        parameters <- (coef(fit))
+        b <- as.vector((parameters["b"]))
+        a <- as.vector((parameters["a"]))
+        c <- as.vector((parameters["c"]))
+
+
+        #calculate De
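+        ## (obtained by inverting y = a*(1-exp(-(x+c)/b)) for x:
+        ##  x = -c - b*log(1 - y/a))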
+        De<-suppressWarnings(round(-c-b*log(1-sample[1,2]/a), digits=2))
+
+        #print D01 value
+        D01<-round(b,digits=2)
+        if(verbose){
+          writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method," | De = ", De, " | D01 = ",D01))
+        }
+
+
+        ##Monte Carlo Simulation
+        #	--Fit many curves and calculate a new De +/- De_Error
+        #	--take De_Error
+
+        #set variables
+        var.b<-vector(mode="numeric", length=NumberIterations.MC)
+        var.a<-vector(mode="numeric", length=NumberIterations.MC)
+        var.c<-vector(mode="numeric", length=NumberIterations.MC)
+
+        #start loop
+        for (i in 1:NumberIterations.MC) {
+
+          ##set data set
+          data <- data.frame(x = xy$x,y = data.MC[,i])
+
+          fit.MC <- try(minpack.lm::nlsLM(
+            formula = fit.formulaEXP,
+            data = data,
+            start = list(a = a, b = b,c = c),
+            weights = fit.weights,
+            trace = FALSE,
+            algorithm = "LM",
+            lower = if (fit.bounds) {
+              c(0,0,0)
+            }else{
+              c(-Inf,-Inf,-Inf)
+            },
+            upper = if (fit.force_through_origin) {
+              c(Inf, Inf, 0)
+            }else{
+              c(Inf, Inf, Inf)
+            },
+            control = minpack.lm::nls.lm.control(maxiter = 500)
+          ), silent = TRUE
+          )
+
+          #get parameters out of it including error handling
+          if (class(fit.MC)=="try-error") {
+
+            x.natural[i]<-NA
+
+          }else {
+
+            #get parameters out
+            parameters<-coef(fit.MC)
+            var.b[i]<-as.vector((parameters["b"])) #D0
+            var.a[i]<-as.vector((parameters["a"])) #Imax
+            var.c[i]<-as.vector((parameters["c"]))
+
+            #calculate x.natural for error calculation
+            x.natural[i]<-suppressWarnings(
+              round(-var.c[i]-var.b[i]*log(1-data.MC.De[i]/var.a[i]), digits=2))
+
+          }
+
+        }#end for loop
+
+
+        ##write D01.ERROR
+        D01.ERROR <- sd(var.b, na.rm = TRUE)
+
+        ##remove values
+        rm(var.b, var.a, var.c)
+
+      }#endif::try-error fit
+
+    }#endif:fit.method!="LIN"
+    #========================================================================
+    #LIN#
+    ##two options: just linear fit or LIN fit after the EXP fit failed
+
+    #set fit object if it was not set before
+    if(exists("fit")==FALSE){fit<-NA}
+
+    if ((fit.method=="EXP OR LIN" & class(fit)=="try-error") |
+        fit.method=="LIN" | length(data[,1])<2) {
+
+      ##do the fitting; optionally force the fit through the origin
+      if(fit.force_through_origin){
+
+        fit.lm<-lm(data$y ~ 0 + data$x, weights = fit.weights)
+
+        #calculate De
+        De <- round((sample[1,2]/fit.lm$coefficients[1]), digits=2)
+
+
+      }else{
+
+        fit.lm<-lm(data$y ~ data$x, weights = fit.weights)
+
+        #calculate De
+        De <- round((sample[1,2]-fit.lm$coefficients[1])/fit.lm$coefficients[2], digits=2)
+
+      }
+
+
+      ##remove vector labels
+      De <- as.numeric(as.character(De))
+
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
+      }
+
+
+      #start loop for Monte Carlo Error estimation
+      for (i in 1:NumberIterations.MC) {
+
+        data <- data.frame(x=xy$x, y=data.MC[,i])
+
+        if(fit.force_through_origin){
+
+          ##do fitting
+          fit.lmMC <- lm(data$y ~ 0 + data$x, weights=fit.weights)
+
+          #calculate x.natural
+          x.natural[i]<-round((data.MC.De[i]/fit.lmMC$coefficients[1]), digits=2)
+
+
+        }else{
+
+          ##do fitting
+          fit.lmMC <- lm(data$y~ data$x, weights=fit.weights)
+
+
+          #calculate x.natural
+          x.natural[i]<-round((data.MC.De[i]-fit.lmMC$coefficients[1])/
+                                fit.lmMC$coefficients[2], digits=2)
+
+        }
+
+
+
+
+      }#endfor::loop for MC
+
+      #correct for fit.method
+      fit.method<-"LIN"
+
+      ##set fit object
+      if(fit.method=="LIN"){fit<-fit.lm}
+
+    }else{fit.method<-"EXP"}#endif::LIN
+  }#end if EXP (this includes the LIN fit option)
+  #===========================================================================
+  #===========================================================================
+  #EXP+LIN#
+  else if (fit.method=="EXP+LIN") {
+
+
+    ##try some start parameters from the input values to make the fitting more stable
+    for(i in 1:length(a.MC)){
+
+      a<-a.MC[i];b<-b.MC[i];c<-c.MC[i];g<-g.MC[i]
+
+      ##---------------------------------------------------------##
+      ##start: with EXP function
+      fit.EXP<-try(nls(y~fit.functionEXP(a,b,c,x),
+                       data=data,
+                       start=c(a=a,b=b,c=c),
+                       trace=FALSE,
+                       algorithm="port",
+                       lower=c(a=0,b>10,c=0),
+                       nls.control(maxiter=100,warnOnly=FALSE,minFactor=1/1048)
+      ),silent=TRUE)
+
+
+      if(class(fit.EXP)!="try-error"){
+        #get parameters out of it
+        parameters<-(coef(fit.EXP))
+        b<-as.vector((parameters["b"]))
+        a<-as.vector((parameters["a"]))
+        c<-as.vector((parameters["c"]))
+
+        ##end: with EXP function
+        ##---------------------------------------------------------##
+      }
+
+
+      fit<-try(nls(y~fit.functionEXPLIN(a,b,c,g,x),
+                   data=data,
+                   start=c(a=a,b=b,c=c,g=g),
+                   trace=FALSE,
+                   algorithm="port",
+                   lower = if(fit.bounds==TRUE){lower=c(a=0,b>10,c=0,g=0)}else{c()},
+                   nls.control(maxiter=500,warnOnly=FALSE,minFactor=1/2048) #increase max. iterations
+      ),silent=TRUE)
+
+      if(class(fit)!="try-error"){
+        #get parameters out of it
+        parameters<-(coef(fit))
+        b.start[i]<-as.vector((parameters["b"]))
+        a.start[i]<-as.vector((parameters["a"]))
+        c.start[i]<-as.vector((parameters["c"]))
+        g.start[i]<-as.vector((parameters["g"]))
+      }
+
+
+
+    }##end for loop
+
+
+    ##use the medians as start parameters for the final fitting
+    a<-median(na.exclude(a.start))
+    b<-median(na.exclude(b.start))
+    c<-median(na.exclude(c.start))
+    g<-median(na.exclude(g.start))
+
+    ##perform final fitting
+    fit <- try(minpack.lm::nlsLM(
+      formula = fit.formulaEXPLIN,
+      data = data,
+      start = list(a = a, b = b,c = c, g = g),
+      weights = fit.weights,
+      trace = FALSE,
+      algorithm = "LM",
+      lower = if (fit.bounds) {
+        c(0,10,0,0)
+      }else{
+        c(-Inf,-Inf,-Inf,-Inf)
+      },
+      upper = if (fit.force_through_origin) {
+        c(Inf, Inf, 0, Inf)
+      }else{
+        c(Inf, Inf, Inf, Inf)
+      },
+      control = minpack.lm::nls.lm.control(maxiter = 500)
+    ), silent = TRUE
+    )
+
+
+    #if try error stop calculation
+    if(class(fit)!="try-error"){
+
+      #get parameters out of it
+      parameters<-(coef(fit))
+      b<-as.vector((parameters["b"]))
+      a<-as.vector((parameters["a"]))
+      c<-as.vector((parameters["c"]))
+      g<-as.vector((parameters["g"]))
+
+      #problem: analytically it is not easy to calculate x,
+      #use uniroot to solve that problem ... readjust function first
+      f.unirootEXPLIN <- function(a,b,c,g,x,LnTn){fit.functionEXPLIN(a,b,c,g,x)-LnTn}
+
+      temp.De <-  try(uniroot(f = f.unirootEXPLIN,
+                              interval = c(0,max(xy$x)*1.5),
+                              tol = 0.001,
+                              a = a,
+                              b = b,
+                              c = c,
+                              g = g,
+                              LnTn = sample[1,2],
+                              extendInt = "yes",
+                              maxiter = 3000), silent = TRUE)
+
+
+
+      if (class(temp.De) != "try-error") {
+        De <- round(temp.De$root, digits = 2)
+      }else{
+        De <- NA
+      }
+
+
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
+      }
+
+
+      ##Monte Carlo Simulation for error estimation
+      #	--Fit many curves and calculate a new De +/- De_Error
+      #	--take De_Error
+
+      #set variables
+      var.b <- vector(mode="numeric", length=NumberIterations.MC)
+      var.a <- vector(mode="numeric", length=NumberIterations.MC)
+      var.c <- vector(mode="numeric", length=NumberIterations.MC)
+      var.g <- vector(mode="numeric", length=NumberIterations.MC)
+
+      ##set progressbar
+      if(txtProgressBar){
+        cat("\n\t Run Monte Carlo loops for error estimation of the EXP+LIN fit\n")
+        pb<-txtProgressBar(min=0,max=NumberIterations.MC, char="=", style=3)
+      }
+
+
+      #start Monte Carlo loops
+      for(i in  1:NumberIterations.MC){
+
+        data <- data.frame(x=xy$x,y=data.MC[,i])
+
+        ##perform MC fitting
+        fit.MC <- try(minpack.lm::nlsLM(
+          formula = fit.formulaEXPLIN,
+          data = data,
+          start = list(a = a, b = b,c = c, g = g),
+          weights = fit.weights,
+          trace = FALSE,
+          algorithm = "LM",
+          lower = if (fit.bounds) {
+            c(0,10,0,0)
+          }else{
+            c(-Inf,-Inf,-Inf, -Inf)
+          },
+          control = minpack.lm::nls.lm.control(maxiter = 500)
+        ), silent = TRUE
+        )
+
+        #get parameters out of it including error handling
+        if (class(fit.MC)=="try-error") {
+
+          x.natural[i]<-NA
+
+        }else {
+          parameters <- coef(fit.MC)
+          var.b[i] <- as.vector((parameters["b"]))
+          var.a[i] <- as.vector((parameters["a"]))
+          var.c[i] <- as.vector((parameters["c"]))
+          var.g[i] <- as.vector((parameters["g"]))
+
+          #problem: analytically it is not easy to calculate x,
+          #use uniroot to solve this problem
+
+          temp.De.MC <-  try(uniroot(
+            f = f.unirootEXPLIN,
+            interval = c(0,max(xy$x) * 1.5),
+            tol = 0.001,
+            a = var.a[i],
+            b = var.b[i],
+            c = var.c[i],
+            g = var.g[i],
+            LnTn = data.MC.De[i]
+          ), silent = TRUE)
+
+          if (class(temp.De.MC) != "try-error") {
+            x.natural[i] <- temp.De.MC$root
+          }else{
+            x.natural[i] <- NA
+          }
+
+
+        }
+        ##update progress bar
+        if(txtProgressBar) setTxtProgressBar(pb, i)
+
+      }#end for loop
+
+      ##close
+      if(txtProgressBar) close(pb)
+
+      ##remove objects
+      rm(var.b, var.a, var.c, var.g)
+
+    }else{
+
+      #print message
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = NA (fitting FAILED)"))
+
+      }
+
+
+    } #end if "try-error" Fit Method
+
+  } #End if EXP+LIN
+  #==========================================================================
+  #===========================================================================
+  #EXP+EXP#
+  else if (fit.method=="EXP+EXP") {
+
+    a1.start <- NA
+    a2.start <- NA
+    b1.start <- NA
+    b2.start <- NA
+
+    ## try to create some start parameters from the input values to make the fitting more stable
+    for(i in 1:50) {
+      a1 <- a.MC[i];b1 <- b.MC[i];
+      a2 <- a.MC[i] / 2; b2 <- b.MC[i] / 2
+
+      fit.start <- try(nls(
+        y ~ fit.functionEXPEXP(a1,a2,b1,b2,x),
+        data = data,
+        start = c(
+          a1 = a1,a2 = a2,b1 = b1,b2 = b2
+        ),
+        trace = FALSE,
+        algorithm = "port",
+        lower = c(a1 > 0,a2 > 0,b1 > 0,b2 > 0),
+        nls.control(
+          maxiter = 500,warnOnly = FALSE,minFactor = 1 / 2048
+        ) #increase max. iterations
+      ),silent = TRUE)
+
+
+      if (class(fit.start) != "try-error") {
+        #get parameters out of it
+        parameters <- coef(fit.start)
+        a1.start[i] <- as.vector((parameters["a1"]))
+        b1.start[i] <- as.vector((parameters["b1"]))
+        a2.start[i] <- as.vector((parameters["a2"]))
+        b2.start[i] <- as.vector((parameters["b2"]))
+      }
+    }
+
+    ##use obtained parameters for fit input
+    a1.start <- median(a1.start, na.rm = TRUE)
+    b1.start <- median(b1.start, na.rm = TRUE)
+    a2.start <- median(a2.start, na.rm = TRUE)
+    b2.start <- median(b2.start, na.rm = TRUE)
+
+    ##perform final fitting
+    fit <- try(minpack.lm::nlsLM(
+      formula = fit.formulaEXPEXP,
+      data = data,
+      start = list(a1 = a1.start, b1 = b1.start, a2 = a2.start, b2 = b2.start),
+      weights = fit.weights,
+      trace = FALSE,
+      algorithm = "LM",
+      lower = if (fit.bounds) {
+        c(0,0,0,0)
+      }else{
+        c(-Inf,-Inf,-Inf, -Inf)
+      },
+      control = minpack.lm::nls.lm.control(maxiter = 500)
+    ), silent = TRUE
+    )
+
+
+    ##insert if for try-error
+    if (class(fit)!="try-error") {
+
+      #get parameters out of it
+      parameters <- (coef(fit))
+      b1 <- as.vector((parameters["b1"]))
+      b2 <- as.vector((parameters["b2"]))
+      a1 <- as.vector((parameters["a1"]))
+      a2 <- as.vector((parameters["a2"]))
+
+      ##set D0 values
+      D01 <- round(b1,digits = 2)
+      D02 <- round(b2,digits = 2)
+
+
+      #problem: analytically it is not easy to calculate x, use uniroot
+      f.unirootEXPEXP <- function(a1,a2,b1,b2,x,LnTn){fit.functionEXPEXP(a1,a2,b1,b2,x)-LnTn}
+
+      temp.De <-  try(uniroot(f = f.unirootEXPEXP,
+                              interval = c(0,max(xy$x)*1.5),
+                              tol = 0.001,
+                              a1 = a1,
+                              a2 = a2,
+                              b1 = b1,
+                              b2 = b2,
+                              LnTn = sample[1,2],
+                              extendInt = "yes",
+                              maxiter = 3000), silent = TRUE)
+
+
+      if (class(temp.De) != "try-error") {
+        De <- round(temp.De$root, digits = 2)
+      }else{
+        De <- NA
+      }
+
+      ##remove object
+      rm(temp.De)
+
+      #print D0 and De values
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De, " | D01 = ", D01, " | D02 = ", D02))
+      }
+
+
+      ##Monte Carlo Simulation for error estimation
+      #	--Fit many curves and calculate a new De +/- De_Error
+      #	--take De_Error from the simulation
+      # --comparison of De from the MC and original fitted De gives a value for quality
+
+      #set variables
+      var.b1<-vector(mode="numeric", length=NumberIterations.MC)
+      var.b2<-vector(mode="numeric", length=NumberIterations.MC)
+      var.a1<-vector(mode="numeric", length=NumberIterations.MC)
+      var.a2<-vector(mode="numeric", length=NumberIterations.MC)
+
+      ##progress bar
+      if(txtProgressBar){
+        cat("\n\t Run Monte Carlo loops for error estimation of the EXP+EXP fit\n")
+        pb<-txtProgressBar(min=0,max=NumberIterations.MC, initial=0, char="=", style=3)
+      }
+
+      #start Monte Carlo loops
+      for (i in 1:NumberIterations.MC) {
+
+        #update progress bar
+        if(txtProgressBar) setTxtProgressBar(pb,i)
+
+        data<-data.frame(x=xy$x,y=data.MC[,i])
+
+        ##perform final fitting
+        fit.MC <- try(minpack.lm::nlsLM(
+          formula = fit.formulaEXPEXP,
+          data = data,
+          start = list(a1 = a1, b1 = b1, a2 = a2, b2 = b2),
+          weights = fit.weights,
+          trace = FALSE,
+          algorithm = "LM",
+          lower = if (fit.bounds) {
+            c(0,0,0,0)
+          }else{
+            c(-Inf,-Inf,-Inf, -Inf)
+          },
+          control = minpack.lm::nls.lm.control(maxiter = 500)
+        ), silent = TRUE
+        )
+
+        #get parameters out of it including error handling
+        if (class(fit.MC)=="try-error") {
+
+          x.natural[i]<-NA
+
+        }else {
+          parameters <- (coef(fit.MC))
+          var.b1[i] <- as.vector((parameters["b1"]))
+          var.b2[i] <- as.vector((parameters["b2"]))
+          var.a1[i] <- as.vector((parameters["a1"]))
+          var.a2[i] <- as.vector((parameters["a2"]))
+
+          #problem: analytically it is not easy to calculate x; use uniroot again
+
+          temp.De.MC <-  try(uniroot(
+            f = f.unirootEXPEXP,
+            interval = c(0,max(xy$x) * 1.5),
+            tol = 0.001,
+            a1 = var.a1[i],
+            a2 = var.a2[i],
+            b1 = var.b1[i],
+            b2 = var.b2[i],
+            LnTn = data.MC.De[i]
+          ), silent = TRUE)
+
+          if (class(temp.De.MC) != "try-error") {
+            x.natural[i] <- temp.De.MC$root
+          }else{
+            x.natural[i] <- NA
+          }
+
+        } #end if "try-error" MC simulation
+
+      } #end for loop
+
+      ##write D01.ERROR and D02.ERROR
+      D01.ERROR <- sd(var.b1, na.rm = TRUE)
+      D02.ERROR <- sd(var.b2, na.rm = TRUE)
+
+      ##remove values
+      rm(var.b1, var.b2, var.a1, var.a2)
+
+    }else{
+
+      #print message
+      if(verbose){
+        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = NA (fitting FAILED)"))
+
+      }
+
+    } #end if "try-error" Fit Method
+
+
+    ##close
+    if (txtProgressBar && exists("pb")) close(pb)
+
+
+    #===========================================================================
+  } #End if Fit Method
+
+
+  #Get De values from the Monte Carlo simulation
+
+  #calculate mean and sd (ignore NA values)
+  De.MonteCarlo <- round(mean(na.exclude(x.natural)), digits = 2)
+
+  #De.Error is Error of the whole De (ignore NaN values)
+  De.Error <- sd(na.exclude(x.natural))
+
+  ##choose format in dependency of the size of the error
+  De.Error <- ifelse(De.Error <= 0.01,
+                     format(De.Error, scientific = TRUE, digits = 2),
+                     round(De.Error, digits = 2))
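+  ## e.g. De.Error = 0.004 becomes "4e-03", while De.Error = 1.2345 becomes 1.23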
+
+
+
+
+  # Formula creation --------------------------------------------------------
+  if(!is(fit,"try-error") & !is.na(fit[1])){
+
+    if(fit.method == "EXP") {
+      f <- parse(text = paste0(format(coef(fit)[1], scientific = TRUE), " * (1 - exp( - ( x + ",
+                               format(coef(fit)[3], scientific = TRUE), ") / ",
+                               format(coef(fit)[2], scientific = TRUE), "))"))
+
+    }
+
+    if(fit.method == "EXP+LIN") {
+      f <- parse(text = paste0(format(coef(fit)[1], scientific = TRUE), " * (1-exp(-(x+",
+                               format(coef(fit)[3], scientific = TRUE), ") / ",
+                               format(coef(fit)[2], scientific = TRUE), ")+(",
+                               format(coef(fit)[4], scientific = TRUE), " * x))"))
+    }
+
+    if(fit.method == "EXP+EXP") {
+      f <- parse(text = paste0(format(coef(fit)[1], scientific = TRUE), " * (1 - exp( -x / ",
+                               format(coef(fit)[3], scientific = TRUE), ")) + ",
+                               format(coef(fit)[2], scientific = TRUE), " * (1 - exp( -x / ",
+                               format(coef(fit)[4], scientific = TRUE), "))"))
+    }
+
+    if(fit.method == "LIN" &  fit.force_through_origin) {
+      f <- parse(text = paste0(format(fit.lm$coefficients[1], scientific = TRUE), " * x"))
+
+    }
+
+    if(fit.method == "LIN" &  !fit.force_through_origin) {
+      f <- parse(text = paste0(format(fit.lm$coefficients[2], scientific = TRUE),
+                               "* x + ", format(fit.lm$coefficients[1], scientific = TRUE)))
+
+    }
+
+    if(fit.method == "QDR"  &  fit.force_through_origin) {
+      f <- parse(text = paste0(format(coef(fit)[1], scientific = TRUE), " * x ",
+                               " + ", format(coef(fit)[2], scientific = TRUE), " * x^2"
+      ))
+
+    }
+
+    if(fit.method == "QDR" & !fit.force_through_origin) {
+      f <- parse(text = paste0(format(coef(fit)[1], scientific = TRUE),
+                               " + ", format(coef(fit)[2], scientific = TRUE), " * x ",
+                               " + ", format(coef(fit)[3], scientific = TRUE), " * x^2"
+      ))
+
+    }
+
+  }else{
+
+    f <- NA
+
+  }
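+  ## if fitting succeeded, the stored expression has only 'x' free, so it can
+  ## be evaluated directly, e.g. eval(f, list(x = 100)) gives the modelled
+  ## Lx/Tx at a dose of 100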
+
+  ##============================================================================##
+  # PLOTTING ---------------------------------------------------------------------
+  ##============================================================================##
+
+  ##5. Plotting if plotOutput==TRUE
+  if(output.plot) {
+
+    ##set plot check
+    plot_check <- NULL
+
+    ##cheat the R check
+    x<-NULL; rm(x)
+
+    #PAR	#open plot area
+    if(output.plot == TRUE &
+       output.plotExtended == TRUE &
+       output.plotExtended.single == FALSE){
+
+      ####grep recent plot parameter for later reset
+      par.default.complex <- par(no.readonly = TRUE)
+      on.exit(par(par.default.complex))
+
+      ##set new parameter
+      layout(matrix(c(1,1,1,1,2,3), 3, 2, byrow=TRUE), respect=TRUE)
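+      ## plot 1 (growth curve) spans the two upper rows; plots 2 and 3
+      ## (MC histogram and test dose response) share the bottom row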
+      par(cex=0.8*cex.global)
+
+    }else{
+
+      par.default.single <- par(no.readonly = TRUE)$cex
+      on.exit(par(cex = par.default.single))
+      par(cex=cex.global)
+
+    }
+
+    #PLOT		#Plot input values
+
+    ##Make selection to support manual number of reg points input
+    if(exists("fit.RegPointsReal")==TRUE){
+
+      ##here the object sample has to be used otherwise the first regeneration point is not plotted.
+      temp.xy.plot  <- sample[fit.RegPointsReal,]
+
+    }else{
+
+      temp.xy.plot  <- xy[1:fit.NumberRegPointsReal,]
+
+    }
+
+    plot_check <- try(plot(
+      temp.xy.plot[, 1:2],
+      ylim = ylim,
+      xlim = xlim,
+      pch = 19,
+      xlab = xlab,
+      ylab = ylab
+    ),
+    silent = TRUE)
+
+    if (!is(plot_check, "try-error")) {
+      #ADD HEADER
+      title(main = main, line = 3)
+
+      #CURVE	#plot fitted curve
+      if (fit.method == "EXP+LIN") {
+        try(curve(a * (1 - exp(-(x + c) / b) + (g * x)), lwd = 1.5, add = TRUE))
+      }
+      else if (fit.method == "LIN" & fit.force_through_origin) {
+        curve(fit.lm$coefficients[1] * x, lwd = 1.5, add = TRUE)
+      }
+      else if (fit.method == "LIN") {
+        curve(fit.lm$coefficients[2] * x + fit.lm$coefficients[1],
+              lwd = 1.5,
+              add = TRUE)
+      }
+      else if (fit.method == "QDR" & fit.force_through_origin) {
+        curve(coef(fit)[1] * x + coef(fit)[2] * x ^ 2,
+              lwd = 1.5,
+              add = TRUE)
+      }
+      else if (fit.method == "QDR") {
+        curve(coef(fit)[1] + coef(fit)[2] * x + coef(fit)[3] * x ^ 2,
+              lwd = 1.5,
+              add = TRUE)
+      }
+      else if (fit.method == "EXP") {
+        try(curve(fit.functionEXP(a, b, c, x), lwd = 1.5, add = TRUE))
+      }
+      else if (fit.method == "EXP+EXP") {
+        try(curve(fit.functionEXPEXP(a1, a2, b1, b2, x), lwd = 1.5, add = TRUE))
+      }
+
+      ##POINTS	#Plot Reg0 and Repeated Points
+
+      #Natural value
+      points(sample[1, 1:2], col = "red")
+      segments(sample[1, 1], sample[1, 2] - sample[1, 3],
+               sample[1, 1], sample[1, 2] + sample[1, 3], col = "red")
+
+      #Repeated Point
+      points(xy[which(duplicated(xy[, 1])), 1], xy[which(duplicated(xy[, 1])), 2],
+             pch = 2)
+
+      #Reg Point 0
+      points(xy[which(xy[, 1] == 0), 1], xy[which(xy[, 1] == 0), 2], pch = 1,
+             cex = 1.5 * cex.global)
+
+      ##ARROWS	#y-error Bars
+
+      segments(xy$x, xy$y - y.Error, xy$x, xy$y + y.Error)
+
+      ##LINES	#Insert Ln/Tn
+      if (is.na(De)) {
+        lines(
+          c(0, max(sample[, 1]) * 2),
+          c(sample[1, 2], sample[1, 2]),
+          col = "red",
+          lty = 2,
+          lwd = 1.25
+        )
+
+      } else{
+        try(lines(
+          c(0, De),
+          c(sample[1, 2], sample[1, 2]),
+          col = "red",
+          lty = 2,
+          lwd = 1.25
+        ), silent = TRUE)
+
+      }
+
+      try(lines(c(De, De),
+                c(0, sample[1, 2]),
+                col = "red",
+                lty = 2,
+                lwd = 1.25), silent = TRUE)
+      try(points(De, sample[1, 2], col = "red", pch = 19), silent = TRUE)
+
+      ## check/set mtext
+      mtext <- if ("mtext" %in% names(list(...))) {
+        list(...)$mtext
+      } else {
+        substitute(D[e] == De,
+                   list(De = paste(
+                     De, "\u00B1", De.Error, " | fit: ", fit.method
+                   )))
+      }
+
+
+
+      ##TEXT		#Insert fit and result
+      try(mtext(side = 3,
+                mtext,
+                line = 0.5,
+                cex = 0.8 * cex.global), silent = TRUE)
+
+      #write error message in plot if De could not be calculated
+      try(if (is.na(De)) {
+        text(
+          sample[2, 1],
+          0,
+          "Error: De could not be calculated!",
+          adj = c(0, 0),
+          cex = 0.8,
+          col = "red"
+        )
+      }, silent = TRUE)
+
+      ##LEGEND	#plot legend
+
+      legend(
+        "topleft",
+        c("REG points", "REG point repeated", "REG point 0"),
+        pch = c(19, 2, 1),
+        cex = 0.8 * cex.global,
+        bty = "n"
+      )
+
+      ##plot only if wanted
+      if (output.plot == TRUE & output.plotExtended == TRUE) {
+        ##HIST		#try to plot histogram of De values from the Monte Carlo simulation
+
+        if (output.plotExtended.single != TRUE) {
+          par(cex = 0.7 * cex.global)
+
+        }
+
+        ##(A) Calculate histogram data
+        try(histogram <- hist(x.natural, plot = FALSE), silent = TRUE)
+
+        #to avoid errors plot only if histogram exists
+        if (exists("histogram") && length(histogram$counts) > 2) {
+          ##calculate normal distribution curves for overlay
+          norm.curve.x <- seq(min(x.natural, na.rm = TRUE),
+                              max(x.natural, na.rm = TRUE),
+                              length = 101)
+
+          norm.curve.y <- dnorm(
+            norm.curve.x,
+            mean = mean(x.natural, na.rm = TRUE),
+            sd = sd(x.natural, na.rm = TRUE)
+          )
+
+          ##plot histogram
+          histogram <- try(hist(
+            x.natural,
+            xlab = xlab,
+            ylab = "Frequency",
+            main = expression(paste(D[e], " from MC simulation")),
+            freq = FALSE,
+            border = "white",
+            axes = FALSE,
+            ylim = c(0, max(norm.curve.y)),
+            sub =
+              paste(
+                "n = ",
+                NumberIterations.MC,
+                ", valid fits =",
+                length(na.exclude(x.natural))
+              ),
+            col = "grey"
+          ), silent = TRUE)
+
+          if (!is(histogram, "try-error")) {
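+            ## the histogram was drawn with freq = FALSE (density scale), so
+            ## the left axis is relabelled below with the corresponding counts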
+            ##add axes
+            axis(side = 1)
+            axis(
+              side = 2,
+              at = seq(min(histogram$density),
+                       max(histogram$density),
+                       length = 5),
+              labels = round(seq(
+                min(histogram$counts), max(histogram$counts), length = 5
+              ),
+              digits = 0)
+            )
+
+            ##add norm curve
+            lines(norm.curve.x, norm.curve.y, col = "red")
+
+            ##add rug
+            rug(x.natural)
+
+            ##write De + Error from Monte Carlo simulation + write quality of error estimation
+            try(mtext(side = 3,
+                      substitute(D[e[MC]] == De,
+                                 list(
+                                   De = paste(
+                                     De.MonteCarlo,
+                                     "\u00B1",
+                                     De.Error,
+                                     " | quality = ",
+                                     round((1 - abs(De - De.MonteCarlo) / De) * 100,
+                                           digits =
+                                             1),
+                                     "%"
+                                   )
+                                 )),
+                      cex = 0.6 * cex.global), silent = TRUE)
+
+          }else{
+            plot_check <- histogram
+          }
+
+        } else {
+          plot_check <- try(plot(
+            NA,
+            NA,
+            xlim = c(0, 10),
+            ylim = c(0, 10),
+            main = expression(paste(D[e], " from Monte Carlo simulation"))),
+            silent = TRUE
+          )
+
+          if(!is(plot_check,"try-error")){
+            text(5, 5, "not available")
+
+          }
+
+        }#end ifelse
+
+
+        ##PLOT		#plot the test dose response curve if available, otherwise state "not available"
+        #plot Tx/Tn values to visualise sensitivity change
+        if (!is(plot_check, "try-error")) {
+          if ("TnTx" %in% colnames(sample)) {
+            plot(
+              1:length(sample[, "TnTx"]),
+              sample[1:(length(sample[, "TnTx"])), "TnTx"] / sample[1, "TnTx"],
+              xlab = "SAR cycle",
+              ylab = expression(paste(T[n] / T[x])),
+              main = "Test dose response",
+              type = "o",
+              pch = 20
+            )
+
+            ##LINES		#plot 1 line
+            lines(c(1, length(sample[, "TnTx"])), c(1, 1), lty = 2, col = "gray")
+          } else {
+            plot(
+              NA,
+              NA,
+              xlim = c(0, 10),
+              ylim = c(0, 10),
+              main = "Test dose response"
+            )
+            text(5, 5, "not available\n no TnTx column")
+          }#end if else
+        }
+
+
+        ## FUN by R Luminescence Team
+        if (fun == TRUE) {
+          sTeve()
+        }
+
+      }#endif::output.plotExtended
+
+    }#end if plotOutput
+
+    ##reset graphic device if the plotting failed!
+    if(is(plot_check, "try-error")){
+      try(stop("[plot_GrowthCurve()] Figure margins too large, nothing plotted, but results returned!", call. = FALSE),)
+      dev.off()
+    }
+
+  }
+
+  ##RETURN - return De values and parameter
+  output <- try(data.frame(
+    De = De,
+    De.Error = De.Error,
+    D01 = D01,
+    D01.ERROR = D01.ERROR,
+    D02 = D02,
+    D02.ERROR = D02.ERROR,
+    De.MC = De.MonteCarlo,
+    Fit = fit.method
+  ),
+  silent = TRUE
+  )
+
+  ##make RLum.Results object
+  output.final <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      De = output,
+      De.MC = x.natural,
+      Fit = fit,
+      Formula = f
+    ),
+    info = list(
+      call = sys.call()
+    )
+  )
+  invisible(output.final)
+
+}
+
diff --git a/R/plot_Histogram.R b/R/plot_Histogram.R
new file mode 100644
index 0000000..0e349c6
--- /dev/null
+++ b/R/plot_Histogram.R
@@ -0,0 +1,773 @@
+#' Plot a histogram with separate error plot
+#'
+#' Function plots a predefined histogram with an accompanying error plot as
+#' suggested by Rex Galbraith at the UK LED in Oxford 2010.
+#'
+#' If the normal curve is added, the y-axis in the histogram will show the
+#' probability density.\cr\cr
+#' A statistic summary, i.e. a collection of statistic measures of
+#' centrality and dispersion (and further measures) can be added by specifying
+#' one or more of the following keywords: \code{"n"} (number of samples),
+#' \code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+#' \code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+#' deviation in percent), \code{"sdrel.weighted"} (error-weighted relative
+#' standard deviation in percent), \code{"sdabs"} (absolute standard deviation),
+#' \code{"sdabs.weighted"} (error-weighted absolute standard deviation),
+#' \code{"serel"} (relative standard error), \code{"serel.weighted"} (
+#' error-weighted relative standard error), \code{"seabs"} (absolute standard
+#' error), \code{"seabs.weighted"} (error-weighted absolute standard error),
+#' \code{"kurtosis"} (kurtosis) and \code{"skewness"} (skewness).
+#'
+#' @param data \code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+#' object (required): for \code{data.frame}: two columns: De (\code{data[,1]})
+#' and De error (\code{data[,2]})
+#'
+#' @param na.rm \code{\link{logical}} (with default): excludes \code{NA}
+#' values from the data set prior to any further operations.
+#'
+#' @param mtext \code{\link{character}} (optional): further sample information
+#' (\link{mtext}).
+#'
+#' @param cex.global \code{\link{numeric}} (with default): global scaling
+#' factor.
+#'
+#' @param se \code{\link{logical}} (optional): plots standard error points over
+#' the histogram, default is \code{TRUE}.
+#'
+#' @param rug \code{\link{logical}} (optional): adds rugs to the histogram,
+#' default is \code{TRUE}.
+#'
+#' @param normal_curve \code{\link{logical}} (with default): adds a normal
+#' curve to the histogram. Mean and sd are calculated from the input data. More
+#' see details section.
+#'
+#' @param summary \code{\link{character}} (optional): add statistic measures of
+#' centrality and dispersion to the plot. Can be one or more of several
+#' keywords. See details for available keywords.
+#'
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used. In case of coordinate
+#' specification, y-coordinate refers to the right y-axis.
+#'
+#' @param colour \code{\link{numeric}} or \link{character} (with default):
+#' optional vector of length 4 which specifies the colours of the following
+#' plot items in exactly this order: histogram bars, rug lines, normal
+#' distribution curve and standard error points\cr (e.g., \code{c("grey",
+#' "black", "red", "grey")}).
+#'
+#' @param interactive \code{\link{logical}} (with default): create an interactive
+#' histogram plot (requires the 'plotly' package)
+#'
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot}} or \code{\link{hist}}. If y-axis labels are provided,
+#' these must be specified as a vector of length 2 since the plot features two
+#' axes (e.g. \code{ylab = c("axis label 1", "axis label 2")}). Y-axes limits
+#' (\code{ylim}) must be provided as vector of length four, with the first two
+#' elements specifying the left axes limits and the latter two elements giving
+#' the right axis limits.
+#'
+#' @note The input data is not restricted to a specific type.
+#' @section Function version: 0.4.4
+#' @author Michael Dietze, GFZ Potsdam (Germany), \cr Sebastian Kreutzer,
+#' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#' @seealso \code{\link{hist}}, \code{\link{plot}}
+#' @examples
+#'
+#' ## load data
+#' data(ExampleData.DeValues, envir = environment())
+#' ExampleData.DeValues <-
+#'   Second2Gray(ExampleData.DeValues$BT998, dose.rate = c(0.0438,0.0019))
+#'
+#' ## plot histogram the easiest way
+#' plot_Histogram(ExampleData.DeValues)
+#'
+#' ## plot histogram with some more modifications
+#' plot_Histogram(ExampleData.DeValues,
+#'                rug = TRUE,
+#'                normal_curve = TRUE,
+#'                cex.global = 0.9,
+#'                pch = 2,
+#'                colour = c("grey", "black", "blue", "green"),
+#'                summary = c("n", "mean", "sdrel"),
+#'                summary.pos = "topleft",
+#'                main = "Histogram of De-values",
+#'                mtext = "Example data set",
+#'                ylab = c(expression(paste(D[e], " distribution")),
+#'                         "Standard error"),
+#'                xlim = c(100, 250),
+#'                ylim = c(0, 0.1, 5, 20))
+#'
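+#' ## create an interactive histogram (requires the 'plotly' package)
+#' \dontrun{
+#' plot_Histogram(ExampleData.DeValues, interactive = TRUE)
+#' }
+#'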
+#'
+#' @export
+plot_Histogram <- function(
+  data,
+  na.rm = TRUE,
+  mtext,
+  cex.global,
+  se,
+  rug,
+  normal_curve,
+  summary,
+  summary.pos,
+  colour,
+  interactive = FALSE,
+  ...
+) {
+
+  # Integrity tests ---------------------------------------------------------
+  ## check/adjust input data structure
+  if(is(data, "RLum.Results") == FALSE &
+     is(data, "data.frame") == FALSE) {
+
+    stop(paste("[plot_Histogram()] Input data format is neither",
+               "'data.frame' nor 'RLum.Results'"))
+  } else {
+
+    if(is(data, "RLum.Results") == TRUE) {
+      data <- get_RLum(data, "data")[,1:2]
+    }
+  }
+
+  ## handle error-free data sets
+  if(length(data) < 2) {
+    data <- cbind(data, rep(NA, length(data)))
+  }
+
+
+  ## Set general parameters ---------------------------------------------------
+  ## Check/set default parameters
+  if(missing(cex.global) == TRUE) {
+    cex.global <- 1
+  }
+
+  if(missing(mtext) == TRUE) {
+    mtext <- ""
+  }
+
+  if(missing(se) == TRUE) {
+    se <- TRUE
+  }
+
+  if(missing(rug) == TRUE) {
+    rug <- TRUE
+  }
+
+  if(missing(colour) == TRUE) {
+    colour = c("white", "black", "red", "black")
+  }
+
+  if(missing(summary) == TRUE) {
+    summary <- ""
+  }
+
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- "sub"
+  }
+
+  if(missing(normal_curve) == TRUE) {
+    normal_curve <- FALSE
+  }
+
+  ## read out additional arguments list
+  extraArgs <- list(...)
+
+  ## define fun
+  if("fun" %in% names(extraArgs)) {
+    fun <- extraArgs$fun
+  } else {
+    fun <- FALSE
+  }
+
+  ## optionally, count and exclude NA values and print result
+  if(na.rm == TRUE) {
+    n.NA <- sum(is.na(data[,1]))
+    if(n.NA == 1) {
+      print("1 NA value excluded.")
+    } else if(n.NA > 1) {
+      print(paste(n.NA, "NA values excluded."))
+    }
+    data <- data[!is.na(data[,1]),]
+  }
+
+  if("main" %in% names(extraArgs)) {
+    main.plot <- extraArgs$main
+  } else {
+    main.plot <- "Histogram"
+  }
+
+  if("xlab" %in% names(extraArgs)) {
+    xlab.plot <- extraArgs$xlab
+  } else {
+    xlab.plot <- expression(paste(D[e], " [Gy]"))
+  }
+
+  if("ylab" %in% names(extraArgs)) {
+    ylab.plot <- extraArgs$ylab
+  } else {
+    ylab.plot <- c("Frequency",
+                   "Standard error")
+  }
+
+  if("breaks" %in% names(extraArgs)) {
+    breaks.plot <- extraArgs$breaks
+  } else {
+    breaks.plot <- hist(x = data[,1],
+                        plot = FALSE)$breaks
+  }
+
+  if("xlim" %in% names(extraArgs)) {
+    xlim.plot <- extraArgs$xlim
+  } else {
+    xlim.plot <- range(breaks.plot)
+  }
+
+  if("ylim" %in% names(extraArgs)) {
+    ylim.plot <- extraArgs$ylim
+  } else {
+    H.lim <- hist(data[,1],
+                  breaks = breaks.plot,
+                  plot = FALSE)
+    if(normal_curve == TRUE) {
+      left.ylim <- c(0, max(H.lim$density))
+    } else {
+      left.ylim <- c(0, max(H.lim$counts))
+    }
+    range.error <- try(expr = range(data[,2], na.rm = TRUE),
+                       silent = TRUE)
+    range.error[1] <- ifelse(is.infinite(range.error[1]), 0, range.error[1])
+    range.error[2] <- ifelse(is.infinite(range.error[2]), 0, range.error[2])
+    ylim.plot <- c(left.ylim, range.error)
+  }
+
+  if("pch" %in% names(extraArgs)) {
+    pch.plot <- extraArgs$pch
+  } else {
+    pch.plot <- 1
+  }
+  ## Set plot area format
+  par(mar = c(4.5, 4.5, 4.5, 4.5),
+      cex = cex.global)
+
+  ## Plot histogram -----------------------------------------------------------
+  HIST <- hist(data[,1],
+               main = "",
+               xlab = xlab.plot,
+               ylab = ylab.plot[1],
+               xlim = xlim.plot,
+               ylim = ylim.plot[1:2],
+               breaks = breaks.plot,
+               freq = !normal_curve,
+               col = colour[1]
+  )
+
+  ## add title
+  title(line = 2,
+        main = main.plot)
+
+  ## Optionally, add rug ------------------------------------------------------
+  if(rug == TRUE) {rug(data[,1], col = colour[2])}
+
+  ## Optionally, add a normal curve based on the data -------------------------
+  if(normal_curve == TRUE){
+    ## cheat the R check routine, tztztz how neat
+    x <- NULL
+    rm(x)
+
+    ## add normal distribution curve
+    curve(dnorm(x,
+                mean = mean(na.exclude(data[,1])),
+                sd = sd(na.exclude(data[,1]))),
+          col = colour[3],
+          add = TRUE,
+          lwd = 1.2 * cex.global)
+  }
+
+  ## calculate and paste statistical summary
+  data.stats <- list(data = data)
+
+  ## calculate and paste statistical summary
+  De.stats <- matrix(nrow = length(data), ncol = 18)
+  colnames(De.stats) <- c("n",
+                          "mean",
+                          "mean.weighted",
+                          "median",
+                          "median.weighted",
+                          "kde.max",
+                          "sd.abs",
+                          "sd.rel",
+                          "se.abs",
+                          "se.rel",
+                          "q25",
+                          "q75",
+                          "skewness",
+                          "kurtosis",
+                          "sd.abs.weighted",
+                          "sd.rel.weighted",
+                          "se.abs.weighted",
+                          "se.rel.weighted")
+
+  for(i in 1:length(data)) {
+    statistics <- calc_Statistics(data)
+    De.stats[i,1] <- statistics$weighted$n
+    De.stats[i,2] <- statistics$unweighted$mean
+    De.stats[i,3] <- statistics$weighted$mean
+    De.stats[i,4] <- statistics$unweighted$median
+    De.stats[i,5] <- statistics$weighted$median
+    De.stats[i,7] <- statistics$unweighted$sd.abs
+    De.stats[i,8] <- statistics$unweighted$sd.rel
+    De.stats[i,9] <- statistics$unweighted$se.abs
+    De.stats[i,10] <- statistics$unweighted$se.rel
+    De.stats[i,11] <- quantile(data[,1], 0.25)
+    De.stats[i,12] <- quantile(data[,1], 0.75)
+    De.stats[i,13] <- statistics$unweighted$skewness
+    De.stats[i,14] <- statistics$unweighted$kurtosis
+    De.stats[i,15] <- statistics$weighted$sd.abs
+    De.stats[i,16] <- statistics$weighted$sd.rel
+    De.stats[i,17] <- statistics$weighted$se.abs
+    De.stats[i,18] <- statistics$weighted$se.rel
+
+    ##kdemax - a little redundant here, as it is computed again below
+    if(nrow(data) >= 2){
+      De.density <-density(x = data[,1],
+                           kernel = "gaussian",
+                           from = xlim.plot[1],
+                           to = xlim.plot[2])
+
+      De.stats[i,6] <- De.density$x[which.max(De.density$y)]
+
+    }else{
+      De.density <- NA
+      De.stats[i,6] <- NA
+
+    }
+
+  }
+
+  label.text <- list(NA)
+
+  if(summary.pos[1] != "sub") {
+    n.rows <- length(summary)
+
+    for(i in 1:length(data)) {
+      stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "")
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          paste(
+                            "",
+                            ifelse("n" %in% summary[j] == TRUE,
+                                   paste("n = ",
+                                         De.stats[i,1],
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean" %in% summary[j] == TRUE,
+                                   paste("mean = ",
+                                         round(De.stats[i,2], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean.weighted" %in% summary[j] == TRUE,
+                                   paste("weighted mean = ",
+                                         round(De.stats[i,3], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median" %in% summary[j] == TRUE,
+                                   paste("median = ",
+                                         round(De.stats[i,4], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median.weighted" %in% summary[j] == TRUE,
+                                   paste("weighted median = ",
+                                         round(De.stats[i,5], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kdemax" %in% summary[j] == TRUE,
+                                   paste("kdemax = ",
+                                         round(De.stats[i,6], 2),
+                                         " \n ",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdabs" %in% summary[j] == TRUE,
+                                   paste("sd = ",
+                                         round(De.stats[i,7], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdrel" %in% summary[j] == TRUE,
+                                   paste("rel. sd = ",
+                                         round(De.stats[i,8], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("seabs" %in% summary[j] == TRUE,
+                                   paste("se = ",
+                                         round(De.stats[i,9], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("serel" %in% summary[j] == TRUE,
+                                   paste("rel. se = ",
+                                         round(De.stats[i,10], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("skewness" %in% summary[j] == TRUE,
+                                   paste("skewness = ",
+                                         round(De.stats[i,13], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kurtosis" %in% summary[j] == TRUE,
+                                   paste("kurtosis = ",
+                                         round(De.stats[i,14], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdabs.weighted" %in% summary[j] == TRUE,
+                                   paste("abs. weighted sd = ",
+                                         round(De.stats[i,15], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdrel.weighted" %in% summary[j] == TRUE,
+                                   paste("rel. weighted sd = ",
+                                         round(De.stats[i,16], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("seabs.weighted" %in% summary[j] == TRUE,
+                                   paste("abs. weighted se = ",
+                                         round(De.stats[i,17], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("serel.weighted" %in% summary[j] == TRUE,
+                                   paste("rel. weighted se = ",
+                                         round(De.stats[i,18], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            sep = ""))
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]] <- paste(stops,
+                                                    summary.text,
+                                                    stops,
+                                                    sep = "")
+    }
+  } else {
+    for(i in 1:length(data)) {
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          ifelse("n" %in% summary[j] == TRUE,
+                                 paste("n = ",
+                                       De.stats[i,1],
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean" %in% summary[j] == TRUE,
+                                 paste("mean = ",
+                                       round(De.stats[i,2], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean.weighted" %in% summary[j] == TRUE,
+                                 paste("weighted mean = ",
+                                       round(De.stats[i,3], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median" %in% summary[j] == TRUE,
+                                 paste("median = ",
+                                       round(De.stats[i,4], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median.weighted" %in% summary[j] == TRUE,
+                                 paste("weighted median = ",
+                                       round(De.stats[i,5], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kdemax" %in% summary[j] == TRUE,
+                                 paste("kdemax = ",
+                                       round(De.stats[i,6], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdrel" %in% summary[j] == TRUE,
+                                 paste("rel. sd = ",
+                                       round(De.stats[i,8], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdabs" %in% summary[j] == TRUE,
+                                 paste("abs. sd = ",
+                                       round(De.stats[i,7], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("serel" %in% summary[j] == TRUE,
+                                 paste("rel. se = ",
+                                       round(De.stats[i,10], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("seabs" %in% summary[j] == TRUE,
+                                 paste("abs. se = ",
+                                       round(De.stats[i,9], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("skewness" %in% summary[j] == TRUE,
+                                 paste("skewness = ",
+                                       round(De.stats[i,13], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kurtosis" %in% summary[j] == TRUE,
+                                 paste("kurtosis = ",
+                                       round(De.stats[i,14], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdabs.weighted" %in% summary[j] == TRUE,
+                                 paste("abs. weighted sd = ",
+                                       round(De.stats[i,15], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdrel.weighted" %in% summary[j] == TRUE,
+                                 paste("rel. weighted sd = ",
+                                       round(De.stats[i,16], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("seabs.weighted" %in% summary[j] == TRUE,
+                                 paste("abs. weighted se = ",
+                                       round(De.stats[i,17], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("serel.weighted" %in% summary[j] == TRUE,
+                                 paste("rel. weighted se = ",
+                                       round(De.stats[i,18], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 "")
+        )
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]]  <- paste(
+        "  ",
+        summary.text,
+        sep = "")
+    }
+
+    ## remove outer vertical lines from string
+    for(i in 2:length(label.text)) {
+      label.text[[i]] <- substr(x = label.text[[i]],
+                                start = 3,
+                                stop = nchar(label.text[[i]]) - 3)
+    }
+  }
+
+  ## remove dummy list element
+  label.text[[1]] <- NULL
+
+  ## convert keywords into summary placement coordinates
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- c(xlim.plot[1], ylim.plot[2])
+    summary.adj <- c(0, 1)
+  } else if(length(summary.pos) == 2) {
+    summary.pos <- summary.pos
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "topleft") {
+    summary.pos <- c(xlim.plot[1], ylim.plot[2])
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "top") {
+    summary.pos <- c(mean(xlim.plot), ylim.plot[2])
+    summary.adj <- c(0.5, 1)
+  } else if(summary.pos[1] == "topright") {
+    summary.pos <- c(xlim.plot[2], ylim.plot[2])
+    summary.adj <- c(1, 1)
+  }  else if(summary.pos[1] == "left") {
+    summary.pos <- c(xlim.plot[1], mean(ylim.plot[1:2]))
+    summary.adj <- c(0, 0.5)
+  } else if(summary.pos[1] == "center") {
+    summary.pos <- c(mean(xlim.plot), mean(ylim.plot[1:2]))
+    summary.adj <- c(0.5, 0.5)
+  } else if(summary.pos[1] == "right") {
+    summary.pos <- c(xlim.plot[2], mean(ylim.plot[1:2]))
+    summary.adj <- c(1, 0.5)
+  }else if(summary.pos[1] == "bottomleft") {
+    summary.pos <- c(xlim.plot[1], ylim.plot[1])
+    summary.adj <- c(0, 0)
+  } else if(summary.pos[1] == "bottom") {
+    summary.pos <- c(mean(xlim.plot), ylim.plot[1])
+    summary.adj <- c(0.5, 0)
+  } else if(summary.pos[1] == "bottomright") {
+    summary.pos <- c(xlim.plot[2], ylim.plot[1])
+    summary.adj <- c(1, 0)
+  }
+
+  ## add summary content
+  for(i in 1:length(data.stats)) {
+    if(summary.pos[1] != "sub") {
+      text(x = summary.pos[1],
+           y = summary.pos[2],
+           adj = summary.adj,
+           labels = label.text[[i]],
+           col = colour[2],
+           cex = cex.global * 0.8)
+    } else {
+      if(mtext == "") {
+        mtext(side = 3,
+              line = 1 - i,
+              text = label.text[[i]],
+              col = colour[2],
+              cex = cex.global * 0.8)
+      }
+    }
+  }
+
+  ## Optionally, add standard error plot --------------------------------------
+  if(sum(is.na(data[,2])) == length(data[,2])) {
+    se <- FALSE
+  }
+
+  if(se == TRUE) {
+    par(new = TRUE)
+    plot.data <- data[!is.na(data[,2]),]
+
+    plot(x = plot.data[,1],
+         y = plot.data[,2],
+         xlim = xlim.plot,
+         ylim = ylim.plot[3:4],
+         pch = pch.plot,
+         col = colour[4],
+         main = "",
+         xlab = "",
+         ylab = "",
+         axes = FALSE,
+         frame.plot = FALSE
+    )
+    axis(side = 4,
+         labels = TRUE,
+         cex = cex.global
+    )
+    mtext(ylab.plot[2],
+          side = 4,
+          line = 3,
+          cex = cex.global)
+
+    #    par(new = FALSE)
+  }
+
+  ## Optionally add user-defined mtext
+  mtext(side = 3,
+        line = 0.5,
+        text = mtext,
+        cex = 0.8 * cex.global)
+
+  ## FUN by R Luminescence Team
+  if(fun & !interactive)
+    sTeve()
+
+  ## Optionally: Interactive Plot ----------------------------------------------
+  if (interactive) {
+
+    if (!requireNamespace("plotly", quietly = TRUE))
+      stop("The interactive histogram requires the 'plotly' package. To install",
+           " this package run 'install.packages('plotly')' in your R console.",
+           call. = FALSE)
+
+    ## tidy data ----
+    data <- as.data.frame(data)
+    colnames(data) <- c("x", "y")
+    x <- y <- NULL # suffice CRAN check for no visible binding
+    if (length(grep("paste", as.character(xlab.plot))) > 0)
+      xlab.plot <- "Equivalent dose [Gy]"
+
+
+    ## create plots ----
+
+    # histogram
+    hist <- plotly::plot_ly(data = data, x = x,
+                            type = "histogram",
+                            showlegend = FALSE,
+                            name = "Bin", opacity = 0.75,
+                            marker = list(color = "428BCA",
+                                          line = list(width = 1.0,
+                                                      color = "white")),
+                            histnorm = ifelse(normal_curve, "probability density", ""),
+                            yaxis = "y"
+    )
+
+    # normal curve ----
+    if (normal_curve) {
+
+      density.curve <- density(data$x)
+      normal.curve <- data.frame(x = density.curve$x, y = density.curve$y)
+
+      hist <- plotly::add_trace(hist, data = normal.curve, x = x, y = y,
+                                type = "scatter", mode = "lines",
+                                marker = list(color = "red"),
+                                name = "Normal curve",
+                                yaxis = "y")
+
+    }
+
+    # scatter plot of individual errors
+    if (se) {
+      yaxis2 <- list(overlaying = "y", side = "right",
+                     showgrid = FALSE, title = ylab.plot[2],
+                     ticks = "", showline = FALSE)
+
+      se.text <- paste0("Measured value:</br>",
+                        data$x, " ± ", data$y,"</br>")
+
+      hist <- plotly::add_trace(hist, data = data, x = x, y = y,
+                                type = "scatter", mode = "markers",
+                                name = "Error", hoverinfo = "text",
+                                text = se.text,
+                                marker = list(color = "black"),
+                                yaxis = "y2")
+
+      hist <- plotly::layout(yaxis2 = yaxis2)
+    }
+
+    # set layout ----
+    hist <- plotly::layout(hist, hovermode = "closest",
+                           title = paste("<b>", main.plot, "</b>"),
+                           margin = list(r = 90),
+                           xaxis = list(title = xlab.plot,
+                                        ticks = ""),
+                           yaxis = list(title = ylab.plot[1],
+                                        ticks = "",
+                                        showline = FALSE,
+                                        showgrid = FALSE)
+    )
+
+    ## show and return plot ----
+    print(hist)
+    return(hist)
+  }
+
+}
diff --git a/R/plot_KDE.R b/R/plot_KDE.R
new file mode 100644
index 0000000..9d9e9aa
--- /dev/null
+++ b/R/plot_KDE.R
@@ -0,0 +1,1213 @@
+#' Plot kernel density estimate with statistics
+#'
+#' Plot a kernel density estimate of measurement values in combination with the
+#' actual values and associated error bars in ascending order. If enabled, the
+#' boxplot will show the usual distribution parameters (median as
+#' bold line, box delimited by the first and third quartile, whiskers defined
+#' by the extremes and outliers shown as points) and also the mean and
+#' standard deviation as pale bold line and pale polygon, respectively.
+#'
+#' The function allows passing several plot arguments, such as \code{main},
+#' \code{xlab}, \code{cex}. However, as the figure is an overlay of two
+#' separate plots, \code{ylim} must be specified in the order: c(ymin_axis1,
+#' ymax_axis1, ymin_axis2, ymax_axis2) when using the cumulative values plot
+#' option. See examples for some further explanations. For details on the
+#' calculation of the bandwidth (parameter \code{bw}) see
+#' \code{\link{density}}.\cr\cr
+#' A statistic summary, i.e. a collection of statistic measures of
+#' centrality and dispersion (and further measures) can be added by specifying
+#' one or more of the following keywords:
+#' \itemize{
+#' \item \code{"n"} (number of samples)
+#' \item \code{"mean"} (mean De value)
+#' \item \code{"median"} (median of the De values)
+#' \item \code{"sd.rel"} (relative standard deviation in percent)
+#' \item \code{"sd.abs"} (absolute standard deviation)
+#' \item \code{"se.rel"} (relative standard error)
+#' \item \code{"se.abs"} (absolute standard error)
+#' \item \code{"in.2s"} (percent of samples in 2-sigma range)
+#' \item \code{"kurtosis"} (kurtosis)
+#' \item \code{"skewness"} (skewness)
+#' }
+#' Note that the input data for the statistic summary is sent to the function
+#' \code{calc_Statistics()} depending on the log-option for the z-scale. If
+#' \code{"log.z = TRUE"}, the summary is based on the logarithms of the input
+#' data. If \code{"log.z = FALSE"} the linearly scaled data is used. \cr
+#' Note as well, that \code{"calc_Statistics()"} calculates these statistic
+#' measures in three different ways: \code{unweighted}, \code{weighted} and
+#' \code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the
+#' MCM-based version is used. If you wish to use another method, indicate this
+#' with the appropriate keyword using the argument \code{summary.method}.\cr\cr
+#' @param data \code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+#' object (required): for \code{data.frame}: two columns: De
+#' (\code{values[,1]}) and De error (\code{values[,2]}). For plotting multiple
+#' data sets, these must be provided as \code{list} (e.g. \code{list(dataset1,
+#' dataset2)}).
+#'
+#' @param na.rm \code{\link{logical}} (with default): exclude NA values
+#' from the data set prior to any further operations.
+#'
+#' @param values.cumulative \code{\link{logical}} (with default): show
+#' cumulative individual data.
+#'
+#' @param order \code{\link{logical}}: Order data in ascending order.
+#'
+#' @param boxplot \code{\link{logical}} (with default): optionally show a
+#' boxplot (depicting median as thick central line, first and third quartile
+#' as box limits, whiskers denoting +/- 1.5 interquartile ranges and dots
+#' further outliers).
+#'
+#' @param rug \code{\link{logical}} (with default): optionally add rug.
+#'
+#' @param summary \code{\link{character}} (optional): add statistic measures of
+#' centrality and dispersion to the plot. Can be one or more of several
+#' keywords. See details for available keywords.
+#'
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used. In case of coordinate
+#' specification, y-coordinate refers to the right y-axis.
+#'
+#' @param summary.method \code{\link{character}} (with default): keyword
+#' indicating the method used to calculate the statistic summary. One out of
+#' \code{"unweighted"}, \code{"weighted"} and \code{"MCM"}. See
+#' \code{\link{calc_Statistics}} for details.
+#'
+#' @param bw \code{\link{character}} (with default): bandwidth, choose a numeric
+#' value for manual setting.
+#'
+#' @param output \code{\link{logical}}: Optional output of numerical plot
+#' parameters. These can be useful to reproduce similar plots. Default is
+#' \code{FALSE}.
+#'
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot}}.
+#'
+#' @note The plot output is not a 'probability density' plot (cf. the discussion
+#' of Berger and Galbraith in Ancient TL; see references)!
+#'
+#' @section Function version: 3.5.3
+#'
+#' @author Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+#' IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#'
+#' @seealso \code{\link{density}}, \code{\link{plot}}
+#'
+#' @examples
+#'
+#' ## read example data set
+#' data(ExampleData.DeValues, envir = environment())
+#' ExampleData.DeValues <-
+#'   Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+#'
+#' ## create plot straightforward
+#' plot_KDE(data = ExampleData.DeValues)
+#'
+#' ## create plot with logarithmic x-axis
+#' plot_KDE(data = ExampleData.DeValues,
+#'          log = "x")
+#'
+#' ## create plot with user-defined labels and axes limits
+#' plot_KDE(data = ExampleData.DeValues,
+#'          main = "Dose distribution",
+#'          xlab = "Dose (s)",
+#'          ylab = c("KDE estimate", "Cumulative dose value"),
+#'          xlim = c(100, 250),
+#'          ylim = c(0, 0.08, 0, 30))
+#'
+#' ## create plot with boxplot option
+#' plot_KDE(data = ExampleData.DeValues,
+#'          boxplot = TRUE)
+#'
+#' ## create plot with statistical summary below header
+#' plot_KDE(data = ExampleData.DeValues,
+#'          summary = c("n", "median", "skewness", "in.2s"))
+#'
+#' ## create plot with statistical summary as legend
+#' plot_KDE(data = ExampleData.DeValues,
+#'          summary = c("n", "mean", "sd.rel", "se.abs"),
+#'          summary.pos = "topleft")
+#'
+#' ## split data set into sub-groups, one is manipulated, and merge again
+#' data.1 <- ExampleData.DeValues[1:15,]
+#' data.2 <- ExampleData.DeValues[16:25,] * 1.3
+#' data.3 <- list(data.1, data.2)
+#'
+#' ## create plot with two subsets straightforward
+#' plot_KDE(data = data.3)
+#'
+#' ## create plot with two subsets and summary legend at user coordinates
+#' plot_KDE(data = data.3,
+#'          summary = c("n", "median", "skewness"),
+#'          summary.pos = c(110, 0.07),
+#'          col = c("blue", "orange"))
+#'
+#' ## example of how to use the numerical output of the function
+#' ## return plot output to draw a thicker KDE line
+#' KDE_out <- plot_KDE(data = ExampleData.DeValues,
+#' output = TRUE)
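+#'
+#' ## create plot with the statistic summary computed from the weighted data
+#' ## ("MCM" is the default method, see calc_Statistics() for details)
+#' plot_KDE(data = ExampleData.DeValues,
+#'          summary = c("n", "mean", "sd.abs"),
+#'          summary.method = "weighted")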
+#'
+#' @export
+plot_KDE <- function(
+  data,
+  na.rm = TRUE,
+  values.cumulative = TRUE,
+  order = TRUE,
+  boxplot = TRUE,
+  rug = TRUE,
+  summary,
+  summary.pos,
+  summary.method = "MCM",
+  bw = "nrd0",
+  output = FALSE,
+  ...
+) {
+
+  ## check data and parameter consistency -------------------------------------
+
+  ## account for deprecated arguments
+  if("centrality" %in% names(list(...))) {
+
+    boxplot <- TRUE
+
+    warning(paste("[plot_KDE()] Argument 'centrality' no longer supported. ",
+                  "Replaced by 'boxplot = TRUE'."))
+  }
+
+  if("dispersion" %in% names(list(...))) {
+
+    boxplot <- TRUE
+
+    warning(paste("[plot_KDE()] Argument 'dispersion' no longer supported. ",
+                  "Replaced by 'boxplot = TRUE'."))
+  }
+
+  if("polygon.col" %in% names(list(...))) {
+
+    boxplot <- TRUE
+
+    warning(paste("[plot_KDE()] Argument 'polygon.col' no longer supported. ",
+                  "Replaced by 'boxplot = TRUE'."))
+  }
+
+  if("weights" %in% names(list(...))) {
+
+    warning(paste("[plot_KDE()] Argument 'weights' no longer supported. ",
+                  "Weights are omitted."))
+  }
+
+  ## Homogenise input data format
+  if(is(data, "list") == FALSE) {
+
+    data <- list(data)
+  }
+
+  ## check/adjust input data structure
+  for(i in 1:length(data)) {
+
+    if(is(data[[i]], "RLum.Results") == FALSE &
+         is(data[[i]], "data.frame") == FALSE &
+         is.numeric(data[[i]]) == FALSE) {
+      stop(paste("[plot_KDE()] Input data format is neither",
+                 "'data.frame', 'RLum.Results' nor 'numeric'"))
+    } else {
+
+      if(is(data[[i]], "RLum.Results") == TRUE) {
+        data[[i]] <- get_RLum(data[[i]], "data")[,1:2]
+      }
+
+      if(length(data[[i]]) < 2) {
+        data[[i]] <- cbind(data[[i]], rep(NA, length(data[[i]])))
+      }
+    }
+  }
+
+  ## check/set function parameters
+  if(missing(summary) == TRUE) {
+    summary <- ""
+  }
+
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- "sub"
+  }
+
+  ## set mtext output
+  if("mtext" %in% names(list(...))) {
+    mtext <- list(...)$mtext
+  } else {
+    mtext <- ""
+  }
+
+  ## check/set layout definitions
+  if("layout" %in% names(list(...))) {
+    layout <- get_Layout(layout = list(...)$layout)
+  } else {
+    layout <- get_Layout(layout = "default")
+  }
+
+  ## data preparation steps ---------------------------------------------------
+
+  ## optionally, count and exclude NA values and print result
+  if(na.rm == TRUE) {
+    for(i in 1:length(data)) {
+      n.NA <- sum(is.na(data[[i]][,1]))
+      if(n.NA == 1) {
+        message(paste0("1 NA value excluded from data set ", i, "."))
+      } else if(n.NA > 1) {
+        message(paste0(n.NA, " NA values excluded from data set ", i, "."))
+      }
+      data[[i]] <- na.exclude(data[[i]])
+    }
+  }
+
+  ## optionally, order data set
+  if(order == TRUE) {
+    for(i in 1:length(data)) {
+      data[[i]] <- data[[i]][order(data[[i]][,1]),]
+    }
+  }
+
+  ## calculate and paste statistical summary
+  De.stats <- matrix(nrow = length(data), ncol = 12)
+  colnames(De.stats) <- c("n",
+                          "mean",
+                          "median",
+                          "kde.max",
+                          "sd.abs",
+                          "sd.rel",
+                          "se.abs",
+                          "se.rel",
+                          "q.25",
+                          "q.75",
+                          "skewness",
+                          "kurtosis")
+  De.density <- list(NA)
+
+  ## loop through all data sets
+  for(i in 1:length(data)) {
+    statistics <- calc_Statistics(data[[i]])[[summary.method]]
+
+    De.stats[i,1] <- statistics$n
+    De.stats[i,2] <- statistics$mean
+    De.stats[i,3] <- statistics$median
+    De.stats[i,5] <- statistics$sd.abs
+    De.stats[i,6] <- statistics$sd.rel
+    De.stats[i,7] <- statistics$se.abs
+    De.stats[i,8] <- statistics$se.rel
+    De.stats[i,9] <- quantile(data[[i]][,1], 0.25)
+    De.stats[i,10] <- quantile(data[[i]][,1], 0.75)
+    De.stats[i,11] <- statistics$skewness
+    De.stats[i,12] <- statistics$kurtosis
+
+    if(nrow(data[[i]]) >= 2){
+      De.density[[length(De.density) + 1]] <- density(data[[i]][,1],
+                                                      kernel = "gaussian",
+                                                      bw = bw)
+
+    }else{
+      De.density[[length(De.density) + 1]] <- NA
+      warning("[plot_KDE()] Less than 2 points provided, no density plotted.", call. = FALSE)
+
+    }
+
+  }
+
+  ## remove dummy list element
+  De.density[[1]] <- NULL
+
+  ## create global data set
+  De.global <- data[[1]][,1]
+  De.error.global <- data[[1]][,2]
+  De.density.range <- matrix(nrow = length(data),
+                             ncol = 4)
+
+  for(i in 1:length(data)) {
+    ##global De and De.error vector
+    De.global <- c(De.global, data[[i]][,1])
+    De.error.global <- c(De.error.global, data[[i]][,2])
+
+    ## density range
+    if(inherits(De.density[[i]], "density")){
+      De.density.range[i,1] <- min(De.density[[i]]$x)
+      De.density.range[i,2] <- max(De.density[[i]]$x)
+      De.density.range[i,3] <- min(De.density[[i]]$y)
+      De.density.range[i,4] <- max(De.density[[i]]$y)
+
+      ## position of maximum KDE value
+      De.stats[i,4] <- De.density[[i]]$x[which.max(De.density[[i]]$y)]
+
+    }else{
+      De.density.range[i,1:4] <- NA
+      De.stats[i,4] <- NA
+    }
+
+
+  }
+
+  ## Get global range of densities
+  De.density.range <- c(min(De.density.range[,1]),
+                        max(De.density.range[,2]),
+                        min(De.density.range[,3]),
+                        max(De.density.range[,4]))
+
+  label.text <- list(NA)
+
+  if(summary.pos[1] != "sub") {
+    n.rows <- length(summary)
+
+    for(i in 1:length(data)) {
+      stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "")
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          paste(
+                            "",
+                            ifelse("n" %in% summary[j] == TRUE,
+                                   paste("n = ",
+                                         De.stats[i,1],
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean" %in% summary[j] == TRUE,
+                                   paste("mean = ",
+                                         round(De.stats[i,2], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median" %in% summary[j] == TRUE,
+                                   paste("median = ",
+                                         round(De.stats[i,3], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kde.max" %in% summary[j] == TRUE,
+                                   paste("kdemax = ",
+                                         round(De.stats[i,4], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sd.abs" %in% summary[j] == TRUE,
+                                   paste("sd = ",
+                                         round(De.stats[i,5], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sd.rel" %in% summary[j] == TRUE,
+                                   paste("rel. sd = ",
+                                         round(De.stats[i,6], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("se.abs" %in% summary[j] == TRUE,
+                                   paste("se = ",
+                                         round(De.stats[i,7], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("se.rel" %in% summary[j] == TRUE,
+                                   paste("rel. se = ",
+                                         round(De.stats[i,8], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("skewness" %in% summary[j] == TRUE,
+                                   paste("skewness = ",
+                                         round(De.stats[i,11], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kurtosis" %in% summary[j] == TRUE,
+                                   paste("kurtosis = ",
+                                         round(De.stats[i,12], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("in.2s" %in% summary[j] == TRUE,
+                                   paste("in 2 sigma = ",
+                                         round(sum(data[[i]][,1] >
+                                                     (De.stats[i,2] - 2 *
+                                                        De.stats[i,5]) &
+                                                     data[[i]][,1] <
+                                                     (De.stats[i,2] + 2 *
+                                                        De.stats[i,5])) /
+                                                 nrow(data[[i]]) * 100 , 1),
+                                         " %",
+                                         sep = ""),
+                                   ""),
+                            sep = ""))
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]] <- paste(stops,
+                                                    summary.text,
+                                                    stops,
+                                                    sep = "")
+    }
+  } else {
+    for(i in 1:length(data)) {
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          ifelse("n" %in% summary[j] == TRUE,
+                                 paste("n = ",
+                                       De.stats[i,1],
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean" %in% summary[j] == TRUE,
+                                 paste("mean = ",
+                                       round(De.stats[i,2], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median" %in% summary[j] == TRUE,
+                                 paste("median = ",
+                                       round(De.stats[i,3], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kde.max" %in% summary[j] == TRUE,
+                                 paste("kdemax = ",
+                                       round(De.stats[i,4], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sd.rel" %in% summary[j] == TRUE,
+                                 paste("rel. sd = ",
+                                       round(De.stats[i,6], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sd.abs" %in% summary[j] == TRUE,
+                                 paste("abs. sd = ",
+                                       round(De.stats[i,5], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("se.rel" %in% summary[j] == TRUE,
+                                 paste("rel. se = ",
+                                       round(De.stats[i,8], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("se.abs" %in% summary[j] == TRUE,
+                                 paste("abs. se = ",
+                                       round(De.stats[i,7], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("skewness" %in% summary[j] == TRUE,
+                                 paste("skewness = ",
+                                       round(De.stats[i,11], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kurtosis" %in% summary[j] == TRUE,
+                                 paste("kurtosis = ",
+                                       round(De.stats[i,12], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("in.2s" %in% summary[j] == TRUE,
+                                 paste("in 2 sigma = ",
+                                       round(sum(data[[i]][,1] >
+                                                   (De.stats[i,2] - 2 *
+                                                      De.stats[i,5]) &
+                                                   data[[i]][,1] <
+                                                   (De.stats[i,2] + 2 *
+                                                      De.stats[i,5])) /
+                                               nrow(data[[i]]) * 100 , 1),
+                                       " %   ",
+                                       sep = ""),
+                                 "")
+        )
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]]  <- paste(
+        "  ",
+        summary.text,
+        sep = "")
+    }
+
+    ## remove the leading spaces and the trailing separator from each string
+    for(i in 2:length(label.text)) {
+      label.text[[i]] <- substr(x = label.text[[i]],
+                                start = 3,
+                                stop = nchar(label.text[[i]]) - 3)
+    }
+  }
+
+  ## remove dummy list element
+  label.text[[1]] <- NULL
+
+  ## read out additional parameters -------------------------------------------
+  if("main" %in% names(list(...))) {
+    main <- list(...)$main
+  } else {
+    main <- expression(bold(paste(D[e], " distribution")))
+  }
+
+  if("sub" %in% names(list(...))) {
+    sub <- list(...)$sub
+  } else {
+    sub <- NULL
+  }
+
+  if("xlab" %in% names(list(...))) {
+    xlab <- list(...)$xlab
+  } else {
+    xlab <- expression(paste(D[e], " [Gy]"))
+  }
+
+  if("ylab" %in% names(list(...))) {
+    ylab <- list(...)$ylab
+  } else {
+    ylab <- c("Density", "Cumulative frequency")
+  }
+
+  if("xlim" %in% names(list(...))) {
+    xlim.plot <- list(...)$xlim
+  } else {
+    xlim.plot <- c(min(c(De.global - De.error.global),
+                       De.density.range[1],
+                       na.rm = TRUE),
+                   max(c(De.global + De.error.global),
+                       De.density.range[2],
+                       na.rm = TRUE))
+  }
+
+  if("ylim" %in% names(list(...))) {
+    ylim.plot <- list(...)$ylim
+  } else {
+    if(is.finite(De.density.range[1])){
+      ylim.plot <- c(De.density.range[3],
+                     De.density.range[4],
+                     0,
+                     max(De.stats[,1]))
+
+    }else{
+      ylim.plot <- c(0,
+                     max(De.stats[,1]),
+                     0,
+                     max(De.stats[,1]))
+
+    }
+
+  }
+
+  if("log" %in% names(list(...))) {
+    log.option <- list(...)$log
+  } else {
+    log.option <- ""
+  }
+
+  if("col" %in% names(list(...))) {
+
+    col.main <- list(...)$col
+    col.xlab <- 1
+    col.ylab1 <- 1
+    col.ylab2 <- 1
+    col.xtck <- 1
+    col.ytck1 <- 1
+    col.ytck2 <- 1
+    col.box <- 1
+    col.mtext <- 1
+    col.stats <- list(...)$col
+    col.kde.line <- list(...)$col
+    col.kde.fill <- NA
+    col.value.dot <- list(...)$col
+    col.value.bar <- list(...)$col
+    col.value.rug <- list(...)$col
+    col.boxplot <- list(...)$col
+    col.mean.line <- adjustcolor(col = list(...)$col,
+                                 alpha.f = 0.4)
+    col.sd.bar <- adjustcolor(col = list(...)$col,
+                              alpha.f = 0.4)
+    col.background <- NA
+  } else {
+
+    if(length(layout$kde$colour$main) == 1) {
+      col.main <- 1:length(data)
+    } else {
+      col.main <- layout$kde$colour$main
+    }
+
+    if(length(layout$kde$colour$xlab) == 1) {
+      col.xlab <- 1:length(data)
+    } else {
+      col.xlab <- layout$kde$colour$xlab
+    }
+
+    if(length(layout$kde$colour$ylab1) == 1) {
+      col.ylab1 <- 1:length(data)
+    } else {
+      col.ylab1 <- layout$kde$colour$ylab1
+    }
+
+    if(length(layout$kde$colour$ylab2) == 1) {
+      col.ylab2 <- 1:length(data)
+    } else {
+      col.ylab2 <- layout$kde$colour$ylab2
+    }
+
+    if(length(layout$kde$colour$xtck) == 1) {
+      col.xtck <- 1:length(data)
+    } else {
+      col.xtck <- layout$kde$colour$xtck
+    }
+
+    if(length(layout$kde$colour$ytck1) == 1) {
+      col.ytck1 <- 1:length(data)
+    } else {
+      col.ytck1 <- layout$kde$colour$ytck1
+    }
+
+    if(length(layout$kde$colour$ytck2) == 1) {
+      col.ytck2 <- 1:length(data)
+    } else {
+      col.ytck2 <- layout$kde$colour$ytck2
+    }
+
+    if(length(layout$kde$colour$box) == 1) {
+      col.box <- 1:length(data)
+    } else {
+      col.box <- layout$kde$colour$box
+    }
+
+    if(length(layout$kde$colour$mtext) == 1) {
+      col.mtext <- 1:length(data)
+    } else {
+      col.mtext <- layout$kde$colour$mtext
+    }
+
+    if(length(layout$kde$colour$stats) == 1) {
+      col.stats <- 1:length(data)
+    } else {
+      col.stats <- layout$kde$colour$stats
+    }
+
+    if(length(layout$kde$colour$kde.line) == 1) {
+      col.kde.line <- 1:length(data)
+    } else {
+      col.kde.line <- layout$kde$colour$kde.line
+    }
+
+    if(length(layout$kde$colour$kde.fill) == 1) {
+      col.kde.fill <- 1:length(data)
+    } else {
+      col.kde.fill <- layout$kde$colour$kde.fill
+    }
+
+    if(length(layout$kde$colour$value.dot) == 1) {
+      col.value.dot <- 1:length(data)
+    } else {
+      col.value.dot <- layout$kde$colour$value.dot
+    }
+
+    if(length(layout$kde$colour$value.bar) == 1) {
+      col.value.bar <- 1:length(data)
+    } else {
+      col.value.bar <- layout$kde$colour$value.bar
+    }
+
+    if(length(layout$kde$colour$value.rug) == 1) {
+      col.value.rug <- 1:length(data)
+    } else {
+      col.value.rug <- layout$kde$colour$value.rug
+    }
+
+    if(length(layout$kde$colour$boxplot) == 1) {
+      col.boxplot <- 1:length(data)
+    } else {
+      col.boxplot <- layout$kde$colour$boxplot
+    }
+
+    if(length(layout$kde$colour$mean.line) == 1) {
+      col.mean.line <- adjustcolor(col = 1:length(data),
+                                   alpha.f = 0.4)
+    } else {
+      col.mean.line <- layout$kde$colour$mean.line
+    }
+
+    if(length(layout$kde$colour$sd.bar) == 1) {
+      col.sd.bar <- 1:length(data)
+    } else {
+      col.sd.bar <- layout$kde$colour$sd.bar
+    }
+
+    if(length(layout$kde$colour$background) == 1) {
+      col.background <- 1:length(data)
+    } else {
+      col.background <- layout$kde$colour$background
+    }
+
+  }
+
+  if("lty" %in% names(list(...))) {
+    lty <- list(...)$lty
+  } else {
+    lty <- rep(1, length(data))
+  }
+
+  if("lwd" %in% names(list(...))) {
+    lwd <- list(...)$lwd
+  } else {
+    lwd <- rep(1, length(data))
+  }
+
+  if("cex" %in% names(list(...))) {
+    cex <- list(...)$cex
+  } else {
+    cex <- 1
+  }
+
+  if("fun" %in% names(list(...))) {
+    fun <- list(...)$fun
+  } else {
+    fun <- FALSE
+  }
+
+  ## convert keywords into summary placement coordinates
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- c(xlim.plot[1], ylim.plot[2])
+    summary.adj <- c(0, 1)
+  } else if(length(summary.pos) == 2) {
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "topleft") {
+    summary.pos <- c(xlim.plot[1], ylim.plot[2])
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "top") {
+    summary.pos <- c(mean(xlim.plot), ylim.plot[2])
+    summary.adj <- c(0.5, 1)
+  } else if(summary.pos[1] == "topright") {
+    summary.pos <- c(xlim.plot[2], ylim.plot[2])
+    summary.adj <- c(1, 1)
+  }  else if(summary.pos[1] == "left") {
+    summary.pos <- c(xlim.plot[1], mean(ylim.plot[1:2]))
+    summary.adj <- c(0, 0.5)
+  } else if(summary.pos[1] == "center") {
+    summary.pos <- c(mean(xlim.plot), mean(ylim.plot[1:2]))
+    summary.adj <- c(0.5, 0.5)
+  } else if(summary.pos[1] == "right") {
+    summary.pos <- c(xlim.plot[2], mean(ylim.plot[1:2]))
+    summary.adj <- c(1, 0.5)
+  }else if(summary.pos[1] == "bottomleft") {
+    summary.pos <- c(xlim.plot[1], ylim.plot[1])
+    summary.adj <- c(0, 0)
+  } else if(summary.pos[1] == "bottom") {
+    summary.pos <- c(mean(xlim.plot), ylim.plot[1])
+    summary.adj <- c(0.5, 0)
+  } else if(summary.pos[1] == "bottomright") {
+    summary.pos <- c(xlim.plot[2], ylim.plot[1])
+    summary.adj <- c(1, 0)
+  }
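+  ## note: the keyword "sub" is intentionally left unconverted here; it is
+  ## handled separately when the summary is added to the plot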
+
+  ## plot data sets -----------------------------------------------------------
+
+  ## setup plot area
+  if(length(summary) >= 1 & summary.pos[1] == "sub") {
+
+    toplines <- length(data)
+  } else {
+
+    toplines <- 1
+  }
+
+  ## set background colour and store it for later use
+  par(bg = layout$kde$colour$background)
+  bg.original <- par()$bg
+
+  par(mar = c(5, 5.5, 2.5 + toplines, 4.5),
+      xpd = FALSE,
+      cex = cex)
+
+  if(layout$kde$dimension$figure.width != "auto" |
+     layout$kde$dimension$figure.height != "auto") {
+    par(mai = layout$kde$dimension$margin / 25.4,
+        pin = c(layout$kde$dimension$figure.width / 25.4 -
+                  layout$kde$dimension$margin[2] / 25.4 -
+                  layout$kde$dimension$margin[4] / 25.4,
+                layout$kde$dimension$figure.height / 25.4 -
+                  layout$kde$dimension$margin[1] / 25.4 -
+                  layout$kde$dimension$margin[3]/25.4))
+  }
+
+  ## create empty plot to get plot dimensions
+  plot(NA,
+       xlim = xlim.plot,
+       ylim = ylim.plot[1:2],
+       sub = sub,
+       log = log.option,
+       axes = FALSE,
+       ann = FALSE)
+
+  ## get line height in xy coordinates
+  l_height <- par()$cxy[2]
+
+  ## optionally update ylim
+  if(boxplot == TRUE) {
+
+    ylim.plot[1] <- ylim.plot[1] - 1.4 * l_height
+  }
+
+  ## create empty plot to set adjusted plot dimensions
+  par(new = TRUE)
+  plot(NA,
+       xlim     = xlim.plot,
+       ylim     = ylim.plot[1:2],
+       log      = log.option,
+       cex      = cex,
+       axes = FALSE,
+       ann = FALSE)
+
+  ## add box
+  box(which = "plot",
+      col = layout$kde$colour$box)
+
+  ## add x-axis
+  axis(side = 1,
+       col = layout$kde$colour$xtck,
+       col.axis = layout$kde$colour$xtck,
+       labels = NA,
+       tcl = -layout$kde$dimension$xtcl / 200,
+       cex = cex)
+
+  axis(side = 1,
+       line = 2 * layout$kde$dimension$xtck.line / 100 - 2,
+       lwd = 0,
+       col = layout$kde$colour$xtck,
+       family = layout$kde$font.type$xtck,
+       font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                      layout$kde$font.deco$xtck],
+       col.axis = layout$kde$colour$xtck,
+       cex.axis = layout$kde$font.size$xlab/12)
+
+  mtext(text = xlab,
+        side = 1,
+        line = 3 * layout$kde$dimension$xlab.line / 100,
+        col = layout$kde$colour$xlab,
+        family = layout$kde$font.type$xlab,
+        font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                       layout$kde$font.deco$xlab],
+        cex = cex * layout$kde$font.size$xlab/12)
+
+  ## add left y-axis
+  axis(side = 2,
+       at = pretty(x = range(De.density.range[3:4])),
+       col = layout$kde$colour$ytck1,
+       col.axis = layout$kde$colour$ytck1,
+       labels = NA,
+       tcl = -layout$kde$dimension$ytck1 / 200,
+       cex = cex)
+
+  axis(side = 2,
+       at = pretty(x = range(De.density.range[3:4])),
+       line = 2 * layout$kde$dimension$ytck1.line / 100 - 2,
+       lwd = 0,
+       col = layout$kde$colour$ytck1,
+       family = layout$kde$font.type$ytck1,
+       font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                      layout$kde$font.deco$ytck1],
+       col.axis = layout$kde$colour$ytck1,
+       cex.axis = layout$kde$font.size$ylab1/12)
+
+  mtext(text = ylab[1],
+        side = 2,
+        line = 3 * layout$kde$dimension$ylab1.line / 100,
+        col = layout$kde$colour$ylab1,
+        family = layout$kde$font.type$ylab1,
+        font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                       layout$kde$font.deco$ylab1],
+        cex = cex * layout$kde$font.size$ylab1/12)
+
+  for(i in 1:length(data)) {
+    if(inherits(De.density[[i]], "density")){
+      polygon(x = c(par()$usr[1], De.density[[i]]$x, par()$usr[2]),
+              y = c(min(De.density[[i]]$y),De.density[[i]]$y, min(De.density[[i]]$y)),
+              border = col.kde.line[i],
+              col = col.kde.fill,
+              lty = lty[i],
+              lwd = lwd[i])
+
+    }
+
+  }
+
+  ## add plot title
+  cex.old <- par()$cex
+  par(cex = layout$kde$font.size$main / 12)
+  title(main = main,
+        family = layout$kde$font.type$main,
+        font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                       layout$kde$font.deco$main],
+        col.main = layout$kde$colour$main,
+        line = (toplines + 1.2) * layout$kde$dimension$main / 100)
+  par(cex = cex.old)
+
+  ## optionally add mtext line
+  if(mtext != "") {
+
+    mtext(text = mtext,
+          side = 3,
+          line = 0.5,
+          family = layout$kde$font.type$mtext,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$kde$font.deco$mtext],
+          col = layout$kde$colour$mtext,
+          cex = layout$kde$font.size$mtext / 12)
+  }
+
+  ## add summary content
+  for(i in 1:length(data)) {
+
+    if(summary.pos[1] != "sub") {
+
+      text(x = summary.pos[1],
+           y = summary.pos[2],
+           adj = summary.adj,
+           labels = label.text[[i]],
+           col = col.stats[i],
+           cex = layout$kde$font.size$stats / 12)
+    } else {
+
+      if(mtext == "") {
+
+        mtext(side = 3,
+              line = (toplines + 0.3 - i) * layout$kde$dimension$stats.line / 100,
+              text = label.text[[i]],
+              col = col.stats[i],
+              cex = layout$kde$font.size$stats / 12)
+      }
+    }
+  }
+
+  if(values.cumulative == TRUE) {
+
+    ## create empty overlay plot
+    par(new = TRUE) # adjust plot options
+
+    ## add empty plot, scaled to preliminary secondary plot content
+    plot(x = NA,
+         xlim = xlim.plot,
+         ylim = ylim.plot[3:4],
+         log  = log.option,
+         ann = FALSE,
+         axes = FALSE
+         )
+
+    ## get line height in xy coordinates
+    l_height <- par()$cxy[2]
+
+    ## optionally update ylim
+    if(boxplot == TRUE) {
+
+      ylim.plot[3] <- ylim.plot[3] - 1.4 * l_height
+    }
+
+    ## create correctly scaled empty overlay plot
+    par(new = TRUE) # adjust plot options
+
+    ## add empty plot, scaled to secondary plot content
+    plot(NA,
+         xlim = xlim.plot,
+         ylim = ylim.plot[3:4],
+         log  = log.option,
+         ann = FALSE,
+         axes = FALSE)
+
+    ## optionally add boxplot
+    if(boxplot == TRUE) {
+
+      ## add zero line
+      abline(h = 0)
+
+      ## get extended boxplot data
+      boxplot.data <- list(NA)
+
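+      ## note: the $group and $names slots of the boxplot statistics are
+      ## repurposed below to store the mean and standard deviation of each
+      ## data set (cf. the commented-out mean/sd drawing code further down)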
+      for(i in 1:length(data)) {
+        boxplot.i <- boxplot(x = data[[i]][,1],
+                             plot = FALSE)
+        boxplot.i$group <- mean(x = data[[i]][,1],
+                                na.rm = TRUE)
+        boxplot.i$names <- sd(x = data[[i]][,1],
+                              na.rm = TRUE)
+        boxplot.data[[length(boxplot.data) + 1]] <- boxplot.i
+      }
+
+      ## remove dummy list object
+      boxplot.data[[1]] <- NULL
+
+      ## get new line heights
+      l_height <- par()$cxy[2]
+
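+      ## the boxplot elements are drawn manually in a band between -7/8 and
+      ## -11/8 line heights below the zero line of the cumulative count axis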
+      for(i in 1:length(data)) {
+
+        # ## draw sd line
+        # lines(x = c(boxplot.data[[i]]$group[1] - boxplot.data[[i]]$names[1],
+        #             boxplot.data[[i]]$group[1] + boxplot.data[[i]]$names[1]),
+        #       y = c(-5/8 * l_height,
+        #             -5/8 * l_height),
+        #       col = col.mean.line[i])
+        #
+        # ## draw mean line
+        # points(x = boxplot.data[[i]]$group[1],
+        #       y = -5/8 * l_height,
+        #       pch = 18,
+        #       col = col.mean.line[i])
+
+        ## draw median line
+        lines(x = c(boxplot.data[[i]]$stats[3,1],
+                    boxplot.data[[i]]$stats[3,1]),
+              y = c(-11/8 * l_height,
+                    -7/8 * l_height),
+              lwd = 2,
+              col = col.boxplot[i])
+
+        ## draw q25-q75-polygon
+        polygon(x = c(boxplot.data[[i]]$stats[2,1],
+                      boxplot.data[[i]]$stats[2,1],
+                      boxplot.data[[i]]$stats[4,1],
+                      boxplot.data[[i]]$stats[4,1]),
+                y = c(-11/8 * l_height,
+                      -7/8 * l_height,
+                      -7/8 * l_height,
+                      -11/8 * l_height),
+                border = col.boxplot[i])
+
+        ## draw whiskers
+        lines(x = c(boxplot.data[[i]]$stats[2,1],
+                    boxplot.data[[i]]$stats[1,1]),
+              y = c(-9/8 * l_height,
+                    -9/8 * l_height),
+              col = col.boxplot[i])
+
+        lines(x = c(boxplot.data[[i]]$stats[1,1],
+                    boxplot.data[[i]]$stats[1,1]),
+              y = c(-10/8 * l_height,
+                    -8/8 * l_height),
+              col = col.boxplot[i])
+
+        lines(x = c(boxplot.data[[i]]$stats[4,1],
+                    boxplot.data[[i]]$stats[5,1]),
+              y = c(-9/8 * l_height,
+                    -9/8 * l_height),
+              col = col.boxplot[i])
+
+        lines(x = c(boxplot.data[[i]]$stats[5,1],
+                    boxplot.data[[i]]$stats[5,1]),
+              y = c(-10/8 * l_height,
+                    -8/8 * l_height),
+              col = col.boxplot[i])
+
+        ## draw outliers
+        points(x = boxplot.data[[i]]$out,
+               y = rep(-9/8 * l_height,
+                       length(boxplot.data[[i]]$out)),
+               col = col.boxplot[i],
+               cex = cex * 0.8)
+      }
+
+    }
+
+    ## optionally add rug
+    if(rug == TRUE) {
+
+      for(i in 1:length(data)) {
+
+        for(j in 1:nrow(data[[i]])) {
+
+          lines(x = c(data[[i]][j,1],
+                      data[[i]][j,1]),
+                y = c(0,
+                      -2/8 * l_height),
+                col = col.value.rug[i])
+        }
+      }
+    }
+
+    ## add secondary y-axis
+    ticks_axis <- pretty(x = c(1, ylim.plot[4]))
+    ticks_axis <- ifelse(test = ticks_axis == 0,
+                         yes = NA,
+                         no = ticks_axis)
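+    ## note: the zero tick is masked (set to NA), presumably to keep the
+    ## origin of the cumulative count axis clear of the boxplot/rug area below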
+
+    ## add right y-axis
+    axis(side = 4,
+         at = ticks_axis,
+         col = layout$kde$colour$ytck2,
+         col.axis = layout$kde$colour$ytck2,
+         labels = NA,
+         tcl = -layout$kde$dimension$ytck2 / 200,
+         cex = cex)
+
+    axis(side = 4,
+         at = ticks_axis,
+         line = 2 * layout$kde$dimension$ytck2.line / 100 - 2,
+         lwd = 0,
+         col = layout$kde$colour$ytck2,
+         family = layout$kde$font.type$ytck2,
+         font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                        layout$kde$font.deco$ytck2],
+         col.axis = layout$kde$colour$ytck2,
+         cex.axis = layout$kde$font.size$ylab2/12)
+
+    mtext(text = ylab[2],
+          side = 4,
+          line = 3 * layout$kde$dimension$ylab2.line / 100,
+          col = layout$kde$colour$ylab2,
+          family = layout$kde$font.type$ylab2,
+          font = (1:4)[c("plain", "bold", "italic", "bold italic") ==
+                         layout$kde$font.deco$ylab2],
+          cex = cex * layout$kde$font.size$ylab2/12)
+
+    ## add De error bars
+    for(i in 1:length(data)) {
+      arrows(data[[i]][,1] - data[[i]][,2]/2,
+             1:length(data[[i]][,1]),
+             data[[i]][,1] + data[[i]][,2]/2,
+             1:length(data[[i]][,1]),
+             code = 3,
+             angle = 90,
+             length = 0.05,
+             col = col.value.bar[i])
+
+      ## add De measurements
+      points(data[[i]][,1], 1:De.stats[i,1],
+             col = col.value.dot[i],
+             pch = 20)
+    }
+  }
+
+  ## add empty plot
+  par(new = TRUE)
+  plot(NA,
+       ann = FALSE,
+       axes = FALSE,
+       xlim     = xlim.plot,
+       ylim     = ylim.plot[1:2],
+       log      = log.option,
+       cex      = cex,
+       cex.lab  = cex,
+       cex.main = cex,
+       cex.axis = cex)
+
+  ## FUN by R Luminescence Team
+  if(fun==TRUE){sTeve()}
+
+  if(output == TRUE) {
+    return(list(De.stats = De.stats,
+                summary.pos = summary.pos,
+                De.density = De.density))
+  }
+
+}
diff --git a/R/plot_NRt.R b/R/plot_NRt.R
new file mode 100644
index 0000000..cc77ebf
--- /dev/null
+++ b/R/plot_NRt.R
@@ -0,0 +1,235 @@
+#' Visualise natural/regenerated signal ratios
+#'
+#' This function creates a Natural/Regenerated signal vs. time (NR(t)) plot
+#' as shown in Steffen et al. (2009).
+#'
+#' This function accepts the individual curve data in many different formats. If
+#' \code{data} is a \code{list}, each element of the list must contain a two
+#' column \code{data.frame} or \code{matrix} containing the XY data of the curves
+#' (time and counts). Alternatively, the elements can be objects of class
+#' \code{\linkS4class{RLum.Data.Curve}}.
+#' Input values can also be provided as a \code{data.frame} or \code{matrix} where
+#' the first column contains the time values and each following column contains
+#' the counts of each curve.
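+#'
+#' As a minimal sketch (hypothetical values, purely for illustration), a
+#' \code{data.frame} input could be constructed like this:
+#' \preformatted{
+#' t <- seq(0.1, 40, by = 0.1)
+#' df <- data.frame(time = t,
+#'                  natural = 1000 * exp(-0.1 * t),
+#'                  regenerated = 900 * exp(-0.09 * t))
+#' plot_NRt(df)
+#' }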
+#'
+#' @param data a \code{\link{list}}, \code{\link{data.frame}}, \code{\link{matrix}} or
+#' \code{\linkS4class{RLum.Analysis}} object (\bold{required}). X,Y data of measured values
+#' (time and counts). See details on individual data structure.
+#'
+#' @param log \code{\link{character}} (optional): logarithmic axes
+#' (\code{c("x", "y", "xy")}).
+#'
+#' @param smooth \code{\link{character}} (optional): apply data smoothing. Use
+#' \code{"rmean"} to calculate the rolling mean, where \code{k} determines the
+#' width of the rolling window (see \code{\link{rollmean}}).
+#' \code{"spline"} applies a smoothing spline to each curve
+#' (see \code{\link{smooth.spline}}).
+#'
+#' @param k \code{\link{integer}} (with default): integer width of the rolling
+#' window.
+#'
+#' @param legend \code{\link{logical}} (with default): show or hide the plot legend.
+#'
+#' @param legend.pos \code{\link{character}} (with default): keyword specifying
+#' the position of the legend (see \code{\link{legend}}).
+#'
+#' @param ... further parameters passed to \code{\link{plot}} (also see \code{\link{par}}).
+#'
+#'
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @seealso \code{\link{plot}}
+#'
+#' @return Returns a plot and an \code{\linkS4class{RLum.Analysis}} object.
+#'
+#' @references
+#' Steffen, D., Preusser, F., Schlunegger, F., 2009. OSL quartz underestimation due to
+#' unstable signal components. Quaternary Geochronology, 4, 353-362.
+#'
+#' @examples
+#'
+#' ## load example data
+#' data("ExampleData.BINfileData", envir = environment())
+#'
+#' ## EXAMPLE 1
+#'
+#' ## convert Risoe.BINfileData object to RLum.Analysis object
+#' data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL")
+#'
+#' ## extract all OSL curves
+#' allCurves <- get_RLum(data)
+#'
+#' ## keep only the natural and regenerated signal curves
+#' pos <- seq(1, 9, 2)
+#' curves <- allCurves[pos]
+#'
+#' ## plot a standard NR(t) plot
+#' plot_NRt(curves)
+#'
+#' ## re-plot with rolling mean data smoothing
+#' plot_NRt(curves, smooth = "rmean", k = 10)
+#'
+#' ## re-plot with a logarithmic x-axis
+#' plot_NRt(curves, log = "x", smooth = "rmean", k = 5)
+#'
+#' ## re-plot with custom axes ranges
+#' plot_NRt(curves, smooth = "rmean", k = 5,
+#'          xlim = c(0.1, 5), ylim = c(0.4, 1.6),
+#'          legend.pos = "bottomleft")
+#'
+#' ## re-plot with smoothing spline on log scale
+#' plot_NRt(curves, smooth = "spline", log = "x",
+#'          legend.pos = "top")
+#'
+#' ## EXAMPLE 2
+#'
+#' # you may also use this function to check whether all
+#' # TD curves follow the same shape (making it a TnTx(t) plot).
+#' posTD <- seq(2, 14, 2)
+#' curves <- allCurves[posTD]
+#'
+#' plot_NRt(curves, main = "TnTx(t) Plot",
+#'          smooth = "rmean", k = 20,
+#'          ylab = "TD natural / TD regenerated",
+#'          xlim = c(0, 20), legend = FALSE)
+#'
+#' ## EXAMPLE 3
+#'
+#' # extract data from all positions
+#' data <- lapply(1:24, FUN = function(pos) {
+#'    Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = pos, ltype = "OSL")
+#' })
+#'
+#' # get individual curve data from each aliquot
+#' aliquot <- lapply(data, get_RLum)
+#'
+#' # set graphical parameters
+#' par(mfrow = c(2, 2))
+#'
+#' # create NR(t) plots for all aliquots
+#' for (i in 1:length(aliquot)) {
+#'    plot_NRt(aliquot[[i]][pos],
+#'             main = paste0("Aliquot #", i),
+#'             smooth = "rmean", k = 20,
+#'             xlim = c(0, 10),
+#'             cex = 0.6, legend.pos = "bottomleft")
+#' }
+#'
+#' # reset graphical parameters
+#' par(mfrow = c(1, 1))
+#'
+#'
+#' @export
+plot_NRt <- function(data, log = "", smooth = c("none", "spline", "rmean"), k = 3,
+                     legend = TRUE, legend.pos = "topright", ...) {
+
+  ## DATA INPUT EVALUATION -----
+  if (inherits(data, "list")) {
+    if (length(data) < 2)
+      stop(paste("The provided list only contains curve data of the natural signal"), call. = FALSE)
+    if (all(sapply(data, class) == "RLum.Data.Curve"))
+      curves <- lapply(data, get_RLum)
+  }
+  else if (inherits(data, "data.frame") || inherits(data, "matrix")) {
+    if (ncol(data) < 3)
+      stop(paste("The provided", class(data), "only contains curve data of the natural signal"), call. = FALSE)
+    if (is.matrix(data))
+      data <- as.data.frame(data)
+    curves <- apply(data[2:ncol(data)], MARGIN = 2, function(curve) {
+      data.frame(data[ ,1], curve)
+    })
+  }
+  else if (inherits(data, "RLum.Analysis")) {
+    RLum.objects <- get_RLum(data)
+    if (!any(sapply(RLum.objects, class) == "RLum.Data.Curve"))
+      stop(paste("The provided RLum.Analysis object must exclusively contain RLum.Data.Curve objects."), call. = FALSE)
+    curves <- lapply(RLum.objects, get_RLum)
+    if (length(curves) < 2)
+      stop(paste("The provided RLum.Analysis object only contains curve data of the natural signal"), call. = FALSE)
+  }
+
+  ## BASIC SETTINGS ------
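+  # by convention, the first curve is the natural signal and all following
+  # curves are regenerated signals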
+  natural <- curves[[1]]
+  regCurves <- curves[2:length(curves)]
+  time <- curves[[1]][ ,1]
+
+
+  ## DATA TRANSFORMATION -----
+
+  # calculate ratios
+  NR <- lapply(regCurves, FUN = function(reg, nat) { nat[ ,2] / reg[ ,2] }, natural)
+
+  # smooth spline
+  if (smooth[1] == "spline") {
+    NR <- lapply(NR, function(nr) { smooth.spline(nr)$y })
+  }
+  if (smooth[1] == "rmean") {
+    NR <- lapply(NR, function(nr) { zoo::rollmean(nr, k) })
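+    # note: zoo::rollmean() shortens each series to length n - k + 1, so the
+    # time vector is smoothed as well to keep x and y values aligned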
+    time <- zoo::rollmean(time, k)
+  }
+
+  # normalise data
+  NRnorm <- lapply(NR, FUN = function(nr) { nr / nr[1] })
+
+
+  ## EXTRA ARGUMENTS -----
+
+  # default values
+  settings <- list(
+    xlim = if (log == "x" || log ==  "xy") c(0.1, max(time)) else c(0, max(time)),
+    ylim = range(pretty(c(min(sapply(NRnorm, min)), max(sapply(NRnorm, max))))),
+    xlab = "Time [s]",
+    ylab = "Natural signal / Regenerated signal",
+    cex = 1L,
+    main = "NR(t) Plot")
+
+  # override defaults with user settings
+  settings <- modifyList(settings, list(...))
+
+
+
+  ## PLOTTING ----------
+
+  # set graphical parameter
+  par(cex = settings$cex)
+
+  # empty plot
+  if (is.na(pmatch(log, c("x", "y", "xy"))))
+    log <- ""
+
+  do.call(plot, modifyList(list(x = NA, y = NA, log = log, xaxs = "i", yaxs = "i"),
+                           settings))
+
+  # horizontal line
+  abline(h = 1, lty = 3, col = "grey")
+
+  col <- 1:length(NRnorm)
+
+  # add N/R lines
+  mapply(FUN = function(curve, col) {
+    points(time, curve, type = "l", col = col)
+  }, NRnorm, col)
+
+  # add legend
+  if (legend) {
+    labels <- paste0("N/R", 1:length(NRnorm))
+    ncol <- ifelse(length(NRnorm) > 4, ceiling(length(NRnorm) / 4) , 1)
+    legend(legend.pos, legend = labels, col = col, lty = 1, ncol = ncol, cex = 0.8, bty = "n")
+  }
+
+  ## RETURN VALUES ----
+  obj <- set_RLum("RLum.Analysis", protocol = "UNKNOWN",
+                  records = mapply(FUN = function(curve, id) {
+                    set_RLum("RLum.Data.Curve",
+                             recordType = paste0("N/R", id),
+                             curveType = "NRt",
+                             data = matrix(c(time, curve), ncol = 2),
+                             info = list(
+                               data = curves,
+                               call = sys.call(-6L),
+                               args = as.list(sys.call(-6L)[-1])
+                             ))
+                  }, NRnorm, seq_len(length(NRnorm)))
+  )
+  invisible(obj)
+}
diff --git a/R/plot_RLum.Analysis.R b/R/plot_RLum.Analysis.R
new file mode 100644
index 0000000..e3c8dd3
--- /dev/null
+++ b/R/plot_RLum.Analysis.R
@@ -0,0 +1,727 @@
+#' Plot function for an RLum.Analysis S4 class object
+#'
+#' The function provides a standardised plot output for curve data of an
+#' RLum.Analysis S4 class object
+#'
+#' The function produces a multiple plot output. A file output is recommended
+#' (e.g., \code{\link{pdf}}).
+#'
+#' \bold{curve.transformation}\cr
+#'
+#' This argument allows transforming continuous wave (CW) curves to pseudo
+#' (linear) modulated curves. For the transformation, the functions of the
+#' package are used. Currently, it is not possible to pass further arguments to
+#' the transformation functions. The argument works only for \code{ltype}
+#' \code{OSL} and \code{IRSL}.\cr
+#'
+#' Please note: The curve transformation within this function works roughly,
+#' i.e. every IRSL or OSL curve is transformed, without considering whether it
+#' is measured with the PMT or not! However, for a quick look it might be
+#' helpful.\cr
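+#'
+#' A minimal sketch (assuming an \code{RLum.Analysis} object \code{temp} that
+#' contains OSL curves, as created in the example below):
+#' \preformatted{
+#' plot_RLum.Analysis(temp,
+#'                    subset = list(recordType = "OSL"),
+#'                    curve.transformation = "CW2pLM")
+#' }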
+#'
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}): S4
+#' object of class \code{RLum.Analysis}
+#'
+#' @param subset named \code{\link{list}} (optional): subsets elements for plotting. The
+#' arguments in the named \code{\link{list}} will be directly passed to the function \code{\link{get_RLum}}
+#' (e.g., \code{subset = list(curveType = "measured")})
+#'
+#' @param nrows \code{\link{integer}} (optional): sets number of rows for
+#' plot output, if nothing is set the function tries to find a value.
+#'
+#' @param ncols \code{\link{integer}} (optional): sets number of columns
+#' for plot output, if nothing is set the function tries to find a value.
+#'
+#' @param abline \code{\link{list}} (optional): allows adding ablines to the plot. Arguments are provided
+#' in a list and will be forwarded to the function \code{\link{abline}}, e.g., \code{list(v = c(10, 100))}
+#' adds two vertical lines at 10 and 100 to all plots. In contrast, \code{list(v = c(10), v = c(100))}
+#' adds a vertical line at 10 to the first and a vertical line at 100 to the 2nd plot.
+#'
+#' @param combine \code{\link{logical}} (with default): allows to combine all
+#' \code{\linkS4class{RLum.Data.Curve}} objects in one single plot.
+#'
+#' @param curve.transformation \code{\link{character}} (optional): allows
+#' transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via
+#' transformation functions. Allowed values are: \code{CW2pLM}, \code{CW2pLMi},
+#' \code{CW2pHMi} and \code{CW2pPMi}. See details.
+#'
+#' @param plot.single \code{\link{logical}} (with default): if \code{TRUE}, global par
+#' settings are considered; normally this results in one plot per page
+#'
+#' @param \dots further arguments and graphical parameters that will be passed to
+#' the \code{plot} function. Supported arguments: \code{main}, \code{mtext},
+#' \code{log}, \code{lwd}, \code{lty}, \code{type}, \code{pch}, \code{col},
+#' \code{norm}, \code{xlim}, \code{ylim}, \code{xlab}, \code{ylab} ... and for \code{combine = TRUE}
+#' also: \code{sub}, \code{legend}, \code{legend.text}, \code{legend.pos} (standard keywords plus \code{"outside"}), \code{legend.col}, \code{smooth}.
+#' All arguments can be provided as \code{vector} or \code{list} to gain full control
+#' over all plot settings.
+#'
+#' @return Returns multiple plots.
+#'
+#' @note Not all arguments available for \code{\link{plot}} will be passed!
+#' Only plotting of \code{RLum.Data.Curve} and \code{RLum.Data.Spectrum}
+#' objects is currently supported.\cr
+#'
+#' @section Function version: 0.3.6
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{plot}}, \code{\link{plot_RLum}},
+#' \code{\link{plot_RLum.Data.Curve}}
+#'
+#' @references #
+#'
+#' @keywords aplot
+#'
+#' @examples
+#'
+#'##load data
+#'data(ExampleData.BINfileData, envir = environment())
+#'
+#'##convert values for position 1
+#'temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+#'
+#'##plot (combine) TL curves in one plot
+#'plot_RLum.Analysis(
+#' temp,
+#' subset = list(recordType = "TL"),
+#' combine = TRUE,
+#' norm = TRUE,
+#' abline = list(v = c(110))
+#' )
+#'
+#' @export
+plot_RLum.Analysis <- function(
+  object,
+  subset,
+  nrows,
+  ncols,
+  abline = NULL,
+  combine = FALSE,
+  curve.transformation,
+  plot.single = FALSE,
+  ...
+){
+
+  # Integrity check ----------------------------------------------------------------------------
+
+  ##check if object is of class RLum.Analysis (lists are handled via plot_RLum())
+  if (!is(object, "RLum.Analysis")) {
+    stop("[plot_RLum.Analysis()] Input object is not of type 'RLum.Analysis'")
+
+  }
+
+  # Make selection if wanted  -------------------------------------------------------------------
+
+  if(!missing(subset)){
+
+    ##check whether the user set the drop option and remove it, as we cannot work with it
+    subset <- subset[!sapply(names(subset), function(x){"drop" %in% x})]
+    object <- do.call(get_RLum, c(object = object, subset, drop = FALSE))
+
+  }
+
+  # Deal with additional arguments.  ------------------------------------------------------------
+
+  ##create plot settings list
+  plot.settings <- list(
+    main = NULL,
+    mtext = NULL,
+    log = "",
+    lwd = 1,
+    lty = 1,
+    type = "l",
+    xlab = NULL,
+    ylab = NULL,
+    xlim = NULL,
+    ylim = NULL,
+    pch = 1,
+    col = "black",
+    norm = FALSE,
+    sub = NULL,
+    cex = 1,
+    legend = TRUE,
+    legend.text = NULL,
+    legend.pos = NULL,
+    legend.col = NULL,
+    smooth = FALSE
+  )
+
+  plot.settings <- modifyList(x = plot.settings, val = list(...), keep.null = TRUE)
+
+  ##try to find optimal layout parameters; this is a crude heuristic, but
+  ##better than no presetting at all
+
+  if(combine){
+    n.plots <- length(unique(as.character(structure_RLum(object)$recordType)))
+
+  }else{
+    n.plots <- length_RLum(object)
+
+  }
+
+
+  if (missing(ncols) | missing(nrows)) {
+    if (missing(ncols) & !missing(nrows)) {
+      if (n.plots  == 1) {
+        ncols <- 1
+
+      } else{
+        ncols <- 2
+
+      }
+
+    }
+    else if (!missing(ncols) & missing(nrows)) {
+      if (n.plots  == 1) {
+        nrows <- 1
+
+      }
+      else if (n.plots  > 1 & n.plots <= 4) {
+        nrows <- 2
+
+      } else{
+        nrows <- 3
+
+      }
+
+
+    } else{
+      if (n.plots  == 1) {
+        nrows <- 1
+        ncols <- 1
+
+      }
+      else if (n.plots  > 1 & n.plots  <= 2) {
+        nrows <- 1
+        ncols <- 2
+
+      } else if (n.plots  > 2 & n.plots <= 4) {
+        nrows <- 2
+        ncols <- 2
+
+      }
+      else{
+        nrows <- 3
+        ncols <- 2
+
+      }
+
+    }
+
+  }
+
+
+  # Plotting ------------------------------------------------------------------
+
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  ##(1) NORMAL (combine == FALSE)
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  if(!combine || length(object@records) == 1){
+
+    ##show warning message
+    if(combine & length(object at records) == 1){
+      warning("Nothing to combine, object contains a single curve.")
+
+    }
+
+    ##grep RLum.Data.Curve or RLum.Data.Spectrum objects
+    temp <- lapply(1:length(object@records), function(x){
+
+      if(is(object@records[[x]], "RLum.Data.Curve") ||
+         is(object@records[[x]], "RLum.Data.Spectrum")){
+
+        object@records[[x]]
+
+      }})
+
+    ##calculate number of pages for mtext
+    n.pages <- ceiling(length(temp) / (nrows * ncols))
+
+    ##set par
+    if(!plot.single) {
+      par.default <- par("mfrow")
+      on.exit(par(mfrow = par.default))
+      par(mfrow = c(nrows, ncols))
+    }
+
+
+    ##expand plot settings list
+    plot.settings <- lapply(setNames(1:length(plot.settings), names(plot.settings)),
+                            function(x) {
+                              if (!is.null(plot.settings[[x]])) {
+                                if(length(plot.settings[[x]]) > 1){
+
+                                  if(is(plot.settings[[x]], "list")){
+                                    rep_len(plot.settings[[x]], length.out = length(temp))
+
+                                  }else{
+                                   rep_len(list(plot.settings[[x]]), length.out = length(temp))
+
+                                  }
+
+                                }else{
+                                  rep_len(plot.settings[[x]], length.out = length(temp))
+
+                                }
+
+                              } else{
+                                plot.settings[[x]]
+
+                              }
+                            })
+
+    ##expand abline
+    if(!is.null(abline)){
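+      ##(rep_len() drops the names attribute, hence the names are stored
+      ##first and re-attached afterwards)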
+      abline.names <- rep_len(names(abline), length.out = length(temp))
+      abline <- rep_len(abline, length.out = length(temp))
+      names(abline) <- abline.names
+
+    }
+
+    ##apply curve transformation
+    for(i in 1:length(temp)){
+
+      if(is(temp[[i]], "RLum.Data.Curve") == TRUE){
+
+        ##set curve transformation if wanted
+        if((grepl("IRSL", temp[[i]]@recordType) | grepl("OSL", temp[[i]]@recordType)) &
+           !missing(curve.transformation)){
+
+          if(curve.transformation=="CW2pLM"){
+            temp[[i]] <- CW2pLM(temp[[i]])
+
+          }else if(curve.transformation=="CW2pLMi"){
+            temp[[i]] <- CW2pLMi(temp[[i]])
+
+          }else if(curve.transformation=="CW2pHMi"){
+            temp[[i]]<- CW2pHMi(temp[[i]])
+
+          }else if(curve.transformation=="CW2pPMi"){
+            temp[[i]] <- CW2pPMi(temp[[i]])
+
+          }else{
+            warning("Function for 'curve.transformation' is unknown. No transformation is performed.")
+
+          }
+
+        }
+
+
+        ##check plot settings and adjust
+        ##xlim
+        if (!is.null(plot.settings$xlim)) {
+          xlim.set <- plot.settings$xlim[[i]]
+          if (plot.settings$xlim[[i]][1] < min(temp[[i]]@data[,1])) {
+            xlim.set[1] <- min(temp[[i]]@data[,1])
+          }
+          if (plot.settings$xlim[[i]][2] > max(temp[[i]]@data[,1])) {
+            xlim.set[2] <- max(temp[[i]]@data[,1])
+          }
+
+        }else{
+          xlim.set <- plot.settings$xlim[[i]]
+
+        }
+
+        ##ylim
+        if (!is.null(plot.settings$ylim)) {
+          ylim.set <- plot.settings$ylim[[i]]
+          if (plot.settings$ylim[[i]][1] < min(temp[[i]]@data[,2])) {
+            ylim.set[1] <- min(temp[[i]]@data[,2])
+          }
+          if (plot.settings$ylim[[i]][2] > max(temp[[i]]@data[,2])) {
+            ylim.set[2] <- max(temp[[i]]@data[,2])
+          }
+
+        }else{
+          ylim.set <- plot.settings$ylim[[i]]
+
+        }
+
+        ##col
+        if (unique(plot.settings$col) != "black") {
+          col <- plot.settings$col[i]
+        } else{
+          if (grepl("IRSL", temp[[i]]@recordType)) {
+            col <- "red"
+          } else
+            if (grepl("OSL", temp[[i]]@recordType)) {
+              col <- "blue"
+            } else
+            {
+              col <- plot.settings$col[[i]]
+            }
+        }
+
+        ##main
+        main <- if (is.null(plot.settings$main[[i]])) {
+          temp[[i]]@recordType
+        } else{
+          plot.settings$main[[i]]
+        }
+
+        ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        ##PLOT
+        ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+        ##plot RLum.Data.Curve curve
+        plot_RLum.Data.Curve(
+          temp[[i]],
+          col = col,
+          mtext = if(!is.null(plot.settings$mtext[[i]])){
+            plot.settings$mtext[[i]]
+          }else{
+            paste("#", i, sep = "")
+          },
+          par.local = FALSE,
+          main = main,
+          log = plot.settings$log[[i]],
+          lwd = plot.settings$lwd[[i]],
+          type = plot.settings$type[[i]],
+          lty = plot.settings$lty[[i]],
+          xlim = xlim.set,
+          ylim = ylim.set,
+          pch = plot.settings$pch[[i]],
+          cex = plot.settings$cex[[i]],
+          smooth = plot.settings$smooth[[i]],
+          ...
+        )
+
+        ##add abline
+        if(!is.null(abline[[i]])){
+          do.call(what = "abline", args = abline[i])
+
+        }
+
+
+      } else if(is(temp[[i]], "RLum.Data.Spectrum")) {
+
+        plot_RLum.Data.Spectrum(temp[[i]],
+                                mtext =  if(!is.null(plot.settings$mtext[[i]])){
+                                  plot.settings$mtext[[i]]
+                                }else{
+                                  paste("#", i, sep = "")
+                                },
+                                par.local = FALSE,
+                                main = if(!is.null(plot.settings$main)){
+                                  plot.settings$main
+                                }else{
+                                  temp[[i]]@recordType
+                                })
+
+      }
+
+    }#end for loop
+
+  }else{
+
+    ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+    ##(2) NORMAL (combine == TRUE)
+    ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+    ##(1) check RLum objects in the set
+    object.list <- get_RLum(object)
+
+    sapply(1:length(object.list), function(x){
+      if(is(object.list[[x]])[1] != "RLum.Data.Curve"){
+        stop("[plot_RLum.Analysis()] Using 'combine' is limited to 'RLum.Data.Curve' objects.")
+
+      }
+
+    })
+
+
+    ##account for different curve types, combine similar
+    temp.object.structure  <- structure_RLum(object)
+    temp.recordType <- as.character(unique(temp.object.structure$recordType))
+
+
+    ##change graphic settings
+    if(!plot.single){
+      par.default <- par()[c("cex", "mfrow")]
+
+      if(!missing(ncols) & !missing(nrows)){
+        par(mfrow = c(nrows, ncols))
+
+      }
+
+
+      ##this 2nd par request is needed as setting mfrow resets the par settings ... this might
+      ##not be wanted
+      par(cex = plot.settings$cex[1])
+
+    }else{
+      par.default <- par()[c("cex")]
+      par(cex = plot.settings$cex)
+
+    }
+
+
+    ##expand plot settings list
+    ##expand list
+    plot.settings <- lapply(setNames(1:length(plot.settings), names(plot.settings)), function(x) {
+      if (!is.null(plot.settings[[x]])) {
+        if(is.list(plot.settings[[x]])){
+          rep_len(plot.settings[[x]], length.out = length(temp.recordType))
+
+        }else{
+          rep_len(list(plot.settings[[x]]), length.out = length(temp.recordType))
+
+        }
+
+
+      } else{
+        plot.settings[[x]]
+
+      }
+    })
+
+    ##expand abline
+    if(!is.null(abline)){
+      abline.names <- rep_len(names(abline), length.out = length(temp.recordType))
+      abline <- rep_len(abline, length.out = length(temp.recordType))
+      names(abline) <- abline.names
+
+    }
+
+
+    ##(2) PLOT values
+    for(k in 1:length(temp.recordType)) {
+
+      ###get type of curves
+      temp.object <-
+        get_RLum(object, recordType = temp.recordType[k], drop = FALSE)
+
+      ##get structure
+      object.structure  <- structure_RLum(temp.object)
+
+      ##now get the real list object (note the argument recursive = FALSE)
+      object.list <-
+        get_RLum(object, recordType = temp.recordType[k], recursive = FALSE)
+
+      ##prevent problems for non set argument
+      if (missing(curve.transformation)) {
+        curve.transformation <- "None"
+      }
+
+      ##transform values to data.frame and norm values
+      temp.data.list <- lapply(1:length(object.list), function(x) {
+        ##set curve transformation if wanted
+
+        if (grepl("IRSL", object.list[[x]]@recordType) |
+            grepl("OSL", object.list[[x]]@recordType)) {
+          if (curve.transformation == "CW2pLM") {
+            object.list[[x]] <- CW2pLM(object.list[[x]])
+
+          }else if (curve.transformation == "CW2pLMi") {
+            object.list[[x]] <- CW2pLMi(object.list[[x]])
+
+          }else if (curve.transformation == "CW2pHMi") {
+            object.list[[x]] <- CW2pHMi(object.list[[x]])
+
+          }else if (curve.transformation == "CW2pPMi") {
+            object.list[[x]] <- CW2pPMi(object.list[[x]])
+
+          }
+
+        }
+
+
+        temp.data <- as(object.list[[x]], "data.frame")
+
+        ##normalise curves if argument has been set
+        if (plot.settings$norm[[k]]) {
+          temp.data[,2] <- temp.data[,2] / max(temp.data[,2])
+
+        }
+
+        return(temp.data)
+
+      })
+
+      ##set plot parameters
+      ##main
+      main <- if (!is.null(plot.settings$main[[k]])) {
+        plot.settings$main[[k]]
+      } else{
+        paste0(temp.recordType[[k]], " combined")
+      }
+
+      ##xlab
+      xlab <- if(!is.null(plot.settings$xlab[[k]])){
+        plot.settings$xlab[[k]]
+      }else{
+        switch(temp.recordType[[k]],
+               "TL" = "Temperature [\u00B0C]",
+               "IRSL" = "Time [s]",
+               "OSL" = "Time [s]",
+               "RF" = "Time [s]",
+               "RBR" = "Time [s]",
+               "LM-OSL" = "Time [s]"
+        )
+
+      }
+
+      ##ylab
+      ylab <- if(!is.null(plot.settings$ylab[[k]])){
+        plot.settings$ylab[[k]]
+      }else{
+        paste0(temp.recordType[[k]], " [a.u.]")
+      }
+
+      ##xlim
+      xlim <- if (!is.null(plot.settings$xlim[[k]]) & length(plot.settings$xlim[[k]]) >1) {
+        plot.settings$xlim[[k]]
+      } else {
+        c(min(object.structure$x.min), max(object.structure$x.max))
+      }
+
+      ##ylim
+      ylim <- if (!is.null(plot.settings$ylim[[k]]) & length(plot.settings$ylim[[k]]) > 1) {
+        plot.settings$ylim[[k]]
+      } else {
+        range(unlist(lapply(X = temp.data.list, FUN = function(x){
+          range(x[,2])
+        })))
+
+      }
+
+      ##col (again)
+      col <- if(length(plot.settings$col[[k]]) > 1 || plot.settings$col[[k]][1] != "black"){
+        plot.settings$col[[k]]
+
+      }else{
+        col <- get("col", pos = .LuminescenceEnv)
+      }
+
+      ##if length of provided colours is < the number of objects, just one colour is supported
+      if (length(col) < length(object.list)) {
+        col <- rep_len(col, length(object.list))
+
+      }
+
+      ##lty
+      if (length(plot.settings$lty[[k]]) < length(object.list)) {
+        lty <- rep(plot.settings$lty[[k]], times = length(object.list))
+
+      }else{
+        lty <- plot.settings$lty[[k]]
+
+      }
+
+      ##legend.text
+      legend.text <- if(!is.null(plot.settings$legend.text[[k]])){
+        plot.settings$legend.text[[k]]
+
+      }else{
+        paste("Curve", 1:length(object.list))
+
+      }
+
+      ##legend.col
+      legend.col <- if(!is.null(plot.settings$legend.col[[k]])){
+        plot.settings$legend.col[[k]]
+
+      }else{
+        NULL
+
+      }
+
+      ##legend.pos
+      legend.pos <- if(!is.null(plot.settings$legend.pos[[k]])){
+        plot.settings$legend.pos[[k]]
+
+      }else{
+        "topright"
+
+      }
+
+      if (legend.pos == "outside") {
+        par.default.outside <- par()[c("mar", "xpd")]
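+        ## widen the right margin and allow drawing beyond the plot region so
+        ## the legend fits outside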
+        par(mar = c(5.1, 4.1, 4.1, 8.1), xpd = TRUE)
+      }
+
+
+      ##open plot area
+      plot(
+        NA,NA,
+        xlim = xlim,
+        ylim = ylim,
+        main = main,
+        xlab = xlab,
+        ylab = ylab,
+        log = plot.settings$log[[k]],
+        sub = plot.settings$sub[[k]]
+      )
+
+      ##plot single curve values
+      ##matplot is not used here because the channel resolution may differ between curves
+      for (n in 1:length(temp.data.list)) {
+
+
+        ##smooth
+        ##smoothing is applied again here because this call differs from the
+        ##earlier one, where the argument is passed to plot_RLum.Data.Curve()
+        if(plot.settings$smooth[[k]]){
+
+          k_factor <- ceiling(length(temp.data.list[[n]][, 2])/100)
+          temp.data.list[[n]][, 2] <- zoo::rollmean(temp.data.list[[n]][, 2],
+                                            k = k_factor, fill = NA)
+        }
+
+        ##print lines
+        lines(temp.data.list[[n]],
+              col = col[n],
+              lty = lty[n],
+              lwd = plot.settings$lwd[[k]])
+
+      }
+
+      ##add abline
+      if(!is.null(abline[[k]])){
+        do.call(what = "abline", args = abline[k])
+
+      }
+
+      ##mtext
+      mtext(plot.settings$mtext[[k]], side = 3, cex = .8 * plot.settings$cex[[k]])
+
+      ##legend
+      if (plot.settings$legend[[k]]) {
+        legend(
+          x = if (legend.pos == "outside") par()$usr[2] else legend.pos,
+          y = if (legend.pos == "outside") par()$usr[4] else NULL,
+          legend = legend.text,
+          lwd = plot.settings$lwd[[k]],
+          lty = plot.settings$lty[[k]],
+          col = if (is.null(legend.col)) {
+            col[1:length(object.list)]
+          } else{
+            legend.col
+          },
+          bty = "n",
+          cex = 0.8 * plot.settings$cex[[k]]
+        )
+
+      }
+
+    }
+
+    ##reset graphic settings
+    if (exists("par.default.outside")) {
+      par(par.default.outside)
+      rm(par.default.outside)
+    }
+    par(par.default)
+    rm(par.default)
+
+  }
+
+}
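
The smoothing inside the curve loop above adapts the running-mean window to
the curve length (about one percent of the number of channels). A minimal
standalone sketch of the same idea, assuming only a numeric vector of count
values (all names are illustrative):

    library(zoo)

    counts <- rpois(500, lambda = 50)     # synthetic curve with 500 channels
    k <- ceiling(length(counts) / 100)    # window scales with curve length
    smoothed <- zoo::rollmean(counts, k = k, fill = NA)

Using fill = NA keeps the output as long as the input, so the smoothed values
can be written back in place, as done above.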
diff --git a/R/plot_RLum.Data.Curve.R b/R/plot_RLum.Data.Curve.R
new file mode 100644
index 0000000..0f8343a
--- /dev/null
+++ b/R/plot_RLum.Data.Curve.R
@@ -0,0 +1,315 @@
+#' Plot function for an RLum.Data.Curve S4 class object
+#'
+#' The function provides a standardised plot output for curve data of an
+#' RLum.Data.Curve S4 class object
+#'
+#' Only single curve data can be plotted with this function. Arguments
+#' follow the conventions of \code{\link{plot}}.
+#'
+#' @param object \code{\linkS4class{RLum.Data.Curve}} (\bold{required}): S4
+#' object of class \code{RLum.Data.Curve}
+#'
+#' @param par.local \code{\link{logical}} (with default): use local graphical
+#' parameters for plotting, i.e. the plot is shown in one column and one row.
+#' If \code{par.local = FALSE}, global parameters are inherited.
+#'
+#' @param norm \code{\link{logical}} (with default): allows curve normalisation
+#' to the highest count value
+#'
+#' @param smooth \code{\link{logical}} (with default): provides an automatic curve smoothing
+#' based on \code{\link[zoo]{rollmean}}
+#'
+#' @param \dots further arguments and graphical parameters that will be passed
+#' to the \code{plot} function
+#'
+#' @return Returns a plot.
+#'
+#' @note Not all arguments of \code{\link{plot}} will be passed!
+#'
+#' @section Function version: 0.2.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{plot}}, \code{\link{plot_RLum}}
+#'
+#' @references #
+#'
+#' @keywords aplot
+#'
+#' @examples
+#'
+#'
+#' ##plot curve data
+#'
+#' #load Example data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' #transform data.frame to RLum.Data.Curve object
+#' temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
+#'
+#' #plot RLum.Data.Curve object
+#' plot_RLum.Data.Curve(temp)
+#'
+#'
+#' @export
+plot_RLum.Data.Curve<- function(
+  object,
+  par.local = TRUE,
+  norm = FALSE,
+  smooth = FALSE,
+  ...
+){
+
+  # Integrity check -------------------------------------------------------------
+
+  ##check if object is of class RLum.Data.Curve
+  if(class(object) != "RLum.Data.Curve"){
+
+    stop("[plot_RLum.Data.Curve()] Input object is not of type RLum.Data.Curve")
+
+  }
+
+  ##proceed only if the curve contains no NA values
+  if (!anyNA(object@data)) {
+    ##set labeling unit
+    lab.unit <- if (object@recordType %in% c("OSL", "IRSL", "RL", "RF", "LM-OSL", "RBR")) {
+      "s"
+    } else if (object@recordType == "TL") {
+      "\u00B0C"
+    } else {
+      "Unknown"
+    }
+
+    lab.xlab <- if (object@recordType %in% c("OSL", "IRSL", "RL", "RF", "RBR", "LM-OSL")) {
+      "Stimulation time"
+    } else if (object@recordType == "TL") {
+      "Temperature"
+    } else {
+      "Independent"
+    }
+
+    ##XSYG
+    ##check for curveDescripter
+    if ("curveDescripter" %in% names(object at info)) {
+      temp.lab <-
+        strsplit(object at info$curveDescripter,
+                 split = ";",
+                 fixed = TRUE)[[1]]
+
+      xlab.xsyg <- temp.lab[1]
+      ylab.xsyg <- temp.lab[2]
+
+    } else{
+      xlab.xsyg <- NA
+      ylab.xsyg <- NA
+
+    }
+
+    ##normalise curves if argument has been set
+    if (norm) {
+      object@data[,2] <- object@data[,2] / max(object@data[,2])
+
+    }
+
+    ##deal with additional arguments
+    extraArgs <- list(...)
+
+    main <- if ("main" %in% names(extraArgs)) {
+      extraArgs$main
+    } else
+    {
+      object@recordType
+    }
+
+    xlab <- if ("xlab" %in% names(extraArgs)) {
+      extraArgs$xlab
+    } else
+    {
+      if (!is.na(xlab.xsyg)) {
+        xlab.xsyg
+      } else
+      {
+        paste0(lab.xlab, " [", lab.unit, "]")
+      }
+    }
+
+    ylab <- if ("ylab" %in% names(extraArgs)) {
+      extraArgs$ylab
+    }else if (!is.na(ylab.xsyg)) {
+      ylab.xsyg
+    }
+    else if (lab.xlab == "Independent") {
+      "Dependent [unknown]"
+    }
+    else {
+      paste0(
+        object@recordType,
+        " [cts/",
+        round(max(object@data[,1]) / length(object@data[,1]), digits = 2),
+        " ", lab.unit, "]"
+      )
+    }
+
+    sub <-  if ("sub" %in% names(extraArgs)) {
+      extraArgs$sub
+    } else
+    {
+      if ((grepl("TL", object at recordType) == TRUE) &
+          "RATE" %in% names(object at info)) {
+        paste("(",object at info$RATE," K/s)", sep = "")
+      }
+
+      if ((grepl("OSL", object at recordType) |
+           grepl("IRSL", object at recordType)) &
+          "interval" %in% names(object at info)) {
+        paste("(resolution: ",object at info$interval," s)", sep = "")
+      }
+
+    }
+    cex <- if ("cex" %in% names(extraArgs)) {
+      extraArgs$cex
+    } else
+    {
+      1
+    }
+
+    type <- if ("type" %in% names(extraArgs)) {
+      extraArgs$type
+    } else
+    {
+      "l"
+    }
+
+    lwd <- if ("lwd" %in% names(extraArgs)) {
+      extraArgs$lwd
+    } else
+    {
+      1
+    }
+
+    lty <- if ("lty" %in% names(extraArgs)) {
+      extraArgs$lty
+    } else
+    {
+      1
+    }
+
+    pch <- if ("pch" %in% names(extraArgs)) {
+      extraArgs$pch
+    } else
+    {
+      1
+    }
+
+    col <- if ("col" %in% names(extraArgs)) {
+      extraArgs$col
+    } else
+    {
+      1
+    }
+
+    ylim <- if ("ylim" %in% names(extraArgs)) {
+      extraArgs$ylim
+    } else
+    {
+      c(min(object@data[,2]), max(object@data[,2]))
+    }
+
+    xlim <- if ("xlim" %in% names(extraArgs)) {
+      extraArgs$xlim
+    } else
+    {
+      c(min(object@data[,1]), max(object@data[,1]))
+    }
+
+    log <- if ("log" %in% names(extraArgs)) {
+      extraArgs$log
+    } else
+    {
+      ""
+    }
+
+    mtext <- if ("mtext" %in% names(extraArgs)) {
+      extraArgs$mtext
+    } else
+    {
+      ""
+    }
+
+    fun  <-
+      if ("fun" %in% names(extraArgs)) {
+        extraArgs$fun
+      } else {
+        FALSE
+      }
+
+    ##to avoid problems with plot method of RLum.Analysis
+    plot.trigger <-
+      if ("plot.trigger" %in% names(extraArgs)) {
+        extraArgs$plot.trigger
+      } else
+      {
+        FALSE
+      }
+
+    ##par setting for possible combination with plot method for RLum.Analysis objects
+    if (par.local == TRUE) {
+      par(mfrow = c(1,1), cex = cex)
+    }
+
+    ##smooth
+    if(smooth){
+
+      k <- ceiling(length(object@data[, 2])/100)
+      object@data[, 2] <- zoo::rollmean(object@data[, 2],
+                                        k = k, fill = NA)
+    }
+
+
+    ##plot curve
+    plot(
+      object@data[,1], object@data[,2],
+      main = main,
+      xlim = xlim,
+      ylim = ylim,
+      xlab = xlab,
+      ylab = ylab,
+      sub = sub,
+      type = type,
+      log = log,
+      col = col,
+      lwd = lwd,
+      pch = pch,
+      lty = lty
+    )
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex * 0.8)
+
+    if (fun == TRUE) {
+      sTeve()
+    }
+
+  }else{
+
+    warning("[plot_RLum.Data.Curve()] Curve contains NA-values, nothing plotted.")
+
+  }
+
+}
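
A short usage sketch for the function above, based on the example data set
from its roxygen block; norm and smooth are the function's own arguments:

    data(ExampleData.CW_OSL_Curve, envir = environment())
    temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")

    ## curve normalised to its highest count value and smoothed with the
    ## automatic running-mean window
    plot_RLum.Data.Curve(temp, norm = TRUE, smooth = TRUE)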
diff --git a/R/plot_RLum.Data.Image.R b/R/plot_RLum.Data.Image.R
new file mode 100644
index 0000000..58d30bb
--- /dev/null
+++ b/R/plot_RLum.Data.Image.R
@@ -0,0 +1,211 @@
+#' Plot function for an \code{RLum.Data.Image} S4 class object
+#'
+#' The function provides a standardised plot output for image data of an
+#' \code{RLum.Data.Image} S4 class object, mainly using the plot functions
+#' provided by the \code{\link{raster}} package.
+#'
+#' \bold{Details on the plot functions} \cr
+#'
+#' The image is visualised as a 2D plot using generic plot types provided by other
+#' packages.
+#'
+#' Supported plot types: \cr
+#'
+#' \bold{\code{plot.type = "plot.raster"}}\cr
+#'
+#' Uses the standard plot function for raster data from the package
+#' \code{\link[raster]{raster}}: \code{\link[raster]{plot}}. For each raster layer in a
+#' raster brick one plot is produced.
+#'
+#' Arguments that are passed through the function call:\cr
+#'
+#' \code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim},
+#' \code{col}
+#'
+#' \bold{\code{plot.type = "plotRGB"}}\cr
+#'
+#' Uses the function \code{\link[raster]{plotRGB}} from the
+#' \code{\link[raster]{raster}} package. Only one image plot is produced as all layers
+#' in a brick are combined. This plot type is useful to see whether any signal
+#' is recorded by the camera.\cr Arguments that are passed through the function
+#' call:\cr
+#'
+#' \code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{ext},
+#' \code{interpolate}, \code{maxpixels}, \code{alpha}, \code{colNA},
+#' \code{stretch}\cr
+#'
+#' \bold{\code{plot.type = "contour"}}\cr
+#'
+#' Uses the contour plot function from the \code{\link[raster]{raster}}
+#' package (\code{\link[raster]{contour}}). For each raster layer one contour
+#' plot is produced. Arguments that are passed through the function call:\cr
+#'
+#' \code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim},
+#' \code{col}
+#'
+#' @param object \code{\linkS4class{RLum.Data.Image}} (\bold{required}): S4
+#' object of class \code{RLum.Data.Image}
+#' @param par.local \code{\link{logical}} (with default): use local graphical
+#' parameters for plotting, i.e. the plot is shown in one column and one row.
+#' If \code{par.local = FALSE} global parameters are inherited.
+#' @param plot.type \code{\link{character}} (with default): plot types.
+#' Supported types are \code{plot.raster}, \code{plotRGB} or \code{contour}
+#' @param \dots further arguments and graphical parameters that will be passed
+#' to the specific plot functions.
+#' @return Returns a plot.
+#' @note This function has been created to facilitate the plotting of image data
+#' imported by the function \code{\link{read_SPE2R}}. However, so far the
+#' function is not optimized to handle image data > ca. 200 MByte and thus
+#' plotting of such data is extremely slow.
+#' @section Function version: 0.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso \code{\linkS4class{RLum.Data.Image}}, \code{\link{plot}},
+#' \code{\link{plot_RLum}}, \code{\link[raster]{raster}}
+#' @references -
+#' @keywords aplot
+#' @examples
+#'
+#'
+#' ##load data
+#' data(ExampleData.RLum.Data.Image, envir = environment())
+#'
+#' ##plot data
+#' plot_RLum.Data.Image(ExampleData.RLum.Data.Image)
+#'
+#' @export
+plot_RLum.Data.Image <- function(
+  object,
+  par.local = TRUE,
+  plot.type = "plot.raster",
+  ...
+){
+
+
+  # Integrity check -----------------------------------------------------------
+
+  ##check if object is of class RLum.Data.Image
+  if(class(object) != "RLum.Data.Image"){
+
+    stop("[plot_RLum.Data.Image()] Input object is not of type RLum.Data.Image")
+
+  }
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  ##TODO
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {"RLum.Data.Image"}
+
+  axes <- if("axes" %in% names(extraArgs)) {extraArgs$axes} else
+  {TRUE}
+
+  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
+  {"Length [px]"}
+
+  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+  {"Height [px]"}
+
+  xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
+  {c(0,dim(get_RLum(object))[2])}
+
+  ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else
+  {c(0,dim(get_RLum(object))[1])}
+
+  ##plotRGB::ext
+  ext <- if("ext" %in% names(extraArgs)) {extraArgs$ext} else
+  {NULL}
+
+  ##plotRGB::interpolate
+  interpolate <- if("interpolate" %in% names(extraArgs)) {extraArgs$interpolate} else
+  {FALSE}
+
+  ##plotRGB::stretch
+  stretch <- if("stretch" %in% names(extraArgs)) {extraArgs$stretch} else
+  {"hist"}
+
+  ##plotRGB::maxpixels
+  maxpixels <- if("maxpixels" %in% names(extraArgs)) {extraArgs$maxpixels} else
+  {dim(get_RLum(object))[1]*dim(get_RLum(object))[2]}
+
+  ##plotRGB::alpha
+  alpha <- if("alpha" %in% names(extraArgs)) {extraArgs$alpha} else
+  {255}
+
+  ##plotRGB::colNA
+  colNA <- if("colNA" %in% names(extraArgs)) {extraArgs$colNA} else
+  {"white"}
+
+  col <- if("col" %in% names(extraArgs)) {extraArgs$col} else
+  {topo.colors(255)}
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else
+  {1}
+
+  ##par setting for possible combination with plot method for RLum.Analysis objects
+  if(par.local == TRUE){
+
+    par(mfrow=c(1,1), cex = cex)
+
+  }
+
+  ##grep raster
+
+  if(plot.type == "plotRGB"){
+    ## ==========================================================================#
+    ## standard raster plotRGB (package raster)
+    ## ==========================================================================#
+
+    raster::plotRGB(
+      get_RLum(object),
+      main = main,
+      axes = axes,
+      xlab = xlab,
+      ylab = ylab,
+      ext = ext,
+      interpolate = interpolate,
+      maxpixels = maxpixels,
+      alpha = alpha,
+      colNA = colNA,
+      stretch = stretch)
+
+
+    ## ==========================================================================#
+    ## standard raster plot (package raster)
+    ## ==========================================================================#
+  }else if(plot.type == "plot.raster"){
+
+    plot(get_RLum(object),
+         main = main,
+         xlim = xlim,
+         ylim = ylim,
+         xlab = xlab,
+         ylab = ylab,
+         col = col)
+
+    ## ==========================================================================#
+    ## standard contour (package raster)
+    ## ==========================================================================#
+  }else if(plot.type == "contour"){
+
+    for(i in 1:raster::nlayers(get_RLum(object))){
+
+
+      raster::contour(raster::raster(get_RLum(object), layer = i),
+                      main = main,
+                      xlim = xlim,
+                      ylim = ylim,
+                      xlab = xlab,
+                      ylab = ylab,
+                      col = col)
+
+    }
+
+  }else{
+
+    stop("[plot_RLum.Data.Image()] Unknown plot type.")
+
+  }
+
+}
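
A usage sketch for the three supported plot types, assuming the example data
set from the roxygen block above; note that plotRGB presumes the underlying
raster brick holds enough layers (three bands by default):

    data(ExampleData.RLum.Data.Image, envir = environment())

    plot_RLum.Data.Image(ExampleData.RLum.Data.Image, plot.type = "plot.raster")
    plot_RLum.Data.Image(ExampleData.RLum.Data.Image, plot.type = "contour")

    ## only meaningful for multi-layer bricks
    plot_RLum.Data.Image(ExampleData.RLum.Data.Image, plot.type = "plotRGB")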
diff --git a/R/plot_RLum.Data.Spectrum.R b/R/plot_RLum.Data.Spectrum.R
new file mode 100644
index 0000000..94ac93a
--- /dev/null
+++ b/R/plot_RLum.Data.Spectrum.R
@@ -0,0 +1,902 @@
+#' Plot function for an RLum.Data.Spectrum S4 class object
+#'
+#' The function provides a standardised plot output for spectrum data of an
+#' RLum.Data.Spectrum S4 class object
+#'
+#' \bold{Matrix structure} \cr (cf. \code{\linkS4class{RLum.Data.Spectrum}})
+#'
+#' \itemize{ \item \code{rows} (x-values): wavelengths/channels (xlim, xlab)
+#' \item \code{columns} (y-values): time/temperature (ylim, ylab) \item
+#' \code{cells} (z-values): count values (zlim, zlab) }
+#'
+#' \emph{Note: This nomenclature is valid for all plot types of this
+#' function!}\cr
+#'
+#' \bold{Nomenclature for value limiting}
+#'
+#' \code{xlim}: Limits values along the wavelength axis\cr \code{ylim}: Limits
+#' values along the time/temperature axis\cr \code{zlim}: Limits values along
+#' the count value axis\cr
+#'
+#' \bold{Energy axis re-calculation}
+#'
+#' If the argument \code{xaxis.energy = TRUE} is chosen, the spectrum is plotted
+#' as intensity vs. energy instead of intensity vs. wavelength. For this, the
+#' entire spectrum is re-calculated (e.g., Appendix 4 in Blasse and Grabmaier,
+#' 1994):
+#'
+#' The intensity of the spectrum (z-values) is re-calculated using the
+#' following equation:
+#'
+#' \deqn{\phi_{E} = \phi_{\lambda} * \lambda^2 / (hc)}
+#'
+#' with \eqn{\phi_{E}} the intensity per interval of energy \eqn{E} (eV),
+#' \eqn{\phi_{\lambda}} the intensity per interval of wavelength \eqn{\lambda}
+#' (nm), \eqn{h} (eV s) the Planck constant and \eqn{c} (m/s) the velocity
+#' of light.
+#'
+#' For transforming the wavelength axis (x-values) the equation
+#'
+#' \deqn{E = hc/\lambda}
+#'
+#' is used. For further details please see the cited literature.\cr
+#'
+#' \bold{Details on the plot functions}
+#'
+#' Spectrum is visualised as 3D or 2D plot. Both plot types are based on
+#' internal R plot functions. \cr
+#'
+#' \bold{\code{plot.type = "persp"}}
+#'
+#' Arguments that will be passed to \code{\link{persp}}: \itemize{ \item
+#' \code{shade}: default is \code{0.4} \item \code{phi}: default is \code{15}
+#' \item \code{theta}: default is \code{-30} \item \code{expand}: default is
+#' \code{1} \item \code{ticktype}: default is \code{detailed}, \code{r}: default is \code{10}}
+#'
+#' \emph{Note: Further parameters can be adjusted via \code{par}. For example
+#' to set the background transparent and reduce the thickness of the lines use:
+#' \code{par(bg = NA, lwd = 0.7)} prior to the function call.}
+#'
+#' \bold{\code{plot.type = "single"}}\cr
+#'
+#' Per frame a single curve is returned. Frames are time or temperature
+#' steps.\cr
+#'
+#' \bold{\code{plot.type = "multiple.lines"}}\cr
+#'
+#' All frames are drawn into one single plot.\cr
+#'
+#' \bold{\code{plot.type = "transect"}}\cr
+#'
+#' Depending on the selected wavelength/channel range a transect over the
+#' time/temperature (y-axis) will be plotted along the wavelength/channels
+#' (x-axis). If the range contains more than one channel, values (z-values) are
+#' summed up. To select a transect use the \code{xlim} argument, e.g.
+#' \code{xlim = c(300,310)} plots the summed-up count values of channels
+#' 300 to 310.\cr
+#'
+#' \bold{Further arguments that will be passed (depending on the plot type)}
+#'
+#' \code{xlab}, \code{ylab}, \code{zlab}, \code{xlim}, \code{ylim},
+#' \code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type}, \code{col},
+#' \code{border}, \code{box} \code{lwd}, \code{bty} \cr
+#'
+#' @param object \code{\linkS4class{RLum.Data.Spectrum}} or \code{\link{matrix}} (\bold{required}): S4
+#' object of class \code{RLum.Data.Spectrum} or a \code{matrix} containing count values of the spectrum.\cr
+#' Please note that in case of a matrix rownames and colnames are set automatically if not provided.
+#'
+#' @param par.local \code{\link{logical}} (with default): use local graphical
+#' parameters for plotting, i.e. the plot is shown in one column and one row.
+#' If \code{par.local = FALSE} global parameters are inherited.
+#' @param plot.type \code{\link{character}} (with default): plot type, for
+#' 3D-plot use \code{persp}, or \code{interactive}, for a 2D-plot \code{contour},
+#' \code{single} or \code{multiple.lines} (along the time or temperature axis)
+#' or \code{transect} (along the wavelength axis) \cr
+#'
+#' @param optical.wavelength.colours \code{\link{logical}} (with default): use
+#' optical wavelength colour palette. Note: For this, the spectrum range is
+#' limited: \code{c(350,750)}. Own colours can be set with the argument
+#' \code{col}.
+#'
+#' @param bg.channels \code{\link{vector}} (optional): defines the channel(s) for
+#' background subtraction. If a vector is provided, the mean of these channels is
+#' used for subtraction. Note: Background subtraction is applied prior to
+#' channel binning
+#'
+#' @param bin.rows \code{\link{integer}} (with default): allow summing-up
+#' wavelength channels (horizontal binning), e.g. with \code{bin.rows = 2} two
+#' channels are summed up
+#'
+#' @param bin.cols \code{\link{integer}} (with default): allow summing-up
+#' channel counts (vertical binning) for plotting, e.g. with \code{bin.cols = 2} two
+#' channels are summed up
+#'
+#' @param rug \code{\link{logical}} (with default): enables or disables colour
+#' rug. Currently only implemented for plot type \code{multiple.lines} and
+#' \code{single}
+#'
+#' @param limit_counts \code{\link{numeric}} (optional): threshold for the count values,
+#' i.e. all count values above this threshold will be replaced by the threshold. This
+#' is especially helpful in case of TL spectra.
+#'
+#' @param xaxis.energy \code{\link{logical}} (with default): enables or
+#' disables an energy axis instead of the wavelength axis. Note: This option does
+#' not simply redraw the axis; instead, the intensity of the spectrum is
+#' recalculated, see details.
+#'
+#' @param legend.text \code{\link{character}} (with default): possibility to
+#' provide own legend text. This argument is only considered for plot types
+#' providing a legend, e.g. \code{plot.type="transect"}
+#'
+#' @param \dots further arguments and graphical parameters that will be passed
+#' to the \code{plot} function.
+#'
+#' @return Returns a plot.
+#'
+#' @note Not all additional arguments (\code{...}) are passed to every plot type!
+#'
+#' @section Function version: 0.5.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot}},
+#' \code{\link{plot_RLum}}, \code{\link{persp}}, \code{\link[plotly]{plot_ly}},
+#' \code{\link{contour}}
+#'
+#' @references Blasse, G., Grabmaier, B.C., 1994. Luminescent Materials.
+#' Springer.
+#'
+#' @keywords aplot
+#'
+#' @examples
+#'
+#'
+#' ##load example data
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ##(1)plot simple spectrum (2D) - contour
+#' plot_RLum.Data.Spectrum(TL.Spectrum,
+#'                         plot.type="contour",
+#'                         xlim = c(310,750),
+#'                         ylim = c(0,300),
+#'                         bin.rows=10,
+#'                         bin.cols = 1)
+#'
+#' ##(2) plot spectrum (3D)
+#' plot_RLum.Data.Spectrum(TL.Spectrum,
+#'                         plot.type="persp",
+#'                         xlim = c(310,750),
+#'                         ylim = c(0,100),
+#'                         bin.rows=10,
+#'                         bin.cols = 1)
+#'
+#' ##(3) plot multiple lines (2D) - multiple.lines (with ylim)
+#' plot_RLum.Data.Spectrum(TL.Spectrum,
+#'                         plot.type="multiple.lines",
+#'                         xlim = c(310,750),
+#'                         ylim = c(0,100),
+#'                         bin.rows=10,
+#'                         bin.cols = 1)
+#'
+#' \dontrun{
+#'  ##(4) interactive plot using the package plotly
+#'  plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+#'  xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+#'  bin.cols = 1)
+#'
+#'  ##(5) alternative using the package fields
+#'  fields::image.plot(get_RLum(TL.Spectrum))
+#'  contour(get_RLum(TL.Spectrum), add = TRUE)
+#'
+#' }
+#'
+#' @export
+plot_RLum.Data.Spectrum <- function(
+  object,
+  par.local = TRUE,
+  plot.type = "contour",
+  optical.wavelength.colours = TRUE,
+  bg.channels,
+  bin.rows = 1,
+  bin.cols = 1,
+  rug = TRUE,
+  limit_counts = NULL,
+  xaxis.energy = FALSE,
+  legend.text,
+  ...
+){
+
+
+  # Integrity check -----------------------------------------------------------
+
+  ##check if object is of class RLum.Data.Spectrum
+  if(class(object) != "RLum.Data.Spectrum"){
+
+    if(class(object) == "matrix"){
+
+      if(is.null(colnames(object))){
+        colnames(object) <- 1:ncol(object)
+
+      }
+
+      if(is.null(rownames(object))){
+        rownames(object) <- 1:nrow(object)
+
+      }
+
+
+      object <- set_RLum(class = "RLum.Data.Spectrum",
+                         data = object)
+
+      message("[plot_RLum.Data.Spectrum()] Input has been converted to a RLum.Data.Spectrum object using set_RLum()")
+
+
+    }else{
+      stop("[plot_RLum.Data.Spectrum()] Input object neither of class 'RLum.Data.Spectrum' nor 'matrix'")
+
+    }
+
+  }
+
+  ##XSYG
+  ##check for curveDescripter
+  if("curveDescripter" %in% names(object at info) == TRUE){
+
+    temp.lab <- strsplit(object at info$curveDescripter, split = ";")[[1]]
+    xlab <- if(xaxis.energy == FALSE){
+      temp.lab[2]}else{"Energy [eV]"}
+    ylab <- temp.lab[1]
+    zlab <- temp.lab[3]
+
+  }else{
+
+    xlab <- if(xaxis.energy == FALSE){
+      "Row values [a.u.]"}else{"Energy [eV]"}
+    ylab <- "Column values [a.u.]"
+    zlab <- "Cell values [a.u.]"
+
+  }
+
+  # Do energy axis conversion -------------------------------------------------------------------
+  if (xaxis.energy) {
+    ##hc: Planck constant [eV s] times velocity of light [nm/s], i.e. in eV nm
+    hc <- 4.13566733e-015 * 299792458e+09
+    lambda <- as.numeric(rownames(object@data))
+
+    ##re-scale the intensities: phi_E = phi_lambda * lambda^2 / (hc)
+    temp.object.data <- sapply(1:ncol(object@data), function(x) {
+      object@data[,x] * lambda ^ 2 / hc
+    })
+
+    ##preserve column names; transform wavelength to energy: E = hc/lambda
+    colnames(temp.object.data) <- colnames(object@data)
+    rownames(temp.object.data) <- hc / lambda
+
+    ##write back to original data, ordered by increasing energy
+    object@data <-
+      temp.object.data[order(as.numeric(rownames(temp.object.data))),]
+
+  }
+
+
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {"RLum.Data.Spectrum"}
+
+  zlab <- if("zlab" %in% names(extraArgs)) {extraArgs$zlab} else
+  {ifelse(plot.type == "multiple.lines", ylab, zlab)}
+
+  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
+  {xlab}
+
+  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+  {ifelse(plot.type == "single" | plot.type == "multiple.lines",
+          "Luminescence [cts/channel]", ylab)}
+
+  xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
+  {c(min(as.numeric(rownames(object@data))),
+     max(as.numeric(rownames(object@data))))}
+
+  ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else
+  {c(min(as.numeric(colnames(object@data))),
+     max(as.numeric(colnames(object@data))))}
+
+  #for zlim see below
+
+  mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else
+  {""}
+
+  cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else
+  {1}
+
+  phi <- if("phi" %in% names(extraArgs)) {extraArgs$phi} else
+  {15}
+
+  theta <- if("theta" %in% names(extraArgs)) {extraArgs$theta} else
+  {-30}
+
+  r <- if("r" %in% names(extraArgs)) {extraArgs$r} else
+  {10}
+
+  shade <- if("shade" %in% names(extraArgs)) {extraArgs$shade} else
+  {0.4}
+
+  expand <- if("expand" %in% names(extraArgs)) {extraArgs$expand} else
+  {0.6}
+
+  border <- if("border" %in% names(extraArgs)) {extraArgs$border} else
+  {NULL}
+
+  box <- if("box" %in% names(extraArgs)) {extraArgs$box} else
+  {TRUE}
+
+  ticktype <- if("ticktype" %in% names(extraArgs)) {extraArgs$ticktype} else
+  {"detailed"}
+
+  log<- if("log" %in% names(extraArgs)) {extraArgs$log} else
+  {""}
+
+  type<- if("type" %in% names(extraArgs)) {extraArgs$type} else
+  {"l"}
+
+  pch<- if("pch" %in% names(extraArgs)) {extraArgs$pch} else
+  {1}
+
+  lwd<- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else
+  {1}
+
+  bty <- if("bty" %in% names(extraArgs)) {extraArgs$bty} else
+  {NULL}
+
+  sub<- if("sub" %in% names(extraArgs)) {extraArgs$sub} else
+  {""}
+
+
+  # prepare values for plot ---------------------------------------------------
+  temp.xyz <- get_RLum(object)
+
+  ##check for NULL column names
+  if(is.null(colnames(temp.xyz))){
+    colnames(temp.xyz) <- 1:ncol(temp.xyz)
+
+  }
+
+  if(is.null(rownames(temp.xyz))){
+    rownames(temp.xyz) <- 1:nrow(temp.xyz)
+
+  }
+
+  ##check for the case of a single column matrix
+  if(ncol(temp.xyz)>1){
+
+    ##reduce for xlim
+    temp.xyz <- temp.xyz[as.numeric(rownames(temp.xyz)) >= xlim[1] &
+                           as.numeric(rownames(temp.xyz)) <= xlim[2],]
+
+    ##reduce for ylim
+    temp.xyz <- temp.xyz[, as.numeric(colnames(temp.xyz)) >= ylim[1] &
+                           as.numeric(colnames(temp.xyz)) <= ylim[2]]
+
+  }
+
+  ## wavelength
+  x <- as.numeric(rownames(temp.xyz))
+
+  ## time/temp
+  y <- as.numeric(colnames(temp.xyz))
+
+
+  # Background subtraction ---------------------------------------------------
+
+  if(missing(bg.channels) == FALSE){
+
+    if(length(bg.channels) > 1){
+
+      temp.bg.signal <- rowMeans(temp.xyz[,bg.channels])
+      temp.xyz <- temp.xyz[,1:ncol(temp.xyz)] - temp.bg.signal
+
+    }else{
+
+      temp.xyz <- temp.xyz[,1:ncol(temp.xyz)] - temp.xyz[,bg.channels]
+      temp.xyz <- ifelse(temp.xyz < 0, mean(temp.xyz[,bg.channels]), temp.xyz)
+
+    }
+
+    ##replace negative values by the mean of the background channel
+    temp.xyz <- ifelse(temp.xyz < 0, mean(temp.xyz[,bg.channels[1]]), temp.xyz)
+
+  }
+
+
+  # Channel binning ---------------------------------------------------------
+
+  if(missing(bin.rows) == FALSE){
+
+    ##calculate n.rows
+    n.rows <- nrow(temp.xyz)
+
+    ##modulo operation for the number of groups
+    bin.group.rest <- n.rows%%bin.rows
+
+    ##define groups for binning
+    bin.group <- rep(1:(n.rows/bin.rows), 1, each = bin.rows)
+
+    ##add last group
+    bin.group <- c(bin.group, rep(n.rows/bin.rows + 1, 1, each = bin.group.rest))
+
+    ##sum up rows
+    temp.xyz <- rowsum(temp.xyz, bin.group)
+
+    ##correct labeling
+    x <- x[seq(1, n.rows, bin.rows)]
+
+    ## to avoid odd plots remove last group if bin.rows is not a multiple
+    ## of the row number
+    if(bin.group.rest != 0){
+
+      temp.xyz <- temp.xyz[-nrow(temp.xyz),]
+      x <- x[-length(x)]
+
+      warning("Last wavelength channel has been removed due to binning.")
+
+    }
+
+
+    rm(bin.group.rest)
+
+  }
+
+
+  if(missing(bin.cols) == FALSE){
+
+    ##calculate n.cols
+    n.cols <- ncol(temp.xyz)
+
+    ##check for validity
+    if(bin.cols > n.cols){
+
+      bin.cols <- n.cols
+
+      warning("bin.cols > the number of columns. Value reduced to number of cols.")
+
+    }
+
+    ##modulo operation for the number of groups
+    bin.group.rest <- n.cols%%bin.cols
+
+    ##define groups for binning
+    bin.group <- rep(1:(n.cols/bin.cols), 1, each = bin.cols)
+
+    ##add last group
+    bin.group <- c(bin.group, rep(n.cols/bin.cols + 1, 1, each = bin.group.rest))
+
+    ##sum up cols
+    temp.xyz <- rowsum(t(temp.xyz), bin.group)
+    temp.xyz <- t(temp.xyz)
+
+    ##correct labeling
+    y <- y[seq(1, n.cols, bin.cols)]
+
+    ## to avoid odd plots remove last group if bin.cols is not a multiple
+    ## of the col number
+    if(bin.group.rest != 0){
+
+      temp.xyz <- temp.xyz[,-ncol(temp.xyz)]
+      y <- y[-length(y)]
+
+      warning("Last count channel has been removed due to column binning.")
+
+    }
+
+  }
+
+  ##limit z-values if requested, this idea was taken from the Diss. by Thomas Schilles, 2002
+  if(!is.null(limit_counts)){
+    temp.xyz[temp.xyz[]>limit_counts] <- limit_counts
+
+  }
+
+  ##check for zlim
+  zlim <- if("zlim" %in% names(extraArgs)) {extraArgs$zlim} else
+  {range(temp.xyz)}
+
+
+  # set color values --------------------------------------------------------
+
+  if("col" %in% names(extraArgs) == FALSE | plot.type == "single" | plot.type == "multiple.lines"){
+
+    if(optical.wavelength.colours == TRUE | (rug == TRUE & (plot.type != "persp" & plot.type != "interactive"))){
+
+      ##use a different colour palette for energy values
+      if (xaxis.energy) {
+        col.violet <- c(2.76, ifelse(max(xlim) <= 4.13, max(xlim), 4.13))
+        col.blue <- c(2.52, 2.76)
+        col.green <- c(2.18, 2.52)
+        col.yellow <- c(2.10, 2.18)
+        col.orange <- c(2.00, 2.10)
+        col.red <- c(1.57, 2.00)
+        col.infrared <-
+          c(1.55, ifelse(min(xlim) >= 1.55, min(xlim), 1.57))
+
+
+        #set colour palette
+        col <- unlist(sapply(1:length(x), function(i){
+
+          if(x[i] >= col.violet[1] & x[i] < col.violet[2]){"#EE82EE"}
+          else if(x[i] >= col.blue[1] & x[i] < col.blue[2]){"#0000FF"}
+          else if(x[i] >= col.green[1] & x[i] < col.green[2]){"#00FF00"}
+          else if(x[i] >= col.yellow[1] & x[i] < col.yellow[2]){"#FFFF00"}
+          else if(x[i] >= col.orange[1] & x[i] < col.orange[2]){"#FFA500"}
+          else if(x[i] >= col.red[1] & x[i] < col.red[2]){"#FF0000"}
+          else if(x[i] <= col.infrared[2]){"#BEBEBE"}
+
+        }))
+
+
+      }else{
+        col.violet <- c(ifelse(min(xlim) <= 300, min(xlim), 300),450)
+        col.blue <- c(450,495)
+        col.green <- c(495,570)
+        col.yellow <- c(570,590)
+        col.orange <- c(590,620)
+        col.red <- c(620,790)
+        col.infrared <-
+          c(790, ifelse(max(xlim) >= 800, max(xlim), 800))
+
+
+        #set colour palette
+        col <- unlist(sapply(1:length(x), function(i){
+
+          if(x[i] >= col.violet[1] & x[i] < col.violet[2]){"#EE82EE"}
+          else if(x[i] >= col.blue[1] & x[i] < col.blue[2]){"#0000FF"}
+          else if(x[i] >= col.green[1] & x[i] < col.green[2]){"#00FF00"}
+          else if(x[i] >= col.yellow[1] & x[i] < col.yellow[2]){"#FFFF00"}
+          else if(x[i] >= col.orange[1] & x[i] < col.orange[2]){"#FFA500"}
+          else if(x[i] >= col.red[1] & x[i] < col.red[2]){"#FF0000"}
+          else if(x[i] >= col.infrared[1]){"#BEBEBE"}
+
+        }))
+
+
+      }
+
+
+
+      ##find unique colours
+      col.unique <- unique(col)
+
+      ##if only one colour value, then skip gradient calculation as it causes
+      ## an error
+
+      if(length(col.unique) > 1){
+
+        ##set colour function for replacement
+        colfunc <- colorRampPalette(col.unique)
+
+        ##get index for colour values to be cut from the current palette
+        col.unique.index <-
+          sapply(1:length(col.unique), function(i) {
+            max(which(col == col.unique[i]))
+
+          })
+
+
+        ##remove last index (no colour gradient needed), for energy axis use the first value
+        col.unique.index <- col.unique.index[-length(col.unique.index)]
+
+
+        ##set borders for colour gradient recalculation
+        col.unique.index.min <- col.unique.index - (50)/bin.rows
+        col.unique.index.max <- col.unique.index + (50)/bin.rows
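+        ##(i.e. the gradient spans roughly 50 original wavelength channels on
+        ##either side of each colour boundary, hence the scaling by bin.rows)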
+
+        ##set negative values to the lowest index
+        col.unique.index.min[col.unique.index.min<=0] <- 1
+
+
+
+        ##build up new index sequence (might be better)
+        col.gradient.index <- as.vector(unlist((
+          sapply(1:length(col.unique.index.min), function(j){
+
+            seq(col.unique.index.min[j],col.unique.index.max[j], by = 1)
+
+          }))))
+
+
+        ##generate colour ramp and replace values
+        col.new <- colfunc(length(col.gradient.index))
+        col[col.gradient.index] <- col.new
+
+        ##adjust the length of the colour vector to the number of rows
+        ##(a mismatch causes a zebra colour pattern)
+        if (diff(c(length(col), nrow(temp.xyz))) < 0) {
+          col <- col[1:c(length(col) - diff(c(length(col), nrow(temp.xyz))))]
+
+        }else if(diff(c(length(col), nrow(temp.xyz))) > 0){
+          col <- col[1:c(length(col) + diff(c(length(col), nrow(temp.xyz))))]
+
+
+        }
+
+
+      }
+
+
+    }else{
+
+      col <- "black"
+
+    }
+
+  }else{
+
+    col <- extraArgs$col
+
+  }
+
+
+  # Do log scaling if needed -------------------------------------------------
+
+  ##x
+  if(grepl("x", log)==TRUE){x <- log10(x)}
+
+  ##y
+  if(grepl("y", log)==TRUE){y <- log10(y)}
+
+  ##z
+  if(grepl("z", log)==TRUE){temp.xyz <- log10(temp.xyz)}
+
+
+  # PLOT --------------------------------------------------------------------
+
+  ##par setting for possible combination with plot method for RLum.Analysis objects
+  if(par.local == TRUE){par(mfrow=c(1,1), cex = cex)}
+
+  ##reset plot type for single-column matrices
+  if(ncol(temp.xyz) == 1){
+    plot.type = "single"
+    warning("[plot_RLum.Data.Spectrum()] Single column matrix: plot.type has been automatically reset to 'single'")
+  }
+
+  ##do not let old code break down ...
+  if(plot.type == "persp3d"){
+    plot.type <- "interactive"
+    warning("[plot_RLum.Data.Spectrum()] 'plot.type' has been automatically reset to interactive!")
+
+  }
+
+  if(plot.type == "persp" && ncol(temp.xyz) > 1){
+    ## ==========================================================================#
+    ##perspective plot
+    ## ==========================================================================#
+
+    persp(x, y, temp.xyz,
+          shade = shade,
+          phi = phi,
+          theta = theta,
+          xlab = xlab,
+          ylab = ylab,
+          zlab = zlab,
+          zlim = zlim,
+          scale = TRUE,
+          col = col[1:(length(col)-1)], ##needed due to recycling of the colours
+          main = main,
+          expand = expand,
+          border = border,
+          box = box,
+          r = r,
+          ticktype = ticktype)
+
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex*0.8)
+
+  }else if(plot.type == "interactive" && ncol(temp.xyz) > 1) {
+    ## ==========================================================================#
+    ##interactive plot and former persp3d
+    ## ==========================================================================#
+
+    ##http://r-pkgs.had.co.nz/description.html
+    if (!requireNamespace("plotly", quietly = TRUE)) {
+      stop("[plot_RLum.Data.Spectrum()] Package 'plotly' needed for this plot type. Please install it.",
+           call. = FALSE)
+    }
+
+       ##set up plot
+       p <- plotly::plot_ly(
+         x = y,
+         y = x,
+         z = temp.xyz,
+         type = "surface",
+         showscale = FALSE
+         #colors = col[1:(length(col)-1)],
+         )
+
+       ##change graphical parameters
+       p <-  plotly::layout(
+         p = p,
+         scene = list(
+           xaxis = list(title = ylab),
+           yaxis = list(title = xlab),
+           zaxis = list(title = zlab)
+
+         ),
+         title = main
+       )
+
+       print(p)
+
+
+  }else if(plot.type == "contour" && ncol(temp.xyz) > 1) {
+    ## ==========================================================================#
+    ##contour plot
+    ## ==========================================================================#
+    contour(x,y,temp.xyz,
+            xlab = xlab,
+            ylab = ylab,
+            main = main,
+            col = "black"
+    )
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex*0.8)
+
+
+  } else if(plot.type == "single") {
+    ## ==========================================================================#
+    ## single plot
+    ## ==========================================================================#
+
+    col.rug <- col
+
+    col<- if("col" %in% names(extraArgs)) {extraArgs$col} else
+    {"black"}
+
+
+
+    for(i in 1:length(y)){
+
+      if("zlim" %in% names(extraArgs) == FALSE){zlim <- range(temp.xyz[,i])}
+
+      plot(x, temp.xyz[,i],
+           xlab = xlab,
+           ylab = ylab,
+           main = main,
+           xlim = xlim,
+           ylim = zlim,
+           col = col,
+           sub = paste(
+             "(frame ",i, " | ",
+             ifelse(i==1,
+                    paste("0.0 :", round(y[i], digits = 1)),
+                    paste(round(y[i-1], digits = 1),":",
+                          round(y[i], digits =1))),")",
+             sep = ""),
+           type = type,
+           pch = pch)
+
+      if(rug == TRUE){
+        ##draw the rug as continuous polygons, one per wavelength channel
+        ##(j avoids clobbering the frame counter i of the outer loop)
+        for(j in 1:length(x)){
+          polygon(x = c(x[j],x[j+1],x[j+1],x[j]),
+                  y = c(min(zlim),min(zlim), par("usr")[3], par("usr")[3]),
+                  border = col.rug[j], col = col.rug[j])
+        }
+      }
+
+    }
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex*0.8)
+
+
+  }else if(plot.type == "multiple.lines" && ncol(temp.xyz) > 1) {
+    ## ========================================================================#
+    ## multiple.lines plot
+    ## ========================================================================#
+
+    col.rug <- col
+
+    col<- if("col" %in% names(extraArgs)) {extraArgs$col} else
+    {"black"}
+
+    ##change graphic settings
+    par.default <- par()[c("mfrow", "mar", "xpd")]
+    par(mfrow = c(1,1), mar=c(5.1, 4.1, 4.1, 8.1), xpd = TRUE)
+
+    ##grep zlim
+    if("zlim" %in% names(extraArgs) == FALSE){zlim <- range(temp.xyz)}
+
+    ##open plot area
+    plot(NA, NA,
+         xlab = xlab,
+         ylab = ylab,
+         main = main,
+         xlim = xlim,
+         ylim = zlim,
+         sub = sub,
+         bty = bty)
+
+    if(rug == TRUE){
+      ##draw the rug as continuous polygons, one per wavelength channel
+      for(i in 1:length(x)){
+        polygon(x = c(x[i],x[i+1],x[i+1],x[i]),
+                y = c(min(zlim),min(zlim), par("usr")[3], par("usr")[3]),
+                border = col.rug[i], col = col.rug[i])
+      }
+    }
+
+    ##add lines
+    for(i in 1:length(y)){
+
+      lines(x,
+            temp.xyz[,i],
+            lty = i,
+            lwd = lwd,
+            type = type,
+            col = col)
+    }
+
+    ##for missing values - legend.text
+    if(missing(legend.text)){
+
+      legend.text <- as.character(paste(round(y,digits=1), zlab))
+
+    }
+
+    ##legend
+    legend(x = par()$usr[2],
+           y = par()$usr[4],
+
+           legend = legend.text,
+
+           lwd= lwd,
+           lty = 1:length(y),
+           bty = "n",
+           cex = 0.6*cex)
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex*0.8)
+
+    ##reset graphic settings
+    par(par.default)
+    rm(par.default)
+
+  }else if(plot.type == "transect" && ncol(temp.xyz) > 1) {
+    ## ========================================================================#
+    ## transect plot
+    ## ========================================================================#
+
+    ##sum up rows (column sum)
+    temp.xyz <- colSums(temp.xyz)
+
+    ##consider differences within the arguments
+    #check for zlim
+    zlim <- if("zlim" %in% names(extraArgs)) {extraArgs$zlim} else
+    {c(0,max(temp.xyz))}
+
+    #check for zlab (taken from the ylab argument)
+    zlab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+    {paste("Counts [1/summed channels]")}
+
+    plot(y, temp.xyz,
+         xlab = ylab,
+         ylab = zlab,
+         main = main,
+         xlim = ylim,
+         ylim = zlim,
+         col = col,
+         sub = paste("(channel range: ", min(xlim), " : ", max(xlim), ")", sep=""),
+         type = type,
+         pch = pch)
+
+    ##plot additional mtext
+    mtext(mtext, side = 3, cex = cex*0.8)
+
+
+  }else{
+
+    stop("[plot_RLum.Data.Spectrum()] Unknown plot type.")
+
+  }
+
+}
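
The energy-axis conversion implemented above can be reproduced outside the
function. A minimal sketch, assuming a spectrum matrix m with wavelengths in
nm as row names (all names are illustrative; the constant is h [eV s] times
c [nm/s], exactly as in the function body):

    hc <- 4.13566733e-15 * 299792458e+09   # h*c in eV nm
    lambda <- as.numeric(rownames(m))

    E <- hc / lambda                       # x-axis: E = hc/lambda
    m_E <- m * lambda^2 / hc               # phi_E = phi_lambda * lambda^2 / (hc)
    rownames(m_E) <- E
    m_E <- m_E[order(E), ]                 # re-order by increasing energy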
diff --git a/R/plot_RLum.R b/R/plot_RLum.R
new file mode 100644
index 0000000..60b8c0b
--- /dev/null
+++ b/R/plot_RLum.R
@@ -0,0 +1,175 @@
+#' General plot function for RLum S4 class objects
+#'
+#' Function calls object specific plot functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for plotting specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding plot function will be selected.  Allowed arguments can be
+#' found in the documentations of each plot function.  \tabular{lll}{
+#' \bold{object} \tab \tab \bold{corresponding plot function} \cr
+#'
+#' \code{\linkS4class{RLum.Data.Curve}} \tab : \tab
+#' \code{\link{plot_RLum.Data.Curve}} \cr
+#' \code{\linkS4class{RLum.Data.Spectrum}} \tab : \tab
+#' \code{\link{plot_RLum.Data.Spectrum}}\cr
+#' \code{\linkS4class{RLum.Data.Image}} \tab : \tab
+#' \code{\link{plot_RLum.Data.Image}}\cr \code{\linkS4class{RLum.Analysis}}
+#' \tab : \tab \code{\link{plot_RLum.Analysis}}\cr
+#' \code{\linkS4class{RLum.Results}} \tab : \tab
+#' \code{\link{plot_RLum.Results}} }
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum}. Optionally, a \code{\link{list}} containing objects of class \code{\linkS4class{RLum}}
+#' can be provided. In this case the function tries to plot every object in this list according
+#' to its \code{RLum} class.
+#'
+#' @param \dots further arguments and graphical parameters that will be passed
+#' to the specific plot functions. The only argument that is supported directly is \code{main}
+#' (setting the plot title). In contrast to the normal behaviour, \code{main} can here be provided as a
+#' \code{\link{list}} and the arguments in the list will be dispatched to the plots if the \code{object}
+#' is of type \code{list} as well.
+#'
+#' @return Returns a plot.
+#'
+#' @note The provided plot output depends on the input object.
+#'
+#' @section Function version: 0.4.2
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{plot_RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Curve}}, \code{\link{plot_RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot_RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Image}}, \code{\link{plot_RLum.Analysis}},
+#' \code{\linkS4class{RLum.Analysis}}, \code{\link{plot_RLum.Results}},
+#' \code{\linkS4class{RLum.Results}}
+#'
+#' @references #
+#'
+#' @keywords dplot
+#'
+#' @examples
+#'
+#'
+#' #load Example data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' #transform data.frame to RLum.Data.Curve object
+#' temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
+#'
+#' #plot RLum object
+#' plot_RLum(temp)
+#'
+#'
+#' @export
+plot_RLum<- function(
+  object,
+  ...
+){
+
+  # Define dispatcher function ----------------------------------------------------------
+
+  ##check if object is of class RLum
+  RLum.dispatcher <- function(object, ...) {
+    if (inherits(object, "RLum")) {
+
+      ##grep object class
+      object.class <- is(object)[1]
+
+      ##select which plot function should be used and call it
+      switch (
+        object.class,
+
+        RLum.Data.Curve = plot_RLum.Data.Curve(object = object, ...),
+        RLum.Data.Spectrum = plot_RLum.Data.Spectrum(object = object, ...),
+        RLum.Data.Image = plot_RLum.Data.Image(object = object, ...),
+        RLum.Analysis = plot_RLum.Analysis(object = object, ...),
+        RLum.Results = plot_RLum.Results(object = object, ...)
+
+      )
+
+    }else{
+      stop(paste0(
+        "[plot_RLum()] Sorry, I don't know what to do for object of type '", is(object)[1], "'."
+      ))
+
+    }
+
+  }
+
+
+
+  # Run dispatcher ------------------------------------------------------------------------------
+
+  ##call for the list, if not just proceed as normal
+  if(is(object, "list")) {
+    ##(1) get rid of objects which are not RLum objects to avoid errors
+    object.cleaned <-
+      object[sapply(object, inherits, what = "RLum")]
+
+    ##(1.1) place warning message
+    if (length(object) > length(object.cleaned)) {
+      warning(paste0(
+        length(object) - length(object.cleaned)," non 'RLum' object(s) removed from list."
+      ))
+
+    }
+
+    ##(2) check if empty, if empty do nothing ...
+    if (length(object.cleaned) != 0) {
+
+      ## If we iterate over a list, it might be extremely useful to have different plot titles
+      if("main" %in% names(list(...))){
+        if(is(list(...)$main,"list")){
+          main.list <- rep(list(...)$main, length = length(object.cleaned))
+
+        }
+      }
+
+      ##set also mtext, but in a different way
+      if(!"mtext" %in% names(list(...))){
+
+
+        if(is(object[[1]], "RLum.Analysis")){
+          mtext <- paste("Record:", 1:length(object.cleaned))
+
+        }else{
+          mtext <- NULL
+
+        }
+      }else{
+        mtext <- rep(list(...)$mtext, length.out = length(object.cleaned))
+
+      }
+
+
+      if(exists("main.list")){
+        ##dispatch objects
+        for (i in 1:length(object.cleaned)) {
+          RLum.dispatcher(object = object[[i]],
+                          main = main.list[[i]],
+                          mtext = mtext[[i]],
+                          ...)
+        }
+      }else{
+        for (i in 1:length(object.cleaned)) {
+
+
+          RLum.dispatcher(object = object[[i]],
+                          mtext = mtext[[i]],
+                          ...)
+        }
+
+      }
+
+    }
+
+  }else{
+    ##dispatch object
+    RLum.dispatcher(object = object, ...)
+
+  }
+
+}
+
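A usage sketch for the list dispatch described above, assuming curve1 and
curve2 are existing RLum.Data.Curve objects (the names are placeholders):

    ## every list element is dispatched to its class-specific plot function;
    ## 'main' may itself be a list and is recycled over the list elements
    plot_RLum(list(curve1, curve2),
              main = list("Aliquot 1", "Aliquot 2"))
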
diff --git a/R/plot_RLum.Results.R b/R/plot_RLum.Results.R
new file mode 100644
index 0000000..354feca
--- /dev/null
+++ b/R/plot_RLum.Results.R
@@ -0,0 +1,1123 @@
+#' Plot function for an RLum.Results S4 class object
+#'
+#' The function provides a standardised plot output for data of an RLum.Results
+#' S4 class object
+#'
+#' The function produces a multiple plot output.  A file output is recommended
+#' (e.g., \code{\link{pdf}}).
+#'
+#' @param object \code{\linkS4class{RLum.Results}} (\bold{required}): S4 object
+#' of class \code{RLum.Results}
+#'
+#' @param single \code{\link{logical}} (with default): single plot output
+#' (\code{TRUE/FALSE}) to allow for plotting the results in as few plot windows
+#' as possible.
+#'
+#' @param \dots further arguments and graphical parameters will be passed to
+#' the \code{plot} function.
+#'
+#' @return Returns multiple plots.
+#'
+#' @note Not all arguments available for \code{\link{plot}} will be passed!
+#' Only plotting of \code{RLum.Results} objects is supported.
+#'
+#' @section Function version: 0.2.1
+#'
+#' @author Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, IRAMAT-CRP2A,
+#' Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\link{plot}}, \code{\link{plot_RLum}},
+#'
+#' @references #
+#'
+#' @keywords aplot
+#'
+#' @examples
+#'
+#'
+#' ###load data
+#' data(ExampleData.DeValues, envir = environment())
+#'
+#' # apply the (logged) minimum age model
+#' mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE, plot = FALSE)
+#'
+#' ##plot
+#' plot_RLum.Results(mam)
+#'
+#' # estimate the number of grains on an aliquot
+#' grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100)
+#'
+#' ##plot
+#' plot_RLum.Results(grains)
+#'
+#'
+#' @export
+plot_RLum.Results<- function(
+  object,
+  single = TRUE,
+  ...
+){
+
+  ##============================================================================##
+  ## CONSISTENCY CHECK OF INPUT DATA
+  ##============================================================================##
+
+  ##check if object is of class RLum.Results
+  if(!is(object,"RLum.Results")){
+    stop("[plot_RLum.Results()] Input object is not of type 'RLum.Results'")
+  }
+
+  ##============================================================================##
+  ## SAVE AND RESTORE PLOT PARAMETERS ON EXIT
+  ##============================================================================##
+  par.old <- par(no.readonly = TRUE)
+  on.exit(par(par.old))
+
+  ##============================================================================##
+  ## ... ARGUMENTS
+  ##============================================================================##
+
+  ##deal with additional arguments
+  extraArgs <- list(...)
+
+  ##main
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+  {""}
+  ##mtext
+  mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else
+  {""}
+  ##log
+  log <- if("log" %in% names(extraArgs)) {extraArgs$log} else
+  {""}
+  ##lwd
+  lwd <- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else
+  {1}
+  ##lty
+  lty <- if("lty" %in% names(extraArgs)) {extraArgs$lty} else
+  {1}
+  ##type
+  type <- if("type" %in% names(extraArgs)) {extraArgs$type} else
+  {"l"}
+  ##pch
+  pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else
+  {1}
+  ##col
+  col <- if("col" %in% names(extraArgs)) {extraArgs$col} else
+  {"black"}
+
+  ##============================================================================##
+  ## PLOTTING
+  ##============================================================================##
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 1: Minimum Age Model / Maximum Age Model
+  if(object at originator=="calc_MinDose" || object at originator=="calc_MaxDose") {
+
+    ## single MAM estimate
+    # plot profile log likelihood
+    tryCatch({
+      suppressWarnings(
+        bbmle::plot(object@data$profile, show.points=FALSE, plot.confstr=TRUE, onepage = single, ask = FALSE)
+      )
+    }, error = function(e) {
+      if (single)
+        par(mfrow=c(2, 2))
+      param <- c("gamma", "sigma", "p0", "mu")
+      for (i in param) {
+        if (object@data$summary$par == 3 && i == "mu")
+          break
+        tryCatch({
+          bbmle::plot(object@data$profile, which = i)
+        }, error = function(e)  {
+          message(paste("Unable to plot the Likelihood profile for:", i))
+        })
+      }
+      par(mfrow=c(1,1))
+    })
+
+    ## bootstrap MAM estimates
+    if(object@data$args$bootstrap) {
+
+      # save previous plot parameter and set new ones
+      .pardefault<- par(no.readonly = TRUE)
+
+      # get De-llik pairs
+      pairs <- object@data$bootstrap$pairs$gamma
+
+      # get polynomial fit objects
+      poly.lines<- list(poly.three=object@data$bootstrap$poly.fits$poly.three,
+                        poly.four=object@data$bootstrap$poly.fits$poly.four,
+                        poly.five=object@data$bootstrap$poly.fits$poly.five,
+                        poly.six=object@data$bootstrap$poly.fits$poly.six)
+
+      # define polynomial curve functions for plotting: each evaluates a
+      # fitted polynomial from its coefficient vector (intercept first)
+      poly.curves <- lapply(poly.lines, function(fit) {
+        function(x) {
+          coefs <- fit$coefficient
+          out <- 0
+          for (p in seq_along(coefs)) out <- out + coefs[p] * x^(p - 1)
+          out
+        }
+      })
+
+      ## --------- PLOT "RECYCLE" BOOTSTRAP RESULTS ------------ ##
+
+      if(single==TRUE) {
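+        # 6x2 layout: each double-height likelihood panel (1, 3, 5, 7) sits
+        # directly above its residual panel (2, 4, 6, 8)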
+        layout(cbind(c(1,1,2, 5,5,6), c(3,3,4, 7,7,8)))
+        par(cex = 0.6)
+      } else {
+        layout(matrix(c(1, 1, 2), 3, 1))
+        par(cex = 0.8)
+      }
+
+      for(i in 1:4) {
+        ## ----- LIKELIHOODS
+
+        # set margins (bottom, left, top, right)
+        par(mar=c(0,5,5,3))
+
+        # sort De and likelihoods by De (increasing)
+        pairs<- pairs[order(pairs[,1]),]
+
+        # remove invalid NA values
+        pairs<- na.omit(pairs)
+
+        plot(x=pairs[,1],
+             y=pairs[,2],
+             xlab="Equivalent Dose [Gy]",
+             ylab="Likelihood",
+             xlim=range(pretty(pairs[,1])),
+             ylim=range(pretty(c(0, as.double(quantile(pairs[,2],probs=0.98))))),
+             xaxt = "n",
+             xaxs = "i",
+             yaxs = "i",
+             bty = "l",
+             main="Recycled bootstrap MAM-3")
+
+        axis(side = 1, labels = FALSE, tick = FALSE)
+
+        # add subtitle
+        mtext(as.expression(bquote(italic(M) == .(object@data$args$bs.M) ~ "|" ~
+                                     italic(N) == .(object@data$args$bs.N) ~ "|" ~
+                                     italic(sigma[b])  == .(object@data$args$sigmab) ~
+                                     "\u00B1" ~ .(object@data$args$sigmab.sd) ~ "|" ~
+                                     italic(h) == .(round(object@data$args$bs.h,1))
+        )
+        ),
+        side = 3, line = 0.3, adj = 0.5,
+        cex = if(single){0.5}else{0.8})
+
+        # add points
+        points(x=pairs[,1], y=pairs[,2], pch=1, col = "grey80")
+
+        # get polynomial function
+        poly.curve<- poly.curves[[i]]
+
+        # add curve to plot
+        curve(poly.curve, from = min(pairs[,1]), to = (max(pairs[,1])),
+              col = "black", add = TRUE, type = "l")
+
+        # add legend
+        legend<- c("Third degree", "Fourth degree", "Fifth degree", "Sixth degree")
+        legend("topright",  xjust = 0,
+               legend = legend[i],
+               y.intersp = 1.2,
+               bty = "n",
+               title = "Polynomial Fit",
+               lty = 1,
+               lwd= 1.5)
+
+        ## ----- RESIDUALS
+
+        # set margins (bottom, left, top, right)
+        par(mar=c(5,5,0,3))
+
+        plot(x = pairs[,1],
+             y = residuals(poly.lines[[i]]),
+             ylim = c(min(residuals(poly.lines[[i]]))*1.2,
+                      as.double(quantile(residuals(poly.lines[[i]]),probs=0.99))),
+             xlim=range(pretty(pairs[,1])),
+             xaxt = "n",
+             bty = "l",
+             xaxs = "i",
+             col = "grey80",
+             ylab = "Fit residual",
+             xlab = "Equivalent dose [Gy]")
+
+        axis(side = 1, labels = TRUE, tick = TRUE)
+
+        # add horizontal line
+        abline(h = 0, lty=2)
+
+        # calculate residual sum of squares (RSS) and add to plot
+        rss<- sum(residuals(poly.lines[[i]])^2)
+        mtext(text = paste("RSS =",round(rss,3)), adj = 1,
+              side = 3, line = -2,
+              cex = if(single){0.6}else{0.8})
+
+        ## ----- PROPORTIONS
+
+      }##EndOf::Plot_loop
+
+      # restore previous plot parameters
+      par(.pardefault)
+
+      ### TODO: plotting of the LOESS fit needs to be fleshed out
+      ### possibly integrate this in the prior polynomial plot loop
+
+      ### LOESS PLOT
+      pairs<- object@data$bootstrap$pairs$gamma
+      pred<- predict(object@data$bootstrap$loess.fit)
+      loess<- cbind(pairs[,1], pred)
+      loess<- loess[order(loess[,1]),]
+
+      # plot gamma-llik pairs
+      plot(pairs,
+           ylim = c(0, as.double(quantile( pairs[,2],probs=0.99))),
+           ylab = "Likelihood",
+           xlab = "Equivalent dose [Gy]",
+           col = "gray80")
+
+      # add LOESS line
+      lines(loess, type = "l", col = "black")
+
+      ### ------ PLOT BOOTSTRAP LIKELIHOOD FIT
+
+      par(mar=c(5,4,4,4))
+
+      xlim<- range(pretty(object@data$data[,1]))
+      xlim[1]<- xlim[1]-object@data$data[which.min(object@data$data[,1]),2]
+      xlim[2]<- xlim[2]+object@data$data[which.max(object@data$data[,1]),2]
+      xlim<- range(pretty(xlim))
+
+      # empty plot
+      plot(NA,NA,
+           xlim=xlim,
+           ylim=c(0,2),
+           xlab="Equivalent dose [Gy]",
+           ylab="",
+           bty="l",
+           axes=FALSE,
+           xaxs="i",
+           yaxs="i",
+           yaxt="n")
+
+      axis(side = 1)
+      axis(side = 2, at = c(0,0.5,1))
+
+      mtext(text = "Normalised likelihood / density", side = 2, line = 2.5, adj = 0)
+
+      # set the polynomial to plot
+      poly.curve<- poly.curves[[1]] # third-degree polynomial
+
+      # plot a nice grey polygon as in the publication
+      step<- 0.1
+      x<- seq(min(pairs[,1]), max(pairs[,1]), step)
+      y<- poly.curve(x)
+      # normalise y-values
+      y<- y/max(y)
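+      # (all curves in this panel are rescaled to a maximum of 1, so the
+      # bootstrap polygon, the Gaussian fit and the profile likelihood can
+      # be compared on the common "normalised likelihood" axis)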
+
+      x<- c(min(pairs[,1]), x, max(pairs[,1]))
+      y<- c(0, y, 0)
+
+      # cut off negative y values (guard against an empty index vector,
+      # which would otherwise drop all values)
+      neg<- which(y<0)
+      if(length(neg) > 0) {
+        y<- y[-neg]
+        x<- x[-neg]
+      }
+
+      # add bootstrap likelihood polygon to plot
+      polygon(x, y, col = "grey80", border = NA)
+
+      ### ----- PLOT MAM SINGLE ESTIMATE
+
+      # symmetric errors, might not be appropriate
+      mean<- object@data$summary$de
+      sd<- object@data$summary$de_err
+
+      x<- seq(mean-5*sd, mean+5*sd, 0.001)
+      y<- dnorm(seq(mean-5*sd, mean+5*sd, 0.001), mean, sd)
+      # normalise y-values
+      y<- y/max(y)
+
+      points(x, y,
+             type="l",
+             col="red")
+
+      ## asymmetric errors
+      x<- unlist(object@data$profile@profile$gamma$par.vals[,1])
+      y<- abs(unlist(object@data$profile@profile$gamma$z))
+
+      if(object@data$args$log == TRUE) {
+        x<- exp(x)
+      }
+
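+      # note: the bbmle profile stores z as the signed square root of the
+      # likelihood-ratio statistic, so abs(z), inverted and rescaled below,
+      # gives a normalised likelihood-shaped curve peaking at 1 at the MLE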
+      # now invert the data by shifting
+      y<- -y
+      y<- y-min(y)
+      y<- y/max(y)
+
+      # interpolate with a natural spline ("n" partially matches method = "natural")
+      l<- spline(x = x, y = y, method = "n", n = 1000)
+
+      # make the endpoints zero
+      l$y[1]<- l$y[length(l$y)]<- 0
+
+      # add profile log likelihood curve to plot
+      lines(l, col="blue", lwd=1)
+
+      # add vertical lines of the mean values
+      #points(x = 80, y = 100,type = "l")
+
+      #### ------ PLOT DE
+      par(new = TRUE)
+
+      # sort the data in ascending order
+      dat<- object@data$data[order(object@data$data[,1]),]
+
+      x<- dat[,1]
+      y<- 1:length(object@data$data[,1])
+
+      plot(x = x, y = y,
+           xlim=xlim,
+           ylim=c(0, max(y)+1),
+           axes = FALSE,
+           pch = 16,
+           xlab = "",
+           ylab="",
+           xaxs="i",
+           yaxs="i")
+
+      axis(side = 4)
+      mtext(text = "# Grain / aliquot", side = 4, line = 2.5)
+
+      # get sorted errors
+      err<- object@data$data[order(object@data$data[,1]),2]
+
+      # fancy error bars
+      arrows(x0 = x-err, y0 = y,
+             x1 =  x+err, y1 = y,
+             code = 3, angle = 90, length = 0.05)
+
+      ### ---- AUXILIARY
+
+      # add legend
+      legend("bottomright",
+             bty = "n",
+             col = c("grey80", "red", "blue", "black"),
+             pch = c(NA,NA,NA,16),
+             lty = c(1,1,1,1),
+             lwd=c(10,2,2,2),
+             legend = c("Bootstrap likelihood", "Profile likelihood (gaussian fit)","Profile likelihood", "Grain / aliquot"),
+      )
+
+    }##EndOf::Bootstrap_plotting
+  }#EndOf::CASE1_MinimumAgeModel-3
+
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 2: Central Age Model
+  if(object at originator=="calc_CentralDose") {
+
+    # get profile log likelihood data
+    sig<- object@data$profile$sig*100
+    llik<- object@data$profile$llik
+
+    # save previous plot parameter and set new ones
+    .pardefault<- par(no.readonly = TRUE)
+
+    # plot the profile log likelihood
+    par(oma=c(2,1,2,1),las=1,cex.axis=1.2, cex.lab=1.2)
+    plot(sig,llik,type="l",xlab=as.expression(bquote(sigma[OD]~"[%]")),ylab="Log likelihood",lwd=1.5)
+    abline(h=0,lty=3)
+    abline(h=-1.92,lty=3)
+    title(as.expression(bquote("Profile log likelihood for" ~ sigma[OD])))
+
+    # find upper and lower confidence limits for sigma
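+    # (-1.92 is half the 0.95 quantile of a chi-squared distribution with
+    # one degree of freedom, qchisq(0.95, 1)/2 ~ 1.92, i.e. the standard
+    # likelihood-ratio based 95% confidence interval)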
+    sigmax<- sig[which.max(llik)]
+    tf<- abs(llik+1.92) < 0.05
+    sig95<- sig[tf]
+    ntf<- length(sig95)
+    sigL<- sig95[1]
+    sigU<- sig95[ntf]
+
+    # put them on the graph
+    abline(v=sigL)
+    abline(v=sigmax)
+    abline(v=sigU)
+    dx<- 0.006
+    dy<- 0.2
+    ytext<- min(llik) + dy
+    res<- c(sigL,sigmax,sigU)
+    text(res+dx,rep(ytext,3),round(res,2),adj=0)
+
+    # restore previous plot parameters
+    par(.pardefault)
+    rm(.pardefault)
+  }##EndOf::Case 2 - calc_CentralDose()
+
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 3: Fuchs & Lang 2001
+  if(object at originator=="calc_FuchsLang2001") {
+
+    ##deal with additional arguments
+    extraArgs <- list(...)
+
+    main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Fuchs & Lang (2001)"}
+    xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {expression(paste(D[e]," [s]"))}
+    ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else {"# Aliquots"}
+    sub <-  if("sub" %in% names(extraArgs)) {extraArgs$sub} else {""}
+    cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {1}
+    lwd <- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else {1}
+    pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else {19}
+    ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else {c(1,length(object at data$data[,1])+3)}
+    xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else {c(min(object at data$data[,1])-max(object at data$data[,2]), max(object at data$data[,1])+max(object at data$data[,2]))}
+    mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else {"unknown sample"}
+
+    # extract relevant plotting parameters
+    o<- order(object@data$data[1])
+    data_ordered<- object@data$data[o,]
+    usedDeValues<- object@data$usedDeValues
+    n.usedDeValues<- object@data$summary$n.usedDeValues
+
+    par(cex = cex, mfrow=c(1,1))
+
+    ##PLOT
+    counter<-seq(1,max(o))
+
+    plot(NA,NA,
+         ylim = ylim,
+         xlim = xlim,
+         xlab = xlab,
+         ylab = ylab,
+         main = main,
+         sub = sub)
+
+    ##SEGMENTS
+    segments(data_ordered[,1]-data_ordered[,2],1:length(data_ordered[,1]),
+             data_ordered[,1]+data_ordered[,2],1:length(data_ordered[,1]),
+             col="gray")
+
+
+    ##POINTS
+    points(data_ordered[,1], counter,pch=pch)
+
+    ##LINES
+    ##BOUNDARY INFORMATION
+    ##lower boundary
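+    # (usedDeValues holds the ordered De values; the row located
+    # n.usedDeValues positions from the end is the smallest De value still
+    # used by the algorithm and therefore marks the lower boundary)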
+    lines(c(
+      usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1], # boundary counter to incorporate skipped values
+      usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]),
+      c(min(o)-0.5,max(o)+0.5),
+      col="red",
+      lty="dashed", lwd = lwd)
+
+
+    #upper boundary
+    lines(c(max(usedDeValues[,1]),max(usedDeValues[,1])),c(min(o)-0.5,max(o)+0.5),
+          col="red",lty="dashed", lwd = lwd)
+
+    # plot some further information into the graphic
+    arrows(
+      usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]+usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]*0.02, #x1
+      max(o)+0.5, #y1
+      max(usedDeValues[,1]-usedDeValues[,1]*0.02), #x2
+      max(o)+0.5, #y2,
+      code=3,
+      length=0.03)
+
+    text(
+      c(
+        usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1],
+        usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]),
+      c(max(o)+2,max(o)+2),
+      labels=paste("used values = ", n.usedDeValues),
+      cex=0.6*cex,
+      adj=0)
+
+    ##MTEXT
+    mtext(side=3,mtext,cex=cex)
+  }
+
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 4: Finite Mixture Model
+
+  if(object at originator == "calc_FiniteMixture") {
+    if(length(object at data$args$n.components) > 1L) {
+
+      ##deal with additional arguments
+      extraArgs <- list(...)
+
+      main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Finite Mixture Model"}
+      plot.proportions<- if("plot.proportions" %in% names(extraArgs)) {extraArgs$plot.proportions} else {TRUE}
+      pdf.colors<- if("pdf.colors" %in% names(extraArgs)) {extraArgs$pdf.colors} else {"gray"}
+      pdf.weight<- if("pdf.weight" %in% names(extraArgs)) {extraArgs$pdf.weight} else {TRUE}
+      pdf.sigma<- if("pdf.sigma" %in% names(extraArgs)) {extraArgs$pdf.sigma} else {"sigmab"}
+
+      # extract relevant data from object
+      n.components<- object@data$args$n.components
+      comp.n<- object@data$components
+      sigmab<- object@data$args$sigmab
+      BIC.n<- object@data$BIC$BIC
+      LLIK.n<- object@data$llik$llik
+
+      # save previous plot parameter and set new ones
+      .pardefault<- par(no.readonly = TRUE)
+
+      ## DEVICE AND PLOT LAYOUT
+      n.plots<- length(n.components) #number of PDF plots in plotarea #1
+      seq.vertical.plots<- seq(from = 1, to = n.plots, by = 1) #indices
+      ID.plot.two<- n.plots+if(plot.proportions==TRUE){1}else{0} #ID of second plot area
+      ID.plot.three<- n.plots+if(plot.proportions==TRUE){2}else{1} #ID of third plot area
+
+      #empty vector for plot indices
+      seq.matrix<- vector(mode="integer", length=4*n.plots)
+
+      #fill vector with plot indices in correct order
+      cnt<- 1
+      seq<- seq(1,length(seq.matrix),4)
+      for(i in seq) {
+        seq.matrix[i]<- cnt
+        seq.matrix[i+1]<- cnt
+        seq.matrix[i+2]<- if(plot.proportions==TRUE){ID.plot.two}else{cnt}
+        seq.matrix[i+3]<- ID.plot.three
+
+        cnt<- cnt+1
+      }
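+      # e.g. for two k-values with plot.proportions = TRUE this yields a
+      # 4 x 2 layout matrix with columns (1,1,3,4) and (2,2,3,4): one tall
+      # density panel per k (regions 1 and 2), the proportion panel (3)
+      # spanning the third row and the BIC/likelihood panel (4) the fourth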
+
+      # create device layout
+      layout(matrix(c(seq.matrix),4,n.plots))
+
+      # outer margins (bottom, left, top, right)
+      par(oma=c(2.5,5,3,5))
+
+      # general plot parameters (global scaling, allow overplotting)
+      par(cex = 0.8, xpd = NA)
+
+      # define color palette for prettier output
+      if(pdf.colors == "colors") {
+        col.n<- c("red3", "slateblue3", "seagreen", "tan3", "yellow3",
+                  "burlywood4", "magenta4", "mediumpurple3", "brown4","grey",
+                  "aquamarine")
+        poly.border<- FALSE
+      }
+      if(pdf.colors == "gray" || pdf.colors == "grey") {
+        col.n<- gray.colors(length(n.components)*2)
+        poly.border<- FALSE
+      }
+      if(pdf.colors == "none") {
+        col.n<- NULL
+        poly.border<- TRUE
+      }
+
+      ##--------------------------------------------------------------------------
+      ## PLOT 1: EQUIVALENT DOSES OF COMPONENTS
+
+      ## create empty plot without x-axis
+      for(i in 1:n.plots) {
+
+        pos.n<- seq(from = 1, to = n.components[i]*3, by = 3)
+
+        # set margins (bottom, left, top, right)
+        par(mar=c(1,0,2,0))
+
+        # empty plot area
+        plot(NA, NA,
+             xlim=c(min(n.components)-0.2, max(n.components)+0.2),
+             ylim=c(min(comp.n[pos.n,]-comp.n[pos.n+1,], na.rm = TRUE),
+                    max((comp.n[pos.n,]+comp.n[pos.n+1,])*1.1, na.rm = TRUE)),
+             ylab="",
+             xaxt="n",
+             yaxt="n",
+             xlab="")
+
+        # add text in upper part of the plot ("k = 1,2..n")
+        mtext(bquote(italic(k) == .(n.components[i])),
+              side = 3, line = -2, cex=0.8)
+
+        # add y-axis label (only for the first plot)
+        if(i==1) {
+          mtext(expression(paste("D"[e]," [Gy]")), side=2,line=2.7, cex=1)
+        }
+
+        # empty list to store normal distribution densities
+        sapply.storage<- list()
+
+        ## NORMAL DISTR. OF EACH COMPONENT
+        options(warn=-1) # suppress warnings for NA values
+
+        # LOOP - iterate over number of components
+        for(j in 1:max(n.components)) {
+
+          # draw random values of the ND to check for NA values
+          comp.nd.n<- sort(rnorm(n = length(object@data$data[,1]),
+                                 mean = comp.n[pos.n[j],i],
+                                 sd = comp.n[pos.n[j]+1,i]))
+
+          # proceed if no NA values occurred
+          if(length(comp.nd.n)!=0) {
+
+            # weight - proportion of the component
+            wi<- comp.n[pos.n[j]+2,i]
+
+            # calculate density values with(out) weights
+            fooX<- function(x) {
+              dnorm(x, mean = comp.n[pos.n[j],i],
+                    sd = if(pdf.sigma=="se"){comp.n[pos.n[j]+1,i]}
+                    else{if(pdf.sigma=="sigmab"){comp.n[pos.n[j],i]*sigmab}}
+              )*
+                if(pdf.weight==TRUE){wi}else{1}
+            }
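+            # (with pdf.sigma = "se" the component's standard error defines
+            # the spread of the plotted distribution, with "sigmab" the
+            # component dose times sigmab is used; pdf.weight additionally
+            # scales the density by the component proportion)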
+
+            # x-axis scaling - determine highest dose in first cycle
+            if(i==1 && j==1){
+              max.dose<- max(object@data$data[,1])+sd(object@data$data[,1])/2
+              min.dose<- min(object@data$data[,1])-sd(object@data$data[,1])/2
+
+              # density function to determine y-scaling if no weights are used
+              fooY<- function(x) {
+                dnorm(x, mean = na.exclude(comp.n[pos.n,]),
+                      sd = na.exclude(comp.n[pos.n+1,]))
+              }
+              # set y-axis scaling
+              dens.max<-max(sapply(0:max.dose, fooY))
+            }##EndOfIf::first cycle settings
+
+
+            # override y-axis scaling if weights are used
+            if(pdf.weight==TRUE){
+              sapply.temp<- list()
+              for(b in 1:max(n.components)){
+
+                # draw random values of the ND to check for NA values
+                comp.nd.n<- sort(rnorm(n = length(object@data$data[,1]),
+                                       mean = comp.n[pos.n[b],i],
+                                       sd = comp.n[pos.n[b]+1,i]))
+
+                # proceed if no NA values occurred
+                if(length(comp.nd.n)!=0) {
+
+                  # weight - proportion of the component
+                  wi.temp<- comp.n[pos.n[b]+2,i]
+
+                  fooT<- function(x) {
+                    dnorm(x, mean = comp.n[pos.n[b],i],
+                          sd = if(pdf.sigma=="se"){comp.n[pos.n[b]+1,i]}
+                          else{if(pdf.sigma=="sigmab"){comp.n[pos.n[b],i]*sigmab}}
+                    )*wi.temp
+                  }
+                  sapply.temp[[b]]<- sapply(0:max.dose, fooT)
+                }
+              }
+              dens.max<- max(Reduce('+', sapply.temp))
+            }
+
+            # calculate density values for 0 to maximum dose
+            sapply<- sapply(0:max.dose, fooX)
+
+            # save density values in list for sum curve of gaussians
+            sapply.storage[[j]]<- sapply
+
+            ## determine axis scaling
+            # dose scale (these rotated subplots show dose along the y-axis)
+            if("dose.scale" %in% names(extraArgs)) {
+              y.scale<- extraArgs$dose.scale
+            } else {
+              y.scale<- c(min.dose,max.dose)
+            }
+            # density scale (shown along the x-axis)
+            if("pdf.scale" %in% names(extraArgs)) {
+              x.scale<- extraArgs$pdf.scale
+            } else {
+              x.scale<- dens.max*1.1
+            }
+
+            ## PLOT Normal Distributions
+            par(new=TRUE)
+            plot(sapply, 1:length(sapply)-1,
+                 type="l", yaxt="n", xaxt="n", col=col.n[j], lwd=1,
+                 ylim=y.scale,
+                 xlim=c(0,x.scale),
+                 xaxs="i", yaxs="i",
+                 ann=FALSE, xpd = FALSE)
+
+            # draw colored polygons under curve
+            polygon(x=c(min(sapply), sapply,  min(sapply)),
+                    y=c(0, 0:max.dose,  0),
+                    col = adjustcolor(col.n[j], alpha.f = 0.66),
+                    yaxt="n", border=poly.border, xpd = FALSE, lty = 2, lwd = 1.5)
+
+          }
+        }##EndOf::Component loop
+
+        #turn warnings on again
+        options(warn=0)
+
+        # Add sum of gaussians curve
+        par(new = TRUE)
+
+        plot(Reduce('+', sapply.storage),1:length(sapply)-1,
+             type="l", yaxt="n", xaxt="n", col="black",
+             lwd=1.5, lty = 1,
+             ylim=y.scale,
+             xlim=c(0,x.scale),
+             xaxs="i", yaxs="i", ann=FALSE, xpd = FALSE)
+
+        # draw additional info during first k-cycle
+        if(i == 1) {
+
+          # plot title
+          mtext("Normal distributions",
+                side = 3, font = 2, line = 0, adj = 0, cex = 0.8)
+
+          # main title
+          mtext(main,
+                side = 3, font = 2, line = 3.5, adj = 0.5,
+                at = grconvertX(0.5, from = "ndc", to = "user"))
+
+          # subtitle
+          mtext(as.expression(bquote(italic(sigma[b]) == .(sigmab) ~
+                                       "|" ~ n == .(length(object at data$data[,1])))),
+                side = 3, font = 1, line = 2.2, adj = 0.5,
+                at = grconvertX(0.5, from = "ndc", to = "user"), cex = 0.9)
+
+          # x-axis label
+          mtext("Density [a.u.]",
+                side = 1, line = 0.5, adj = 0.5,
+                at = grconvertX(0.5, from = "ndc", to = "user"))
+
+          # draw y-axis with proper labels
+          axis(side=2, labels = TRUE)
+        }
+
+        if(pdf.colors == "colors") {
+          # create legend labels
+          dose.lab.legend<- paste("c", 1:n.components[length(n.components)], sep="")
+
+          if(max(n.components)>8) {
+            ncol.temp<- 8
+            yadj<- 1.025
+          } else {
+            ncol.temp<- max(n.components)
+            yadj<- 0.93
+          }
+
+          # add legend
+          if(i==n.plots) {
+            legend(grconvertX(0.55, from = "ndc", to = "user"),
+                   grconvertY(yadj, from = "ndc", to = "user"),
+                   legend = dose.lab.legend,
+                   col = col.n[1:max(n.components)],
+                   pch = 15, adj = c(0,0.2), pt.cex=1.4,
+                   bty = "n", ncol=ncol.temp, x.intersp=0.4)
+
+            mtext("Components: ", cex = 0.8,
+                  at = grconvertX(0.5, from = "ndc", to = "user"))
+          }
+        }
+
+      }##EndOf::k-loop and Plot 1
+
+      ##--------------------------------------------------------------------------
+      ## PLOT 2: PROPORTION OF COMPONENTS
+      if(plot.proportions==TRUE) {
+        # margins for second plot
+        par(mar=c(2,0,2,0))
+
+        # create matrix with proportions from a subset of the summary matrix
+        prop.matrix<- comp.n[pos.n+2,]*100
+
+        # stacked barplot of proportions without x-axis
+        barplot(prop.matrix,
+                width=1,
+                xlim=c(0.2, length(n.components)-0.2),
+                ylim=c(0,100),
+                axes=TRUE,
+                space=0,
+                col=col.n,
+                xpd=FALSE,
+                xaxt="n")
+
+        # y-axis label
+        mtext("Proportion [%]",
+              side=2,line=3, cex=1)
+
+
+        # add x-axis with corrected tick positions
+        axis(side = 1, labels = n.components, at = n.components+0.5-n.components[1])
+
+        # draw a box (not possible with barplot())
+        box(lty=1, col="black")
+
+        # add subtitle
+        mtext("Proportion of components",
+              side = 3, font = 2, line = 0, adj = 0, cex = 0.8)
+
+      }
+      ##--------------------------------------------------------------------------
+      ## PLOT 3: BIC & LLIK
+
+      # margins for third plot
+      par(mar=c(2,0,2,0))
+
+      # prepare scaling for both y-axes
+      BIC.scale<- c(min(BIC.n)*if(min(BIC.n)<0){1.2}else{0.8},
+                    max(BIC.n)*if(max(BIC.n)<0){0.8}else{1.2})
+      LLIK.scale<- c(min(LLIK.n)*if(min(LLIK.n)<0){1.2}else{0.8},
+                     max(LLIK.n)*if(max(LLIK.n)<0){0.8}else{1.2})
+
+      # plot BIC scores
+      plot(n.components, BIC.n,
+           main= "",
+           type="b",
+           pch=22,
+           cex=1.5,
+           xlim=c(min(n.components)-0.2, max(n.components)+0.2),
+           ylim=BIC.scale,
+           xaxp=c(min(n.components), max(n.components), length(n.components)-1),
+           xlab=expression(paste(italic(k), " Components")),
+           ylab=expression(paste("BIC")),
+           cex.lab=1.25)
+
+      # following plot should be added to previous
+      par(new = TRUE)
+
+      # plot LLIK estimates
+      plot(n.components, LLIK.n,
+           xlim=c(min(n.components)-0.2, max(n.components)+0.2),
+           xaxp=c(min(n.components), max(n.components), length(n.components)-1),
+           ylim=LLIK.scale,
+           yaxt="n", type="b", pch=16, xlab="", ylab="", lty=2, cex = 1.5)
+
+      # subtitle
+      mtext("Statistical criteria",
+            side = 3, font = 2, line = 0, adj = 0, cex = 0.8)
+
+      # second y-axis with proper scaling
+      axis(side = 4, ylim=c(0,100))
+
+      # LLIK axis label
+      mtext(bquote(italic(L)[max]),
+            side=4,line=3, cex=1.3)
+
+      # legend
+      legend(grconvertX(0.75, from = "nfc", to = "user"),
+             grconvertY(0.96, from = "nfc", to = "user"),
+             legend = c("BIC", as.expression(bquote(italic(L)[max]))),
+             pch = c(22,16), pt.bg=c("white","black"),
+             adj = 0, pt.cex=1.3, lty=c(1,2),
+             bty = "n", horiz = TRUE, x.intersp=0.5)
+
+
+      ## restore previous plot parameters
+      par(.pardefault)
+    }
+  }##EndOf::Case 4 - Finite Mixture Model
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 5: Aliquot Size
+  if(object at originator=="calc_AliquotSize") {
+    if(object at data$args$MC == TRUE) {
+
+      extraArgs <- list(...)
+
+      main<- if("main" %in% names(extraArgs)) { extraArgs$main } else { "Monte Carlo Simulation"  }
+      xlab<- if("xlab" %in% names(extraArgs)) { extraArgs$xlab } else { "Amount of grains on aliquot" }
+
+      # extract relevant data
+      MC.n<- object@data$MC$estimates
+      MC.n.kde<- object@data$MC$kde
+      MC.stats<- object@data$MC$statistics
+      MC.q<- object@data$MC$quantile
+      MC.iter<- object@data$args$MC.iter
+
+      # set layout of plotting device
+      layout(matrix(c(1,1,2)),2,1)
+      par(cex = 0.8)
+
+      ## plot MC estimate distribution
+
+      # set margins (bottom, left, top, right)
+      par(mar=c(2,5,5,3))
+
+      # plot histogram
+      hist(MC.n, freq=FALSE, col = "gray90",
+           main="", xlab=xlab,
+           xlim = c(min(MC.n)*0.95, max(MC.n)*1.05),
+           ylim = c(0, max(MC.n.kde$y)*1.1))
+
+      # add rugs to histogram
+      rug(MC.n)
+
+      # add KDE curve
+      lines(MC.n.kde, col = "black", lwd = 1)
+
+      # add mean, median and quantiles (0.05,0.95)
+      abline(v=c(MC.stats$mean, MC.stats$median, MC.q),
+             lty=c(2, 4, 3,3), lwd = 1)
+
+      # add main- and subtitle
+      mtext(main, side = 3, adj = 0.5,
+            line = 3, cex = 1)
+      mtext(as.expression(bquote(italic(n) == .(MC.iter) ~ "|" ~
+                                   italic(hat(mu)) == .(round(MC.stats$mean)) ~ "|" ~
+                                   italic(hat(sigma))  == .(round(MC.stats$sd.abs)) ~ "|" ~
+                                   italic(frac(hat(sigma),sqrt(n))) == .(round(MC.stats$se.abs))  ~ "|" ~
+                                   italic(v) == .(round(MC.stats$skewness, 2))
+      )
+      ),
+      side = 3, line = 0.3, adj = 0.5,
+      cex = 0.9)
+
+      # add legend
+      legend("topright", legend = c("mean","median", "0.05 / 0.95 quantile"),
+             lty = c(2, 4, 3), bg = "white", box.col = "white", cex = 0.9)
+
+      ## BOXPLOT
+      # set margins (bottom, left, top, right)
+      par(mar=c(5,5,0,3))
+
+      plot(NA, type="n", xlim=c(min(MC.n)*0.95, max(MC.n)*1.05),
+           xlab=xlab,  ylim=c(0.5,1.5),
+           xaxt="n", yaxt="n", ylab="")
+      par(bty="n")
+      boxplot(MC.n, horizontal = TRUE, add = TRUE, bty="n")
+    }
+  }#EndOf::Case 5 - calc_AliquotSize()
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 6: calc_SourceDoseRate()
+  if(object at originator=="calc_SourceDoseRate") {
+
+    ##prepare data
+    ##get data
+    df <- get_RLum(object = object, data.object = "dose.rate")
+
+    ##reduce the size for plotting, more than 100 points makes no sense
+    if(nrow(df)>100) {
+      df <- df[seq(1,nrow(df), length = 100),]
+
+    }
+
+
+    ##plot settings
+    plot.settings <- list(
+      main = "Source Dose Rate Prediction",
+      xlab = "Date",
+      ylab = paste0(
+        "Dose rate/(",get_RLum(object = object, data.object = "parameters")$dose.rate.unit,")"),
+      log = "",
+      cex = 1,
+      xlim = NULL,
+      ylim = c(min(df[,1]) - max(df[,2]), max(df[,1]) + max(df[,2])),
+      pch = 1,
+      mtext = paste0(
+        "source type: ", get_RLum(object = object, data.object = "parameters")$source.type,
+        " | ",
+        "half-life: ", get_RLum(object = object, data.object = "parameters")$halflife,
+        " a"
+      ),
+      grid = expression(nx = 10, ny = 10),
+      col = 1,
+      type = "b",
+      lty = 1,
+      lwd = 1,
+      segments = ""
+    )
+
+    ##modify list if something was set
+    plot.settings <- modifyList(plot.settings, list(...))
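+    # (modifyList() overwrites only those defaults that were explicitly
+    # passed via '...'; all other entries keep their preset values)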
+
+
+    ##plot
+    plot(
+      df[,3], df[,1],
+      main = plot.settings$main,
+      xlab = plot.settings$xlab,
+      ylab = plot.settings$ylab,
+      xlim = plot.settings$xlim,
+      ylim = plot.settings$ylim,
+      log = plot.settings$log,
+      pch = plot.settings$pch,
+      col = plot.settings$col,
+      type = plot.settings$type,
+      lty = plot.settings$lty,
+      lwd = plot.settings$lwd
+    )
+
+    if(!is.null(plot.settings$segments)){
+      segments(
+        x0 = df[,3], y0 = df[,1] + df[,2],
+        x1 = df[,3], y1 = df[,1] - df[,2]
+      )
+    }
+
+    mtext(side = 3, plot.settings$mtext)
+
+    if(!is.null(plot.settings$grid)){
+      grid(eval(plot.settings$grid))
+
+    }
+
+  }#EndOf::Case 6 - calc_SourceDoseRate()
+
+  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
+  ## CASE 7: Fast Ratio
+  if (object at originator=="calc_FastRatio") {
+
+    # graphical settings
+    settings <- list(main = "Fast Ratio",
+                     xlab = "t/s",
+                     ylab = "Signal/cts",
+                     cex = 1.0)
+    settings <- modifyList(settings, list(...))
+
+    par(cex = settings$cex)
+
+    # fetch data from RLum.Results object
+    curve <- get_RLum(object, "data")
+    if (inherits(curve, "RLum.Data.Curve"))
+      curve <- get_RLum(curve)
+    res <- get_RLum(object, "summary")
+    fit <- get_RLum(object, "fit")
+
+    # calculate the dead channel time offset
+    offset <- res$dead.channels.start * res$channel.width
+
+    # plot the OSL curve
+    plot(curve, type = "p", main = settings$main,
+         xlab = settings$xlab, ylab = settings$ylab)
+
+    # plot points to show measured data points (i.e., the channels)
+    points(curve[(res$dead.channels.start + 1):(nrow(curve) - res$dead.channels.end),],
+           pch = 16)
+
+    # plot dead channels as empty circles
+    if (res$dead.channels.start > 0)
+      points(curve[1:res$dead.channels.start,])
+    if (res$dead.channels.end > 0)
+      points(curve[(nrow(curve) - res$dead.channels.end):nrow(curve), ])
+
+    # optional: plot fitted CW curve
+    if (!is.null(fit)) {
+      nls.fit <- get_RLum(fit, "fit")
+      if (!inherits(fit, "try-error") & "fitCW.curve" %in% names(object at data$args)) {
+        if (object at data$args$fitCW.curve == "T" | object at data$args$fitCW.curve == TRUE) {
+          lines(curve[(res$dead.channels.start + 1):(nrow(curve) - res$dead.channels.end), 1],
+                predict(nls.fit), col = "red", lty = 1)
+        }
+      }
+    }
+
+    lines(curve)
+
+
+    # add vertical lines and labels for L1, L2, L3
+    L_times <- c(curve[res$Ch_L1, 1],
+                 curve[res$Ch_L2, 1],
+                 curve[res$Ch_L3_start, 1],
+                 curve[res$Ch_L3_end, 1]) + offset
+    abline(v = L_times,
+           lty = 2)
+    text(L_times, max(curve[ ,2]) * 0.95, pos = c(4,4,2,2),
+         labels = expression('L'[1], 'L'[2], 'L'[3['start']], 'L'[3['end']]))
+
+  }#EndOf::Case7 - calc_FastRatio()
+}
diff --git a/R/plot_RadialPlot.R b/R/plot_RadialPlot.R
new file mode 100644
index 0000000..21671b5
--- /dev/null
+++ b/R/plot_RadialPlot.R
@@ -0,0 +1,1552 @@
+#' Function to create a Radial Plot
+#'
+#' A Galbraith radial plot is produced on a logarithmic or a linear scale.
+#'
+#' Details and the theoretical background of the radial plot are given in the
+#' cited literature. This function is based on an S script of Rex Galbraith. To
+#' reduce the manual adjustments, the function has been rewritten. Thanks to
+#' Rex Galbraith for useful comments on this function. \cr Plotting can be
+#' disabled by adding the argument \code{plot = FALSE}, e.g. to return only
+#' the numerical plot output.\cr
+#'
+#' Earlier versions of the Radial Plot in this package had the 2-sigma-bar
+#' drawn onto the z-axis. However, this might have given the misleading
+#' impression that the 2-sigma range also refers to the z-scale, which it
+#' does not! Rather, it applies only to the x-y-coordinate system
+#' (standardised estimate vs. precision). A spread in doses or ages must be
+#' drawn as lines originating at zero precision (x0) and zero standardised
+#' estimate (y0). Such a range may be drawn by adding lines to the radial
+#' plot (\code{line}, \code{line.col}, \code{line.label}, cf. examples).\cr\cr
+#'
+#' A statistic summary, i.e. a collection of statistic measures of
+#' centrality and dispersion (and further measures) can be added by specifying
+#' one or more of the following keywords: \code{"n"} (number of samples),
+#' \code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+#' \code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+#' deviation in percent), \code{"sdrel.weighted"} (error-weighted relative
+#' standard deviation in percent), \code{"sdabs"} (absolute standard deviation),
+#' \code{"sdabs.weighted"} (error-weighted absolute standard deviation),
+#' \code{"serel"} (relative standard error), \code{"serel.weighted"} (
+#' error-weighted relative standard error), \code{"seabs"} (absolute standard
+#' error), \code{"seabs.weighted"} (error-weighted absolute standard error),
+#' \code{"in.2s"} (percent of samples in 2-sigma range),
+#' \code{"kurtosis"} (kurtosis) and \code{"skewness"} (skewness).
+#'
+#' @param data \code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+#' object (required): for \code{data.frame} two columns: De (\code{data[,1]})
+#' and De error (\code{data[,2]}). To plot several data sets in one plot, the
+#' data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.
+#' @param na.rm \code{\link{logical}} (with default): excludes \code{NA}
+#' values from the data set prior to any further operations.
+#' @param negatives \code{\link{character}} (with default): rule for negative
+#' values. Default is \code{"remove"} (i.e. negative values are removed from
+#' the data set).
+#' @param log.z \code{\link{logical}} (with default): Option to display the
+#' z-axis in logarithmic scale. Default is \code{TRUE}.
+#' @param central.value \code{\link{numeric}}: User-defined central value,
+#' primarily used for horizontal centering of the z-axis.
+#' @param centrality \code{\link{character}} or \code{\link{numeric}} (with
+#' default): measure of centrality, used for automatically centering the plot
+#' and drawing the central line. Can either be one out of \code{"mean"},
+#' \code{"median"}, \code{"mean.weighted"} and \code{"median.weighted"} or a
+#' numeric value used for the standardisation.
+#' @param mtext \code{\link{character}}: additional text below the plot title.
+#' @param summary \code{\link{character}} (optional): add statistic measures of
+#' centrality and dispersion to the plot. Can be one or more of several
+#' keywords. See details for available keywords.
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used.
+#' @param legend \code{\link{character}} vector (optional): legend content to
+#' be added to the plot.
+#' @param legend.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position coordinates or keyword (e.g. \code{"topright"})
+#' for the legend to be plotted.
+#' @param stats \code{\link{character}}: additional labels of statistically
+#' important values in the plot. One or more out of the following:
+#' \code{"min"}, \code{"max"}, \code{"median"}.
+#' @param rug \code{\link{logical}}: Option to add a rug to the z-scale, to
+#' indicate the location of individual values
+#' @param plot.ratio \code{\link{numeric}}: User-defined plot area ratio (i.e.
+#' curvature of the z-axis). If omitted, the default value (\code{4.5/5.5}) is
+#' used and modified automatically to optimise the z-axis curvature. The
+#' parameter should be decreased when data points are plotted outside the
+#' z-axis or when the z-axis gets too elliptic.
+#' @param bar.col \code{\link{character}} or \code{\link{numeric}} (with
+#' default): colour of the bar showing the 2-sigma range around the central
+#' value. To disable the bar, use \code{"none"}. Default is \code{"grey"}.
+#' @param y.ticks \code{\link{logical}}: Option to hide y-axis labels. Useful
+#' for data with small scatter.
+#' @param grid.col \code{\link{character}} or \code{\link{numeric}} (with
+#' default): colour of the grid lines (originating at [0,0] and stretching to
+#' the z-scale). To disable grid lines, use \code{"none"}. Default is
+#' \code{"grey"}.
+#' @param line \code{\link{numeric}}: numeric values of the additional lines to
+#' be added.
+#' @param line.col \code{\link{character}} or \code{\link{numeric}}: colour of
+#' the additional lines.
+#' @param line.label \code{\link{character}}: labels for the additional lines.
+#' @param output \code{\link{logical}}: Optional output of numerical plot
+#' parameters. These can be useful to reproduce similar plots. Default is
+#' \code{FALSE}.
+#' @param \dots Further plot arguments to pass. \code{xlab} must be a vector of
+#' length 2, specifying the upper and lower x-axes labels.
+#' @return Returns a plot object.
+#' @section Function version: 0.5.3
+#' @author Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+#' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Based on a rewritten
+#' S script of Rex Galbraith, 2010
+#' @seealso \code{\link{plot}}, \code{\link{plot_KDE}},
+#' \code{\link{plot_Histogram}}
+#' @references Galbraith, R.F., 1988. Graphical Display of Estimates Having
+#' Differing Standard Errors. Technometrics, 30 (3), 271-281.
+#'
+#' Galbraith, R.F., 1990. The radial plot: Graphical assessment of spread in
+#' ages. International Journal of Radiation Applications and Instrumentation.
+#' Part D. Nuclear Tracks and Radiation Measurements, 17 (3), 207-214.
+#'
+#' Galbraith, R. & Green, P., 1990. Estimating the component ages in a finite
+#' mixture. International Journal of Radiation Applications and
+#' Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3)
+#' 197-206.
+#'
+#' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+#' track ages. Nuclear Tracks And Radiation Measurements, 21 (4), 459-470.
+#'
+#' Galbraith, R.F., 1994. Some Applications of Radial Plots. Journal of the
+#' American Statistical Association, 89 (428), 1232-1242.
+#'
+#' Galbraith, R.F., 2010. On plotting OSL equivalent doses. Ancient TL, 28 (1),
+#' 1-10.
+#'
+#' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent
+#' dose and error calculation and display in OSL dating: An overview and some
+#' recommendations. Quaternary Geochronology, 11, 1-27.
+#' @examples
+#'
+#' ## load example data
+#' data(ExampleData.DeValues, envir = environment())
+#' ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+#'
+#' ## plot the example data straightforward
+#' plot_RadialPlot(data = ExampleData.DeValues)
+#'
+#' ## now with linear z-scale
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 log.z = FALSE)
+#'
+#' ## now with output of the plot parameters
+#' plot1 <- plot_RadialPlot(data = ExampleData.DeValues,
+#'                          log.z = FALSE,
+#'                          output = TRUE)
+#' plot1
+#' plot1$zlim
+#'
+#' ## now with adjusted z-scale limits
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                log.z = FALSE,
+#'                zlim = c(100, 200))
+#'
+#' ## now the two plots with serious but seasonally changing fun
+#' #plot_RadialPlot(data = data.3, fun = TRUE)
+#'
+#' ## now with user-defined central value, in log-scale again
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 central.value = 150)
+#'
+#' ## now with a rug, indicating individual De values at the z-scale
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 rug = TRUE)
+#'
+#' ## now with legend, colour, different points and smaller scale
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 legend.text = "Sample 1",
+#'                 col = "tomato4",
+#'                 bar.col = "peachpuff",
+#'                 pch = "R",
+#'                 cex = 0.8)
+#'
+#' ## now without 2-sigma bar, y-axis, grid lines and central value line
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 bar.col = "none",
+#'                 grid.col = "none",
+#'                 y.ticks = FALSE,
+#'                 lwd = 0)
+#'
+#' ## now with user-defined axes labels
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 xlab = c("Data error (%)",
+#'                          "Data precision"),
+#'                 ylab = "Scatter",
+#'                 zlab = "Equivalent dose [Gy]")
+#'
+#' ## now with minimum, maximum and median value indicated
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 central.value = 150,
+#'                 stats = c("min", "max", "median"))
+#'
+#' ## now with a brief statistical summary
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 summary = c("n", "in.2s"))
+#'
+#' ## now with another statistical summary as subheader
+#' plot_RadialPlot(data = ExampleData.DeValues,
+#'                 summary = c("mean.weighted", "median"),
+#'                 summary.pos = "sub")
+#'
+#' ## now the data set is split into sub-groups, one is manipulated
+#' data.1 <- ExampleData.DeValues[1:15,]
+#' data.2 <- ExampleData.DeValues[16:25,] * 1.3
+#'
+#' ## now a common dataset is created from the two subgroups
+#' data.3 <- list(data.1, data.2)
+#'
+#' ## now the two data sets are plotted in one plot
+#' plot_RadialPlot(data = data.3)
+#'
+#' ## now with some graphical modification
+#' plot_RadialPlot(data = data.3,
+#'                 col = c("darkblue", "darkgreen"),
+#'                 bar.col = c("lightblue", "lightgreen"),
+#'                 pch = c(2, 6),
+#'                 summary = c("n", "in.2s"),
+#'                 summary.pos = "sub",
+#'                 legend = c("Sample 1", "Sample 2"))
+#'
+#' @export
+plot_RadialPlot <- function(
+  data,
+  na.rm = TRUE,
+  negatives = "remove",
+  log.z = TRUE,
+  central.value,
+  centrality = "mean.weighted",
+  mtext,
+  summary,
+  summary.pos,
+  legend,
+  legend.pos,
+  stats,
+  rug = FALSE,
+  plot.ratio,
+  bar.col,
+  y.ticks = TRUE,
+  grid.col,
+  line,
+  line.col,
+  line.label,
+  output = FALSE,
+  ...
+) {
+  ## Homogenise input data format
+  if(is(data, "list") == FALSE) {data <- list(data)}
+
+  ## Check input data
+  for(i in 1:length(data)) {
+    if(is(data[[i]], "RLum.Results") == FALSE &
+         is(data[[i]], "data.frame") == FALSE) {
+      stop(paste("[plot_RadialPlot] Error: Input data format is neither",
+                 "'data.frame' nor 'RLum.Results'"))
+    } else {
+      if(is(data[[i]], "RLum.Results") == TRUE) {
+        data[[i]] <- get_RLum(data[[i]], "data")
+      }
+    }
+  }
+
+  ## check data and parameter consistency--------------------------------------
+  if(missing(stats) == TRUE) {stats <- numeric(0)}
+  if(missing(summary) == TRUE) {
+    summary <- c("n", "in.2s")
+  }
+
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- "sub"
+  }
+  if(missing(bar.col) == TRUE) {bar.col <- rep("grey80", length(data))}
+  if(missing(grid.col) == TRUE) {grid.col <- rep("grey70", length(data))}
+  if(missing(mtext) == TRUE) {mtext <- ""}
+
+
+  ## check z-axis log-option for grouped data sets
+  if(is(data, "list") == TRUE & length(data) > 1 & log.z == FALSE) {
+    warning(paste("Option 'log.z' is not set to 'TRUE' altough more than one",
+                  "data set (group) is provided."))
+  }
+
+  ## optionally, remove NA-values
+  if(na.rm == TRUE) {
+    for(i in 1:length(data)) {
+      data[[i]] <- na.exclude(data[[i]])
+    }
+  }
+
+  ## create preliminary global data set
+  De.global <- data[[1]][,1]
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      De.global <- c(De.global, data[[i]][,1])
+    }
+  }
+
+  ## calculate major preliminary tick values and tick difference
+  extraArgs <- list(...)
+  if("zlim" %in% names(extraArgs)) {
+    limits.z <- extraArgs$zlim
+  } else {
+    z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100)
+    z.span <- ifelse(z.span > 1, 0.9, z.span)
+    limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) * min(De.global),
+                  (1.1 + z.span) * max(De.global))
+  }
+
+  ticks <- round(pretty(limits.z, n = 5), 3)
+  De.delta <- ticks[2] - ticks[1]
+
+#   ## calculate correction dose to shift negative values
+#   if(min(De.global) <= 0) {
+#     De.add <- abs(ticks[length(ticks) - sum(ticks > limits.z[1])])
+#   } else {De.add <- 0}
+
+  ## optionally, reassign De.add to remove negative values
+  if(negatives == "remove") {
+    De.add <- 0
+
+    for(i in 1:length(data)) {
+      data.test <- data[[i]][,1] <= 0
+      data[[i]] <- data[[i]][!data.test,]
+      data.negative <- paste(seq(1, length(data.test))[data.test == TRUE],
+                             collapse = ", ")
+      if(sum(data.test) > 0) {
+        warning(paste("The following lines contain zero or negative values: ",
+                      data.negative,
+                      ".",
+                      sep = ""))
+      }
+    }
+  }
+
+  ## optionally add correction dose to data set and adjust error
+  for(i in 1:length(data)) {
+    data[[i]][,1] <- data[[i]][,1] + De.add
+    data[[i]][,2] <- data[[i]][,2] * data[[i]][,1] / abs(data[[i]][,1] - De.add)
+  }
+
+  ## calculate major preliminary tick values and tick difference
+  extraArgs <- list(...)
+  if("zlim" %in% names(extraArgs)) {
+    limits.z <- extraArgs$zlim
+  } else {
+    z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100)
+    z.span <- ifelse(z.span > 1, 0.9, z.span)
+    limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) * min(De.global),
+                  (1.1 + z.span) * max(De.global))
+  }
+  ticks <- round(pretty(limits.z, n = 5), 3)
+  De.delta <- ticks[2] - ticks[1]
+
+  ## calculate correction dose to shift negative values
+  if(min(De.global) <= 0) {
+    De.add <- abs(ticks[length(ticks) - sum(ticks > limits.z[1])])
+  } else {De.add <- 0}
+
+  if(negatives == "remove") {
+    De.add <- 0
+  }
+  ## optionally add correction dose to data set and adjust error
+  for(i in 1:length(data)) {
+    data[[i]][,1] <- data[[i]][,1] + De.add
+    data[[i]][,2] <- data[[i]][,2] * data[[i]][,1] / abs(data[[i]][,1] - De.add)
+  }
+
+  ## adjust limits.z
+  limits.z <- limits.z + 2 * De.add
+
+  ## calculate and append statistical measures --------------------------------
+
+  ## z-values based on log-option
+  z <- sapply(1:length(data), function(x){
+    if(log.z == TRUE) {log(data[[x]][,1])} else {data[[x]][,1]}})
+  if(is(z, "list") == FALSE) {z <- list(z)}
+  data <- lapply(1:length(data), function(x) {
+     cbind(data[[x]], z[[x]])})
+  rm(z)
+
+  ## calculate se-values based on log-option
+  se <- sapply(1:length(data), function(x){
+    if(log.z == TRUE) {data[[x]][,2] / data[[x]][,1]} else {data[[x]][,2]}})
+  if(is(se, "list") == FALSE) {se <- list(se)}
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], se[[x]])})
+  rm(se)
+
+  ## calculate central values
+  if(centrality[1] == "mean") {
+    z.central <- lapply(1:length(data), function(x){
+      rep(mean(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))})
+  } else if(centrality[1] == "median") {
+    z.central <- lapply(1:length(data), function(x){
+      rep(median(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))})
+  } else  if(centrality[1] == "mean.weighted") {
+    z.central <- lapply(1:length(data), function(x){
+      sum(data[[x]][,3] / data[[x]][,4]^2) /
+        sum(1 / data[[x]][,4]^2)})
+  } else if(centrality[1] == "median.weighted") {
+    ## define function after isotone::weighted.median
+    median.w <- function (y, w)
+    {
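+      # walk through the values sorted by y until the cumulative weight
+      # below a point balances the weight above it; when the balance is
+      # exact, the two neighbouring values are averaged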
+      ox <- order(y)
+      y <- y[ox]
+      w <- w[ox]
+      k <- 1
+      low <- cumsum(c(0, w))
+      up <- sum(w) - low
+      df <- low - up
+      repeat {
+        if (df[k] < 0)
+          k <- k + 1
+        else if (df[k] == 0)
+          return((w[k] * y[k] + w[k - 1] * y[k - 1]) / (w[k] + w[k - 1]))
+        else return(y[k - 1])
+      }
+    }
+    z.central <- lapply(1:length(data), function(x){
+      rep(median.w(y = data[[x]][,3],
+                   w = data[[x]][,4]), length(data[[x]][,3]))})
+  } else if(is.numeric(centrality) == TRUE &
+              length(centrality) == length(data)) {
+    z.central.raw <- if(log.z == TRUE) {
+      log(centrality)
+    } else {
+      centrality
+    }
+    z.central <- lapply(1:length(data), function(x){
+      rep(z.central.raw[x], length(data[[x]][,3]))})
+  } else if(is.numeric(centrality) == TRUE &
+              length(centrality) > length(data)) {
+    z.central <- lapply(1:length(data), function(x){
+      rep(median(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))})
+  } else {
+    stop("Measure of centrality not supported!")
+  }
+
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], z.central[[x]])})
+  rm(z.central)
+
+  ## calculate precision
+  precision <- sapply(1:length(data), function(x){
+    1 / data[[x]][,4]})
+  if(is(precision, "list") == FALSE) {precision <- list(precision)}
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], precision[[x]])})
+  rm(precision)
+
+  ## calculate standard estimate
+  std.estimate <- sapply(1:length(data), function(x){
+    (data[[x]][,3] - data[[x]][,5]) / data[[x]][,4]})
+  if(is(std.estimate, "list") == FALSE) {std.estimate <- list(std.estimate)}
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], std.estimate[[x]])})
+
+  ## append empty standard estimate for plotting
+  data <- lapply(1:length(data), function(x) {
+    cbind(data[[x]], std.estimate[[x]])})
+  rm(std.estimate)
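+  ## together these columns give the classic radial plot coordinates
+  ## (Galbraith 1988): x = 1/se (precision) and y = (z - z.central)/se
+  ## (standardised estimate)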
+
+  ## generate global data set
+  data.global <- cbind(data[[1]],
+                       rep(x = 1,
+                           times = nrow(data[[1]])))
+
+  colnames(data.global) <- rep("", 9)
+
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      data.add <- cbind(data[[i]],
+                        rep(x = i, times = nrow(data[[i]])))
+      colnames(data.add) <- rep("", 9)
+      data.global <- rbind(data.global,
+                           data.add)
+    }
+  }
+
+  ## create column names
+  colnames(data.global) <- c("De",
+                             "error",
+                             "z",
+                             "se",
+                             "z.central",
+                             "precision",
+                             "std.estimate",
+                             "std.estimate.plot")
+
+## calculate global central value
+if(centrality[1] == "mean") {
+  z.central.global <- mean(data.global[,3], na.rm = TRUE)
+} else if(centrality[1] == "median") {
+  z.central.global <- median(data.global[,3], na.rm = TRUE)
+} else  if(centrality[1] == "mean.weighted") {
+  z.central.global <- sum(data.global[,3] / data.global[,4]^2) /
+    sum(1 / data.global[,4]^2)
+} else if(centrality[1] == "median.weighted") {
+  ## define function after isotone::weighted.median
+  median.w <- function (y, w)
+  {
+    ox <- order(y)
+    y <- y[ox]
+    w <- w[ox]
+    k <- 1
+    low <- cumsum(c(0, w))
+    up <- sum(w) - low
+    df <- low - up
+    repeat {
+      if (df[k] < 0)
+        k <- k + 1
+      else if (df[k] == 0)
+        return((w[k] * y[k] + w[k - 1] * y[k - 1])/(w[k] +
+                                                      w[k - 1]))
+      else return(y[k - 1])
+    }
+  }
+  z.central.global <- median.w(y = data.global[,3], w = data.global[,4])
+} else if(is.numeric(centrality) == TRUE &
+            length(centrality) >= length(data)) {
+  z.central.global <- mean(data.global[,3], na.rm = TRUE)
+}
+
+  ## optionally adjust central value by user-defined value
+  if(missing(central.value) == FALSE) {
+
+    ## adjust central value for De.add
+    central.value <- central.value + 2 * De.add
+
+    z.central.global <- ifelse(log.z == TRUE,
+                               log(central.value), central.value)
+  }
+
+  ## create column names
+  for(i in 1:length(data)) {
+    colnames(data[[i]]) <- c("De",
+                             "error",
+                             "z",
+                             "se",
+                             "z.central",
+                             "precision",
+                             "std.estimate",
+                             "std.estimate.plot")
+  }
+
+  ## re-calculate standardised estimate for plotting
+  for(i in 1:length(data)) {
+    data[[i]][,8] <- (data[[i]][,3] - z.central.global) / data[[i]][,4]
+  }
+
+  data.global.plot <- data[[1]][,8]
+  if(length(data) > 1) {
+    for(i in 2:length(data)) {
+      data.global.plot <- c(data.global.plot, data[[i]][,8])
+    }
+  }
+  data.global[,8] <- data.global.plot
+
+  ## print warning for too small scatter
+  if(max(abs(1 / data.global[6])) < 0.02) {
+    small.sigma <- TRUE
+    print(paste("Attention, small standardised estimate scatter.",
+                "Toggle off y.ticks?"))
+  }
+
+  ## read out additional arguments---------------------------------------------
+  extraArgs <- list(...)
+
+  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+    {expression(paste(D[e], " distribution"))}
+
+  sub <- if("sub" %in% names(extraArgs)) {extraArgs$sub} else {""}
+
+  if("xlab" %in% names(extraArgs)) {
+    if(length(extraArgs$xlab) != 2) {
+      stop("Argmuent xlab is not of length 2!")
+    } else {xlab <- extraArgs$xlab}
+  } else {
+    xlab <- c(if(log.z == TRUE) {
+      "Relative standard error (%)"
+      } else {
+        "Standard error"
+        },
+      "Precision")
+  }
+
+  ylab <- if("ylab" %in% names(extraArgs)) {
+    extraArgs$ylab
+    } else {
+      "Standardised estimate"
+    }
+
+  zlab <- if("zlab" %in% names(extraArgs)) {
+    extraArgs$zlab
+    } else {
+      expression(paste(D[e], " [Gy]"))
+    }
+
+  if("zlim" %in% names(extraArgs)) {
+    limits.z <- extraArgs$zlim
+  } else {
+    z.span <- (mean(data.global[,1]) * 0.5) / (sd(data.global[,1]) * 100)
+    z.span <- ifelse(z.span > 1, 0.9, z.span)
+    limits.z <- c((0.9 - z.span) * min(data.global[[1]]),
+                  (1.1 + z.span) * max(data.global[[1]]))
+  }
+  if(limits.z[1] <= 0) {
+    limits.z <- limits.z + 2 * De.add
+  }
+
+  if("xlim" %in% names(extraArgs)) {
+    limits.x <- extraArgs$xlim
+  } else {
+    limits.x <- c(0, max(data.global[,6]))
+  }
+
+  if(limits.x[1] != 0) {
+    limits.x[1] <- 0
+    warning("Lower x-axis limit not set to zero, issue corrected!")
+  }
+
+  if("ylim" %in% names(extraArgs)) {
+    limits.y <- extraArgs$ylim
+  } else {
+    y.span <- (mean(data.global[,1]) * 10) / (sd(data.global[,1]) * 100)
+    y.span <- ifelse(y.span > 1, 0.98, y.span)
+    limits.y <- c(-(1 + y.span) * max(abs(data.global[,7])),
+                   (0.8 + y.span) * max(abs(data.global[,7])))
+  }
+
+  cex <- if("cex" %in% names(extraArgs)) {
+    extraArgs$cex
+  } else {
+    1
+  }
+
+  lty <- if("lty" %in% names(extraArgs)) {
+    extraArgs$lty
+    } else {
+      rep(2, length(data))
+    }
+
+  lwd <- if("lwd" %in% names(extraArgs)) {
+    extraArgs$lwd
+    } else {
+      rep(1, length(data))
+    }
+
+  pch <- if("pch" %in% names(extraArgs)) {
+    extraArgs$pch
+    } else {
+      rep(1, length(data))
+    }
+
+  col <- if("col" %in% names(extraArgs)) {
+    extraArgs$col
+    } else {
+      1:length(data)
+    }
+
+  tck <- if("tck" %in% names(extraArgs)) {
+    extraArgs$tck
+  } else {
+    NA
+  }
+
+  tcl <- if("tcl" %in% names(extraArgs)) {
+    extraArgs$tcl
+  } else {
+    -0.5
+  }
+
+  show <- if("show" %in% names(extraArgs)) {extraArgs$show} else {TRUE}
+  if(show != TRUE) {show <- FALSE}
+
+  fun <- if("fun" %in% names(extraArgs)) {
+    extraArgs$fun
+  } else {
+    FALSE
+  }
+
+  ## define auxiliary plot parameters -----------------------------------------
+
+  ## optionally adjust plot ratio
+  if(missing(plot.ratio) == TRUE) {
+    if(log.z == TRUE) {
+      plot.ratio <- 1 /  (1 * ((max(data.global[,6]) - min(data.global[,6])) /
+        (max(data.global[,7]) - min(data.global[,7]))))
+    } else {
+      plot.ratio <- 4.5 / 5.5
+    }
+  }
+
+  if(plot.ratio > 10^6) {plot.ratio <- 10^6}
+
+  ## calculate conversion factor for plot coordinates
+  f <- (max(data.global[,6]) - min(data.global[,6])) /
+       (max(data.global[,7]) - min(data.global[,7])) * plot.ratio
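+
+  ## note: in a radial plot each point is drawn at (x, y) =
+  ## (precision, standardised estimate) = (1/se, (z - z.central)/se);
+  ## points sharing the same z value lie on one straight line through
+  ## the origin, and 'f' rescales y-units into x-units for the chosen
+  ## plot ratio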
+
+  ## calculate major and minor z-tick values
+  tick.values.major <- signif(pretty(limits.z, n = 5), 3)
+  tick.values.minor <- signif(pretty(limits.z, n = 25), 3)
+
+  tick.values.major <- tick.values.major[tick.values.major >=
+    min(tick.values.minor)]
+  tick.values.major <- tick.values.major[tick.values.major <=
+    max(tick.values.minor)]
+  tick.values.major <- tick.values.major[tick.values.major >=
+    limits.z[1]]
+  tick.values.major <- tick.values.major[tick.values.major <=
+    limits.z[2]]
+  tick.values.minor <- tick.values.minor[tick.values.minor >=
+    limits.z[1]]
+  tick.values.minor <- tick.values.minor[tick.values.minor <=
+    limits.z[2]]
+
+  if(log.z == TRUE) {
+    tick.values.major <- log(tick.values.major)
+    tick.values.minor <- log(tick.values.minor)
+  }
+
+  ## calculate z-axis radius
+  r.x <- limits.x[2] / max(data.global[,6]) + 0.03
+  r <- max(sqrt((data.global[,6])^2+(data.global[,7] * f)^2)) * r.x
+
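+  ## each tick lies on the arc x^2 + (f * y)^2 = r^2; substituting the
+  ## ray y = (z - z.central.global) * x gives the closed form
+  ## x = r / sqrt(1 + f^2 * (z - z.central.global)^2) used below
+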
+  ## calculate major z-tick coordinates
+  tick.x1.major <- r / sqrt(1 + f^2 * (
+    tick.values.major - z.central.global)^2)
+  tick.y1.major <- (tick.values.major - z.central.global) * tick.x1.major
+  tick.x2.major <- (1 + 0.015 * cex) * r / sqrt(
+    1 + f^2 * (tick.values.major - z.central.global)^2)
+  tick.y2.major <- (tick.values.major - z.central.global) * tick.x2.major
+  ticks.major <- cbind(tick.x1.major,
+                       tick.x2.major,
+                       tick.y1.major,
+                       tick.y2.major)
+
+  ## calculate minor z-tick coordinates
+  tick.x1.minor <- r / sqrt(1 + f^2 * (
+    tick.values.minor - z.central.global)^2)
+  tick.y1.minor <- (tick.values.minor - z.central.global) * tick.x1.minor
+  tick.x2.minor <- (1 + 0.007 * cex) * r / sqrt(
+    1 + f^2 * (tick.values.minor - z.central.global)^2)
+  tick.y2.minor <- (tick.values.minor - z.central.global) * tick.x2.minor
+  ticks.minor <- cbind(tick.x1.minor,
+                       tick.x2.minor,
+                       tick.y1.minor,
+                       tick.y2.minor)
+
+  ## calculate z-label positions
+  label.x <- 1.03 * r / sqrt(1 + f^2 *
+    (tick.values.major - z.central.global)^2)
+  label.y <- (tick.values.major - z.central.global) * tick.x2.major
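+
+  ## the factor 1.03 places the labels slightly outside the z-axis arc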
+
+  ## create z-axes labels
+  if(log.z == TRUE) {
+    label.z.text <- signif(exp(tick.values.major), 3)
+  } else {
+    label.z.text <- signif(tick.values.major, 3)
+  }
+
+  ## subtract De.add from label values
+  if(De.add != 0) {
+    label.z.text <- label.z.text - 2 * De.add
+  }
+
+  labels <- cbind(label.x, label.y, label.z.text)
+
+  ## calculate coordinates for 2-sigma-polygon overlay
+  polygons <- matrix(nrow = length(data), ncol = 8)
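+
+  ## each polygon spans the band y = -2 ... +2 at the left axis and is
+  ## sheared along the central-value line of the respective data set
+  ## towards the right-hand plot margin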
+
+  for(i in 1:length(data)) {
+    polygons[i,1:4] <- c(limits.x[1],
+                         limits.x[1],
+                         max(data.global[,6]),
+                         max(data.global[,6]))
+    polygons[i,5:8] <- c(-2,
+                         2,
+                         (data[[i]][1,5] - z.central.global) *
+                           polygons[i,3] + 2,
+                         (data[[i]][1,5] - z.central.global) *
+                           polygons[i,4] - 2)
+  }
+  ## calculate node coordinates for semi-circle
+  user.limits <- if(log.z == TRUE) {
+    log(limits.z)
+  } else{
+    limits.z
+  }
+
+  ellipse.values <- seq(from = min(c(tick.values.major,
+                                     tick.values.minor,
+                                     user.limits[2])),
+                        to = max(c(tick.values.major,
+                                   tick.values.minor,
+                                   user.limits[2])),
+                        length.out = 500)
+  ellipse.x <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2)
+  ellipse.y <- (ellipse.values - z.central.global) * ellipse.x
+  ellipse <- cbind(ellipse.x, ellipse.y)
+  ellipse.lims <- rbind(range(ellipse[,1]), range(ellipse[,2]))
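+
+  ## despite its name, 'ellipse' holds the sampled z-axis arc; its x/y
+  ## ranges are kept for the axis-limit adjustments further below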
+
+  ## calculate statistical labels
+  if(length(stats) == 1) {stats <- rep(stats, 2)}
+  stats.data <- matrix(nrow = 3, ncol = 3)
+  data.stats <- as.numeric(data.global[,1] - 2 * De.add)
+
+  if("min" %in% stats == TRUE) {
+    stats.data[1, 3] <- data.stats[data.stats == min(data.stats)][1]
+    stats.data[1, 1] <- data.global[data.stats == stats.data[1, 3], 6][1]
+    stats.data[1, 2] <- data.global[data.stats == stats.data[1, 3], 8][1]
+  }
+
+  if("max" %in% stats == TRUE) {
+    stats.data[2, 3] <- data.stats[data.stats == max(data.stats)][1]
+    stats.data[2, 1] <- data.global[data.stats == stats.data[2, 3], 6][1]
+    stats.data[2, 2] <- data.global[data.stats == stats.data[2, 3], 8][1]
+  }
+
+  if("median" %in% stats == TRUE) {
+    stats.data[3, 3] <- data.stats[data.stats == quantile(data.stats, 0.5, type = 3)][1]
+    stats.data[3, 1] <- data.global[data.stats == stats.data[3, 3], 6][1]
+    stats.data[3, 2] <- data.global[data.stats == stats.data[3, 3], 8][1]
+  }
+
+  ## recalculate axes limits if necessary
+  limits.z.x <- range(ellipse[,1])
+  limits.z.y <- range(ellipse[,2])
+  if(!("ylim" %in% names(extraArgs))) {
+    if(limits.z.y[1] < 0.66 * limits.y[1]) {
+      limits.y[1] <- 1.8 * limits.z.y[1]
+    }
+    if(limits.z.y[2] > 0.77 * limits.y[2]) {
+      limits.y[2] <- 1.3 * limits.z.y[2]
+    }
+#    limits.y <- c(-max(abs(limits.y)), max(abs(limits.y)))
+  }
+  if(!("xlim" %in% names(extraArgs))) {
+    if(limits.z.x[2] > 1.1 * limits.x[2]) {
+      limits.x[2] <- limits.z.x[2]
+    }
+  }
+
+  ## calculate and paste statistical summary
+  De.stats <- matrix(nrow = length(data), ncol = 18)
+  colnames(De.stats) <- c("n",
+                          "mean",
+                          "mean.weighted",
+                          "median",
+                          "median.weighted",
+                          "kde.max",
+                          "sd.abs",
+                          "sd.rel",
+                          "se.abs",
+                          "se.rel",
+                          "q25",
+                          "q75",
+                          "skewness",
+                          "kurtosis",
+                          "sd.abs.weighted",
+                          "sd.rel.weighted",
+                          "se.abs.weighted",
+                          "se.rel.weighted")
+
+  for(i in 1:length(data)) {
+    statistics <- calc_Statistics(data[[i]])
+    De.stats[i,1] <- statistics$weighted$n
+    De.stats[i,2] <- statistics$unweighted$mean
+    De.stats[i,3] <- statistics$weighted$mean
+    De.stats[i,4] <- statistics$unweighted$median
+    De.stats[i,5] <- statistics$weighted$median
+    De.stats[i,7] <- statistics$unweighted$sd.abs
+    De.stats[i,8] <- statistics$unweighted$sd.rel
+    De.stats[i,9] <- statistics$unweighted$se.abs
+    De.stats[i,10] <- statistics$unweighted$se.rel
+    De.stats[i,11] <- quantile(data[[i]][,1], 0.25)
+    De.stats[i,12] <- quantile(data[[i]][,1], 0.75)
+    De.stats[i,13] <- statistics$unweighted$skewness
+    De.stats[i,14] <- statistics$unweighted$kurtosis
+    De.stats[i,15] <- statistics$weighted$sd.abs
+    De.stats[i,16] <- statistics$weighted$sd.rel
+    De.stats[i,17] <- statistics$weighted$se.abs
+    De.stats[i,18] <- statistics$weighted$se.rel
+
+    ##kde.max - computed here although a similar calculation appears again below
+    De.density <- density(x = data[[i]][,1],
+                         kernel = "gaussian",
+                         from = limits.z[1],
+                         to = limits.z[2])
+
+    De.stats[i,6] <- De.density$x[which.max(De.density$y)]
+  }
+
+  label.text <- list(NA)
+
+  if(summary.pos[1] != "sub") {
+    n.rows <- length(summary)
+
+    for(i in 1:length(data)) {
+      stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "")
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          paste(
+                            "",
+                            ifelse("n" %in% summary[j] == TRUE,
+                                   paste("n = ",
+                                         De.stats[i,1],
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean" %in% summary[j] == TRUE,
+                                   paste("mean = ",
+                                         round(De.stats[i,2], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("mean.weighted" %in% summary[j] == TRUE,
+                                   paste("weighted mean = ",
+                                         round(De.stats[i,3], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median" %in% summary[j] == TRUE,
+                                   paste("median = ",
+                                         round(De.stats[i,4], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("median.weighted" %in% summary[j] == TRUE,
+                                   paste("weighted median = ",
+                                         round(De.stats[i,5], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kdemax" %in% summary[j] == TRUE,
+                                   paste("kdemax = ",
+                                         round(De.stats[i,6], 2),
+                                         " \n ",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdabs" %in% summary[j] == TRUE,
+                                   paste("sd = ",
+                                         round(De.stats[i,7], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdrel" %in% summary[j] == TRUE,
+                                   paste("rel. sd = ",
+                                         round(De.stats[i,8], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("seabs" %in% summary[j] == TRUE,
+                                   paste("se = ",
+                                         round(De.stats[i,9], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("serel" %in% summary[j] == TRUE,
+                                   paste("rel. se = ",
+                                         round(De.stats[i,10], 2), " %",
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("skewness" %in% summary[j] == TRUE,
+                                   paste("skewness = ",
+                                         round(De.stats[i,13], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("kurtosis" %in% summary[j] == TRUE,
+                                   paste("kurtosis = ",
+                                         round(De.stats[i,14], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("in.2s" %in% summary[j] == TRUE,
+                                   paste("in 2 sigma = ",
+                                         round(sum(data[[i]][,7] > -2 &
+                                                     data[[i]][,7] < 2) /
+                                                 nrow(data[[i]]) * 100 , 1),
+                                         " %",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdabs.weighted" %in% summary[j] == TRUE,
+                                   paste("abs. weighted sd = ",
+                                         round(De.stats[i,15], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("sdrel.weighted" %in% summary[j] == TRUE,
+                                   paste("rel. weighted sd = ",
+                                         round(De.stats[i,16], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("seabs.weighted" %in% summary[j] == TRUE,
+                                   paste("abs. weighted se = ",
+                                         round(De.stats[i,17], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            ifelse("serel.weighted" %in% summary[j] == TRUE,
+                                   paste("rel. weighted se = ",
+                                         round(De.stats[i,18], 2),
+                                         "\n",
+                                         sep = ""),
+                                   ""),
+                            sep = ""))
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]] <- paste(stops,
+                                                    summary.text,
+                                                    stops,
+                                                    sep = "")
+    }
+  } else {
+    for(i in 1:length(data)) {
+
+      summary.text <- character(0)
+
+      for(j in 1:length(summary)) {
+        summary.text <- c(summary.text,
+                          ifelse("n" %in% summary[j] == TRUE,
+                                 paste("n = ",
+                                       De.stats[i,1],
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean" %in% summary[j] == TRUE,
+                                 paste("mean = ",
+                                       round(De.stats[i,2], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("mean.weighted" %in% summary[j] == TRUE,
+                                 paste("weighted mean = ",
+                                       round(De.stats[i,3], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median" %in% summary[j] == TRUE,
+                                 paste("median = ",
+                                       round(De.stats[i,4], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("median.weighted" %in% summary[j] == TRUE,
+                                 paste("weighted median = ",
+                                       round(De.stats[i,5], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kdemax" %in% summary[j] == TRUE,
+                                 paste("kdemax = ",
+                                       round(De.stats[i,6], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdrel" %in% summary[j] == TRUE,
+                                 paste("rel. sd = ",
+                                       round(De.stats[i,8], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdabs" %in% summary[j] == TRUE,
+                                 paste("abs. sd = ",
+                                       round(De.stats[i,7], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("serel" %in% summary[j] == TRUE,
+                                 paste("rel. se = ",
+                                       round(De.stats[i,10], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("seabs" %in% summary[j] == TRUE,
+                                 paste("abs. se = ",
+                                       round(De.stats[i,9], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("skewness" %in% summary[j] == TRUE,
+                                 paste("skewness = ",
+                                       round(De.stats[i,13], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("kurtosis" %in% summary[j] == TRUE,
+                                 paste("kurtosis = ",
+                                       round(De.stats[i,14], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("in.2s" %in% summary[j] == TRUE,
+                                 paste("in 2 sigma = ",
+                                       round(sum(data[[i]][,7] > -2 &
+                                                   data[[i]][,7] < 2) /
+                                               nrow(data[[i]]) * 100 , 1),
+                                       " %   ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdabs.weighted" %in% summary[j] == TRUE,
+                                 paste("abs. weighted sd = ",
+                                       round(De.stats[i,15], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("sdrel.weighted" %in% summary[j] == TRUE,
+                                 paste("rel. weighted sd = ",
+                                       round(De.stats[i,16], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("seabs.weighted" %in% summary[j] == TRUE,
+                                 paste("abs. weighted se = ",
+                                       round(De.stats[i,17], 2),
+                                       " | ",
+                                       sep = ""),
+                                 ""),
+                          ifelse("serel.weighted" %in% summary[j] == TRUE,
+                                 paste("rel. weighted se = ",
+                                       round(De.stats[i,18], 2), " %",
+                                       " | ",
+                                       sep = ""),
+                                 "")
+        )
+      }
+
+      summary.text <- paste(summary.text, collapse = "")
+
+      label.text[[length(label.text) + 1]]  <- paste(
+        "  ",
+        summary.text,
+        sep = "")
+    }
+
+    ## remove outer vertical lines from string
+    for(i in 2:length(label.text)) {
+      label.text[[i]] <- substr(x = label.text[[i]],
+                                start = 3,
+                                stop = nchar(label.text[[i]]) - 3)
+    }
+  }
+
+  ## remove dummy list element
+  label.text[[1]] <- NULL
+
+  ## convert keywords into summary placement coordinates
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- c(limits.x[1], limits.y[2])
+    summary.adj <- c(0, 1)
+  } else if(length(summary.pos) == 2) {
+    summary.pos <- summary.pos
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "topleft") {
+    summary.pos <- c(limits.x[1], limits.y[2])
+    summary.adj <- c(0, 1)
+  } else if(summary.pos[1] == "top") {
+    summary.pos <- c(mean(limits.x), limits.y[2])
+    summary.adj <- c(0.5, 1)
+  } else if(summary.pos[1] == "topright") {
+    summary.pos <- c(limits.x[2], limits.y[2])
+    summary.adj <- c(1, 1)
+  }  else if(summary.pos[1] == "left") {
+    summary.pos <- c(limits.x[1], mean(limits.y))
+    summary.adj <- c(0, 0.5)
+  } else if(summary.pos[1] == "center") {
+    summary.pos <- c(mean(limits.x), mean(limits.y))
+    summary.adj <- c(0.5, 0.5)
+  } else if(summary.pos[1] == "right") {
+    summary.pos <- c(limits.x[2], mean(limits.y))
+    summary.adj <- c(1, 0.5)
+  } else if(summary.pos[1] == "bottomleft") {
+    summary.pos <- c(limits.x[1], limits.y[1])
+    summary.adj <- c(0, 0)
+  } else if(summary.pos[1] == "bottom") {
+    summary.pos <- c(mean(limits.x), limits.y[1])
+    summary.adj <- c(0.5, 0)
+  } else if(summary.pos[1] == "bottomright") {
+    summary.pos <- c(limits.x[2], limits.y[1])
+    summary.adj <- c(1, 0)
+  }
+
+  ## convert keywords into legend placement coordinates
+  if(missing(legend.pos) == TRUE) {
+    legend.pos <- c(limits.x[1], limits.y[2])
+    legend.adj <- c(0, 1)
+  } else if(length(legend.pos) == 2) {
+    legend.pos <- legend.pos
+    legend.adj <- c(0, 1)
+  } else if(legend.pos[1] == "topleft") {
+    legend.pos <- c(limits.x[1], limits.y[2])
+    legend.adj <- c(0, 1)
+  } else if(legend.pos[1] == "top") {
+    legend.pos <- c(mean(limits.x), limits.y[2])
+    legend.adj <- c(0.5, 1)
+  } else if(legend.pos[1] == "topright") {
+    legend.pos <- c(limits.x[2], limits.y[2])
+    legend.adj <- c(1, 1)
+  } else if(legend.pos[1] == "left") {
+    legend.pos <- c(limits.x[1], mean(limits.y))
+    legend.adj <- c(0, 0.5)
+  } else if(legend.pos[1] == "center") {
+    legend.pos <- c(mean(limits.x), mean(limits.y))
+    legend.adj <- c(0.5, 0.5)
+  } else if(legend.pos[1] == "right") {
+    legend.pos <- c(limits.x[2], mean(limits.y))
+    legend.adj <- c(1, 0.5)
+  } else if(legend.pos[1] == "bottomleft") {
+    legend.pos <- c(limits.x[1], limits.y[1])
+    legend.adj <- c(0, 0)
+  } else if(legend.pos[1] == "bottom") {
+    legend.pos <- c(mean(limits.x), limits.y[1])
+    legend.adj <- c(0.5, 0)
+  } else if(legend.pos[1] == "bottomright") {
+    legend.pos <- c(limits.x[2], limits.y[1])
+    legend.adj <- c(1, 0)
+  }
+
+  ## calculate line coordinates and further parameters
+  if(missing(line) == FALSE) {
+    if(log.z == TRUE) {line <- log(line)}
+
+    line.coords <- list(NA)
+
+    for(i in 1:length(line)) {
+      line.x <- c(limits.x[1],
+                  r / sqrt(1 + f^2 * (line[i] - z.central.global)^2))
+      line.y <- c(0, (line[i] - z.central.global) * line.x[2])
+
+      line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y)
+    }
+
+    line.coords[1] <- NULL
+
+    if(missing(line.col) == TRUE) {
+      line.col <- seq(from = 1, to = length(line.coords))
+    }
+
+    if(missing(line.label) == TRUE) {
+      line.label <- rep("", length(line.coords))
+    }
+  }
+
+  ## calculate rug coordinates
+  if(missing(rug) == FALSE) {
+    if(log.z == TRUE) {
+      rug.values <- log(De.global)
+    } else {
+      rug.values <- De.global
+    }
+
+    rug.coords <- list(NA)
+
+    for(i in 1:length(rug.values)) {
+      rug.x <- c(r / sqrt(1 + f^2 * (rug.values[i] - z.central.global)^2) * 0.988,
+                 r / sqrt(1 + f^2 * (rug.values[i] - z.central.global)^2) * 0.995)
+      rug.y <- c((rug.values[i] - z.central.global) * rug.x[1],
+                 (rug.values[i] - z.central.global) * rug.x[2])
+      rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y)
+    }
+
+    rug.coords[1] <- NULL
+  }
+
+  ## Generate plot ------------------------------------------------------------
+
+  ## check if plotting is enabled
+  if(show == TRUE) {
+
+    ## determine number of subheader lines to shift the plot
+    if(length(summary) > 0 & summary.pos[1] == "sub") {
+      shift.lines <- length(data) + 1
+    } else {shift.lines <- 1}
+
+    ## setup plot area
+    par(mar = c(4, 4, shift.lines + 1.5, 7),
+        xpd = TRUE,
+        cex = cex)
+
+    ## create empty plot
+    plot(NA,
+         xlim = limits.x,
+         ylim = limits.y,
+         main = "",
+         sub = sub,
+         xlab = "",
+         ylab = "",
+         xaxs = "i",
+         yaxs = "i",
+         frame.plot = FALSE,
+         axes = FALSE)
+
+    ## add y-axis label
+    mtext(side = 2,
+          line = 2.5,
+          at = 0,
+          adj = 0.5,
+          cex = cex,
+          text = ylab)
+
+    ## calculate upper x-axis label values
+    label.x.upper <- if(log.z == TRUE) {
+      as.character(round(1/axTicks(side = 1)[-1] * 100, 1))
+    } else {
+      as.character(round(1/axTicks(side = 1)[-1], 1))
+    }
+
+    ## optionally, plot 2-sigma-bar
+    if(bar.col[1] != "none") {
+      for(i in 1:length(data)) {
+        polygon(x = polygons[i,1:4],
+                y = polygons[i,5:8],
+                lty = "blank",
+                col = bar.col[i])
+      }
+    }
+
+    ## optionally, add grid lines
+    if(grid.col[1] != "none") {
+      for(i in 1:length(tick.x1.major)) {
+        lines(x = c(limits.x[1], tick.x1.major[i]),
+              y = c(0, tick.y1.major[i]),
+              col = grid.col)
+      }
+    }
+
+    ## optionally, plot central value lines
+    if(lwd[1] > 0 & lty[1] > 0) {
+      for(i in 1:length(data)) {
+        x2 <- r / sqrt(1 + f^2 * (
+          data[[i]][1,5] - z.central.global)^2)
+        y2 <- (data[[i]][1,5] - z.central.global) * x2
+        lines(x = c(limits.x[1], x2),
+              y = c(0, y2),
+              lty = lty[i],
+              lwd = lwd[i],
+              col = col[i])
+      }
+    }
+
+    ## optionally add further lines
+    if(missing(line) == FALSE) {
+      for(i in 1:length(line)) {
+        lines(x = line.coords[[i]][1,],
+              y = line.coords[[i]][2,],
+              col = line.col[i])
+        text(x = line.coords[[i]][1,2],
+             y = line.coords[[i]][2,2] + par()$cxy[2] * 0.3,
+             labels = line.label[i],
+             pos = 2,
+             col = line.col[i],
+             cex = cex * 0.9)
+      }
+    }
+
+    ## overplot unwanted parts
+    polygon(x = c(ellipse[,1], limits.x[2] * 2, limits.x[2] * 2),
+            y = c(ellipse[,2], max(ellipse[,2]), min(ellipse[,2])),
+            col = "white",
+            lty = 0)
+
+    ## add plot title
+    title(main = main, line = shift.lines, font = 2)
+
+    ## plot lower x-axis (precision)
+    x.axis.ticks <- axTicks(side = 1)
+    x.axis.ticks <- x.axis.ticks[x.axis.ticks <= limits.x[2]]
+
+    ## axis with labels and ticks
+    axis(side = 1,
+         at = x.axis.ticks,
+         lwd = 1,
+         xlab = "")
+
+    ## extend axis line to right side of the plot
+    lines(x = c(max(x.axis.ticks, na.rm = TRUE), limits.x[2]),
+          y = c(limits.y[1], limits.y[1]))
+
+    ## draw closing tick on right hand side
+    axis(side = 1, tcl = 0.5, lwd = 0, lwd.ticks = 1, at = limits.x[2],
+         labels = FALSE)
+    axis(side = 1, tcl = -0.5, lwd = 0, lwd.ticks = 1, at = limits.x[2],
+         labels = FALSE)
+
+    ## add upper axis label
+    mtext(text = xlab[1],
+          at = (limits.x[1] + limits.x[2]) / 2,
+          side = 1,
+          line = -3.5,
+          cex = cex)
+
+    ## add lower axis label
+    mtext(text = xlab[2],
+          at = (limits.x[1] + limits.x[2]) / 2,
+          side = 1,
+          line = 2.5,
+          cex = cex)
+
+    ## plot upper x-axis
+    axis(side = 1,
+         tcl = 0.5,
+         lwd = 0,
+         lwd.ticks = 1,
+         at = x.axis.ticks[-1],
+         labels = FALSE)
+
+    ## remove first tick label (infinity)
+    label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)]
+
+    ## add tick labels
+    axis(side = 1,
+         lwd = 0,
+         labels = label.x.upper,
+         at = x.axis.ticks[-1],
+         line = -3)
+
+    ## plot minor z-ticks
+    for(i in 1:length(tick.values.minor)) {
+      lines(x = c(tick.x1.minor[i], tick.x2.minor[i]),
+            y = c(tick.y1.minor[i], tick.y2.minor[i]))
+    }
+
+    ## plot major z-ticks
+    for(i in 1:length(tick.values.major)) {
+      lines(x = c(tick.x1.major[i], tick.x2.major[i]),
+            y = c(tick.y1.major[i], tick.y2.major[i]))
+    }
+
+    ## plot z-axis
+    lines(ellipse)
+
+    ## plot z-values
+    text(x = label.x,
+         y = label.y,
+         labels = label.z.text,
+         adj = 0)
+
+    ## plot z-label
+    mtext(side = 4,
+          at = 0,
+          line = 5,
+          las = 3,
+          adj = 0.5,
+          cex = cex,
+          text = zlab)
+
+    ## optionally add rug
+    if(rug == TRUE) {
+      for(i in 1:length(rug.coords)) {
+        lines(x = rug.coords[[i]][1,],
+              y = rug.coords[[i]][2,],
+              col = col[data.global[i,9]])
+      }
+    }
+
+    ## plot values
+    for(i in 1:length(data)) {
+      points(data[[i]][,6][data[[i]][,6] <= limits.x[2]],
+             data[[i]][,8][data[[i]][,6] <= limits.x[2]],
+             col = col[i],
+             pch = pch[i])
+    }
+
+    ## optionally add min, max, median sample text
+    if(length(stats) > 0) {
+      text(x = stats.data[,1],
+           y = stats.data[,2],
+           labels = round(stats.data[,3], 1),
+           pos = 2,
+           cex = 0.85)
+    }
+
+    ## optionally add legend content
+    if(missing(legend) == FALSE) {
+      legend(x = legend.pos[1],
+             y = 0.8 * legend.pos[2],
+             xjust = legend.adj[1],
+             yjust = legend.adj[2],
+             legend = legend,
+             pch = pch,
+             col = col,
+             text.col = col,
+             cex = 0.8 * cex,
+             bty = "n")
+    }
+
+    ## plot y-axis
+    if(y.ticks == TRUE) {
+      char.height <- par()$cxy[2]
+      tick.space <- axisTicks(usr = limits.y, log = FALSE)
+      tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space)
+      if(tick.space < char.height * 1.5) {
+        axis(side = 2, at = c(-2, 2), labels = c("", ""), las = 1)
+        axis(side = 2, at = 0, tcl = 0, labels = paste("\u00B1", "2"), las = 1)
+      } else {
+        axis(side = 2, at = seq(-2, 2, by = 2), las = 2)
+      }
+    } else {
+      axis(side = 2, at = 0)
+    }
+
+    ## optionally add subheader text
+    mtext(side = 3,
+          line = shift.lines - 2,
+          text = mtext,
+          cex = 0.8 * cex)
+
+    ## add summary content
+    for(i in 1:length(data)) {
+      if(summary.pos[1] != "sub") {
+        text(x = summary.pos[1],
+             y = 0.8 * summary.pos[2],
+             adj = summary.adj,
+             labels = label.text[[i]],
+             cex = 0.8 * cex,
+             col = col[i])
+      } else {
+        if(mtext == "") {
+          mtext(side = 3,
+                line = shift.lines - 1 - i,
+                text = label.text[[i]],
+                col = col[i],
+                cex = 0.8 * cex)
+        }
+      }
+    }
+
+    ##FUN by R Luminescence Team
+    if(fun==TRUE){sTeve()}
+  }
+
+  if(output == TRUE) {
+    return(list(data = data,
+                data.global = data.global,
+                xlim = limits.x,
+                ylim = limits.y,
+                zlim = limits.z,
+                r = r,
+                plot.ratio = plot.ratio,
+                ticks.major = ticks.major,
+                ticks.minor = ticks.minor,
+                labels = labels,
+                polygons = polygons,
+                ellipse.lims = ellipse.lims))
+  }
+
+}
diff --git a/R/plot_Risoe.BINfileData.R b/R/plot_Risoe.BINfileData.R
new file mode 100644
index 0000000..df9dbc2
--- /dev/null
+++ b/R/plot_Risoe.BINfileData.R
@@ -0,0 +1,237 @@
+#' Plot single luminescence curves from a BIN file object
+#'
+#' Plots single luminescence curves from an object returned by the
+#' \link{read_BIN2R} function.
+#'
+#' \bold{Nomenclature}\cr
+#'
+#' See \code{\link{Risoe.BINfileData-class}}
+#'
+#' \bold{curve.transformation}\cr
+#'
+#' This argument allows transforming continuous wave (CW) curves to pseudo
+#' (linear) modulated curves. For the transformation, the corresponding
+#' transformation functions of this package are used. Currently, it is not
+#' possible to pass further arguments to the transformation functions. The
+#' argument works only for \code{ltype} \code{OSL} and \code{IRSL}.\cr
+#'
+#' \bold{Irradiation time}\cr
+#'
+#' Plotting the irradiation time (s) or the given dose (Gy) requires that the
+#' variable \code{IRR_TIME} has been set within the BIN-file.  This is normally
+#' done by using the 'Run Info' option within the Sequence Editor or by editing
+#' in R.
+#'
+#' @param BINfileData \link{Risoe.BINfileData-class} (\bold{required}):
+#' requires an S4 object returned by the \link{read_BIN2R} function.
+#' @param position \link{vector} (optional): option to limit the plotted curves
+#' by position (e.g. \code{position = 1}, \code{position = c(1,3,5)}).
+#' @param run \link{vector} (optional): option to limit the plotted curves by
+#' run (e.g., \code{run = 1}, \code{run = c(1,3,5)}).
+#' @param set \link{vector} (optional): option to limit the plotted curves by
+#' set (e.g., \code{set = 1}, \code{set = c(1,3,5)}).
+#' @param sorter \link{character} (with default): the plot output can be
+#' ordered by "POSITION","SET" or "RUN". POSITION, SET and RUN are options
+#' defined in the Risoe Sequence Editor.
+#' @param ltype \link{character} (with default): option to limit the plotted
+#' curves by the type of luminescence stimulation.  Allowed values:
+#' \code{"IRSL"}, \code{"OSL"},\code{"TL"}, \code{"RIR"}, \code{"RBR"}
+#' (corresponds to LM-OSL), \code{"RL"}.  All type of curves are plotted by
+#' default.
+#' @param curve.transformation \link{character} (optional): allows transforming
+#' CW-OSL and CW-IRSL curves to pseudo-LM curves via transformation functions.
+#' Allowed values are: \code{CW2pLM}, \code{CW2pLMi}, \code{CW2pHMi} and
+#' \code{CW2pPMi}. See details.
+#' @param dose_rate \link{numeric} (optional): dose rate of the irradiation
+#' source at the measurement date. If set, the given irradiation dose will be
+#' shown in Gy.  See details.
+#' @param temp.lab \link{character} (optional): option to allow for different
+#' temperature units. If no value is set, deg. C is chosen.
+#' @param cex.global \link{numeric} (with default): global scaling factor.
+#' @param \dots further undocumented plot arguments.
+#' @return Returns a plot.
+#' @note The function has been successfully tested for the Sequence Editor file
+#' output versions 3 and 4.
+#' @section Function version: 0.4.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France),\cr Michael Dietze, GFZ Potsdam (Germany)
+#' @seealso \code{\link{Risoe.BINfileData-class}},\code{\link{read_BIN2R}},
+#' \code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pPMi}},
+#' \code{\link{CW2pHMi}}
+#' @references Duller, G., 2007. Analyst. pp. 1-45.
+#' @keywords dplot
+#' @examples
+#'
+#'
+#' ##load data
+#' data(ExampleData.BINfileData, envir = environment())
+#'
+#' ##plot all curves from the first position to the desktop
+#' #pdf(file = "~/Desktop/CurveOutput.pdf", paper = "a4", height = 11, onefile = TRUE)
+#'
+#' ##example - load from *.bin file
+#' #BINfile<- file.choose()
+#' #BINfileData<-read_BIN2R(BINfile)
+#'
+#' #par(mfrow = c(4,3), oma = c(0.5,1,0.5,1))
+#' #plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1)
+#' #mtext(side = 4, BINfile, outer = TRUE, col = "blue", cex = .7)
+#' #dev.off()
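+#'
+#' ##example - plot only the OSL curves measured at position 1
+#' ##(a minimal sketch based on the example data set loaded above)
+#' #plot_Risoe.BINfileData(CWOSL.SAR.Data, position = 1, ltype = "OSL")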
+#'
+#' @export
+plot_Risoe.BINfileData <- function(
+  BINfileData,
+  position,
+  run,
+  set,
+  sorter = "POSITION",
+  ltype = c("IRSL","OSL","TL","RIR","RBR","RL"),
+  curve.transformation,
+  dose_rate,
+  temp.lab,
+  cex.global = 1,
+  ...
+){
+
+  ##check if the object is of type Risoe.BINfileData
+  if(class(BINfileData) != "Risoe.BINfileData"){stop("Wrong object! Object of type Risoe.BINfileData needed.")}
+
+  temp <- BINfileData
+
+  # Missing check ----------------------------------------------------------------
+
+  ##set plot position if missing
+  if(missing(position)==TRUE){position<-c(min(temp@METADATA[,"POSITION"]):max(temp@METADATA[,"POSITION"]))}
+  if(missing(run)==TRUE){run<-c(min(temp@METADATA[,"RUN"]):max(temp@METADATA[,"RUN"]))}
+  if(missing(set)==TRUE){set<-c(min(temp@METADATA[,"SET"]):max(temp@METADATA[,"SET"]))}
+
+  ##temp.lab
+  if(missing(temp.lab) == TRUE){temp.lab <- "\u00B0C"}
+
+
+  ##fun
+  extraArgs <- list(...) # read out additional arguments list
+  fun       <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+
+  # Ordering --------------------------------------------------------------------
+
+  ##(1) order by RUN, SET OR BY POSITION
+  if(sorter=="RUN"){
+    temp@METADATA<-temp@METADATA[order(temp@METADATA[,"RUN"]),]
+  }else if(sorter=="SET"){
+    temp@METADATA<-temp@METADATA[order(temp@METADATA[,"SET"]),]
+  }else {
+    temp@METADATA<-temp@METADATA[order(temp@METADATA[,"POSITION"]),]
+  }
+
+
+
+  # Select values for plotting ------------------------------------------------------------------
+
+  ##(2) set SEL for selected position
+
+  ##set all to FALSE
+  temp at METADATA[,"SEL"]<-FALSE
+
+  ##set TRUE
+  temp@METADATA[(temp@METADATA[,"POSITION"] %in% position)==TRUE &
+                  (temp@METADATA[,"RUN"] %in% run)==TRUE &
+                  (temp@METADATA[,"SET"] %in% set)==TRUE &
+                  (temp@METADATA[,"LTYPE"] %in% ltype)==TRUE,"SEL"]<-TRUE
+
+  ##------------------------------------------------------------------------##
+  ##PLOTTING
+  ##------------------------------------------------------------------------##
+  ##(3) plot curves
+  for(i in 1:length(temp@METADATA[,"ID"])){
+
+    ##print only if SEL == TRUE
+    if(temp@METADATA[i,"SEL"]==TRUE)
+    {
+
+      ##find measured unit
+      measured_unit<-if(temp@METADATA[i,"LTYPE"]=="TL"){" \u00B0C"}else{"s"}
+
+      ##set x and y values
+      values.x <- seq(temp@METADATA[i,"HIGH"]/temp@METADATA[i,"NPOINTS"],
+                      temp@METADATA[i,"HIGH"],by=temp@METADATA[i,"HIGH"]/temp@METADATA[i,"NPOINTS"])
+      values.y <- unlist(temp@DATA[temp@METADATA[i,"ID"]])
+      values.xy <- data.frame(values.x, values.y)
+
+      ##set curve transformation if wanted
+      if((temp@METADATA[i,"LTYPE"] == "OSL" | temp@METADATA[i,"LTYPE"] == "IRSL") &
+           missing(curve.transformation) == FALSE){
+
+        if(curve.transformation=="CW2pLM"){
+
+          values.xy <- CW2pLM(values.xy)
+
+        }else if(curve.transformation=="CW2pLMi"){
+
+          values.xy <- CW2pLMi(values.xy)[,1:2]
+
+        }else if(curve.transformation=="CW2pHMi"){
+
+          values.xy <- CW2pHMi(values.xy)[,1:2]
+
+        }else if(curve.transformation=="CW2pPMi"){
+
+          values.xy <- CW2pPMi(values.xy)[,1:2]
+
+        }else{
+
+          warning("Function for curve.transformation is unknown. No transformation is performed.")
+
+        }
+
+      }
+
+      ##plot graph
+      plot(values.xy,
+           main=paste("pos=", temp at METADATA[i,"POSITION"],", run=", temp at METADATA[i,"RUN"],
+                      ", set=", temp at METADATA[i,"SET"],sep=""
+           ),
+           type="l",
+           ylab=paste(temp at METADATA[i,"LTYPE"]," [cts/",round(temp at METADATA[i,"HIGH"]/temp at METADATA[i,"NPOINTS"],digits=3)," ",
+                      measured_unit,"]",sep=""),
+           xlab=if(measured_unit=="\u00B0C"){paste("temp. [",temp.lab,"]",sep="")}else{"time [s]"},
+           col=if(temp at METADATA[i,"LTYPE"]=="IRSL" | temp at METADATA[i,"LTYPE"]=="RIR"){"red"}
+           else if(temp at METADATA[i,"LTYPE"]=="OSL" | temp at METADATA[i,"LTYPE"]=="RBR"){"blue"}
+           else{"black"},
+           sub=if(temp at METADATA[i,"LTYPE"]=="TL"){paste("(",temp at METADATA[i,"RATE"]," K/s)",sep="")}else{},
+           lwd=1.2*cex.global,
+           cex=0.9*cex.global
+      )
+
+      ##add mtext for temperature
+
+      ##get temperature (stored differently depending on the file version)
+
+      temperature<-if(temp@METADATA[i,"VERSION"]=="03"){temp@METADATA[i,"AN_TEMP"]}
+      else{temp@METADATA[i,"TEMPERATURE"]}
+
+      ##mtext
+      mtext(side=3,
+            if(temp at METADATA[i,"LTYPE"]=="TL"){paste("TL to ",temp at METADATA[i,"HIGH"], " ",temp.lab,sep="")}
+            else{paste(temp at METADATA[i,"LTYPE"],"@",temperature," ",temp.lab ,sep="")},
+            cex=0.9*cex.global)
+
+      ##add mtext for irradiation
+      mtext(side=4,cex=0.8*cex.global, line=0.5,
+            if(temp at METADATA[i, "IRR_TIME"]!=0){
+
+              if(missing("dose_rate")==TRUE){
+                paste("dose = ",temp at METADATA[i, "IRR_TIME"], " s", sep="")
+              }else{
+                paste("dose = ",temp at METADATA[i, "IRR_TIME"]*dose_rate, " Gy", sep="")
+              }
+            }
+      )#end mtext
+
+    }#endif::selection
+
+  }#endforloop
+
+  if(fun==TRUE){sTeve()}
+
+}
diff --git a/R/plot_ViolinPlot.R b/R/plot_ViolinPlot.R
new file mode 100644
index 0000000..c9531d4
--- /dev/null
+++ b/R/plot_ViolinPlot.R
@@ -0,0 +1,269 @@
+#' Create a violin plot
+#'
+#' Draws a kernel density plot in combination with a boxplot in its middle. The shape of the violin
+#' is constructed using a mirrored density curve. This plot is especially designed for cases
+#' where the individual errors are zero or too small to be visualised. The idea for this plot is
+#' based on the 'volcano plot' in the ggplot2 package by Hadley Wickham and Winston Chang.
+#' The general idea for the violin plot seems to have been introduced by Hintze and Nelson (1998).
+#'
+#' The function passes several arguments to the functions \code{\link{plot}},
+#' \code{\link[stats]{density}} and \code{\link[graphics]{boxplot}}:
+#' Supported arguments are: \code{xlim}, \code{main}, \code{xlab},
+#' \code{ylab}, \code{col.violin}, \code{col.boxplot}, \code{mtext}, \code{cex}
+#'
+#' \bold{\code{Valid summary keywords}}\cr
+#'
+#' 'n', 'mean', 'median', 'sd.abs', 'sd.rel', 'se.abs', 'se.rel', 'skewness', 'kurtosis'
+#'
+#' @param data \code{\link{numeric}} or \code{\linkS4class{RLum.Results}}
+#' object (required): input data for plotting. Alternatively a \code{\link{data.frame}} or
+#' a \code{\link{matrix}} can be provided, but only the first column will be considered by the
+#' function
+#'
+#' @param boxplot \code{\link{logical}} (with default): enable or disable boxplot
+#'
+#' @param rug \code{\link{logical}} (with default): enable or disable rug
+#'
+#' @param summary \code{\link{character}} (optional): add statistic measures of
+#' centrality and dispersion to the plot. Can be one or more of several
+#' keywords. See details for available keywords.
+#'
+#' @param summary.pos \code{\link{numeric}} or \code{\link{character}} (with
+#' default): optional position keywords (cf., \code{\link{legend}})
+#' for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+#' specified to place the summary below the plot header. However, this latter
+#' option is only possible if \code{mtext} is not used.
+#'
+#' @param na.rm \code{\link{logical}} (with default): exclude NA values
+#' from the data set prior to any further operations.
+#'
+#' @param \dots further arguments and graphical parameters passed to
+#' \code{\link{plot.default}}, \code{\link[stats]{density}} and \code{\link{boxplot}}. See details for
+#' further information
+#'
+#' @note Although the code for this function was developed independently and just the idea for the plot
+#' was based on the 'ggplot2' package plot type 'volcano', it should be mentioned that, beyond this,
+#' two other R packages exist providing a possibility to produce this kind of plot, namely:
+#' 'vioplot' and 'violinmplot' (see References for details).
+#'
+#' @section Function version: 0.1.2
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @references
+#'
+#' Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot.
+#' R package version 0.2 http://CRAN.R-project.org/package=vioplot
+#'
+#' Hintze, J.L., Nelson, R.D., 1998. A Box Plot-Density Trace Synergism. The American Statistician 52, 181-184.
+#'
+#' Raphael W. Majeed (2012). violinmplot: Combination of violin plot with mean and standard deviation.
+#' R package version 0.2.1. http://CRAN.R-project.org/package=violinmplot
+#'
+#' Wickham. H (2009). ggplot2: elegant graphics for data analysis. Springer New York.
+#'
+#' @seealso \code{\link[stats]{density}}, \code{\link{plot}}, \code{\link{boxplot}}, \code{\link{rug}},
+#' \code{\link{calc_Statistics}}
+#'
+#' @examples
+#' ## read example data set
+#' data(ExampleData.DeValues, envir = environment())
+#' ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+#'
+#' ## create plot straightforward
+#' plot_ViolinPlot(data = ExampleData.DeValues)
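+#'
+#' ## create plot with a user-defined statistical summary in the top left
+#' ## corner (a minimal sketch using only documented summary keywords)
+#' plot_ViolinPlot(data = ExampleData.DeValues,
+#'                 summary = c("n", "median", "sd.abs"),
+#'                 summary.pos = "topleft")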
+#'
+#' @export
+plot_ViolinPlot <- function(
+  data,
+  boxplot = TRUE,
+  rug = TRUE,
+  summary = NULL,
+  summary.pos = "sub",
+  na.rm = TRUE,
+  ...
+) {
+
+
+  # Integrity tests and conversion --------------------------------------------------------------
+
+    ##Prechecks
+
+    if(missing(data)){
+      stop("[plot_ViolinPlot()] I don't know what to do, data input needed." )
+
+    }else{
+
+      ##check for RLum.Results object
+      if(is(data, "RLum.Results")){
+        data <- get_RLum(data, "data")
+
+      }
+
+      ##if data.frame or matrix
+      if(is(data, "data.frame") | is(data, "matrix")){
+        data <- data[,1]
+
+      }
+
+    }
+
+    ##Remove NA values and report how many were removed
+    if(na.rm){
+      n.NA <- sum(is.na(data))
+      data <- na.exclude(data)
+
+      if(n.NA > 0){
+        warning(paste("[plot_ViolinPlot()]", n.NA, "NA values removed!"), call. = FALSE)
+      }
+
+    }
+
+    #Further checks
+    if(!is(summary.pos, "character")){
+      stop("[plot_ViolinPlot()] argument 'summary.pos' needs to be of type character!")
+
+    }
+
+  ##stop if no values are left in data
+  if(length(data) == 0){
+    warning("[plot_ViolinPlot()] Actually it is rather hard to plot 0 values. NULL returned", call. = FALSE)
+    return()
+  }
+
+  # Pre-calculations ----------------------------------------------------------------------------
+
+
+  ##density for the violin
+  if(length(data)>1){
+    density <-
+      density(x = data,
+              bw = ifelse("bw" %in% names(list(...)),list(...)$bw,"nrd0"))
+
+  }else{
+    density <- NULL
+    warning("[plot_ViolinePlot()] single data point found, no density calculated.", call. = FALSE)
+
+  }
+
+
+  ##some statistical parameter, get rid of the weighted statistics
+  stat.summary <- suppressWarnings(calc_Statistics(as.data.frame(data), digits = 2)[["unweighted"]])
+
+    ##make valid summary string
+    if(is.null(summary)){
+      summary <- c("n","median")
+
+    }
+
+    ##at least show a warning for invalid keywords
+    if(!all(summary %in% names(stat.summary))){
+      warning(paste0("[plot_ViolinePlot()] At least one 'summary' keyword is invalid. Valid keywords are: ",
+                     paste(names(stat.summary), collapse = ", ")), call. = FALSE)
+    }
+
+    ##make sure that only valid keywords make it
+    summary <- summary[(summary %in% names(stat.summary))]
+
+    stat.text <-
+      paste(names(stat.summary[summary]), " = ", stat.summary[summary], collapse = " \n")
+
+    stat.mtext <-
+      paste(names(stat.summary[summary]), " = ", stat.summary[summary], collapse = " | ")
+
+
+
+
+
+  # Plot settings -------------------------------------------------------------------------------
+
+  ##set default values
+  plot.settings <- list(
+    xlim = if(!is.null(density)){range(density$x)}else{c(data[1]*0.9, data[1]*1.1)},
+    main = "Violin Plot",
+    xlab = expression(paste(D[e], " [a.u.]")),
+    ylab = if(!is.null(density)){"Density"}else{" "},
+    col.violin = rgb(0,0,0,0.2),
+    col.boxplot = NULL,
+    mtext = ifelse(summary.pos != 'sub', "", stat.mtext),
+    cex = 1
+  )
+
+  ##modify list accordingly
+  plot.settings <- modifyList(plot.settings, val = list(...))
+
+
+  # Plot ----------------------------------------------------------------------------------------
+
+  ##open empty plot area
+  plot(
+    NA,NA,
+    xlim = plot.settings$xlim,
+    ylim = c(0.2,1.8),
+    xlab = plot.settings$xlab,
+    ylab = plot.settings$ylab,
+    yaxt = "n",
+    main = plot.settings$main,
+    cex = plot.settings$cex
+  )
+
+  ##add polygon ... the violin
+  if(!is.null(density)){
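+    ## the violin is the density curve scaled to a maximum half-width of
+    ## 0.5 and mirrored around the horizontal centre line at y = 1,
+    ## where the boxplot is drawn afterwards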
+    polygon(
+      x = c(density$x, rev(density$x)),
+      y = c(1 + density$y / max(density$y) * 0.5,
+            rev(1 - density$y / max(density$y) * 0.5)),
+      col = plot.settings$col.violin,
+      border = plot.settings$col.violin
+    )
+
+
+  }
+
+  ##add the boxplot
+  if(boxplot){
+    boxplot(
+      data,
+      outline = TRUE,
+      boxwex = 0.4,
+      horizontal = TRUE,
+      axes = FALSE,
+      add = TRUE,
+      col = plot.settings$col.boxplot
+    )
+
+  }
+
+  ##add rug
+  if(rug){
+    rug(x = data)
+
+  }
+
+  ##add mtext
+  if(!is.null(plot.settings$mtext)){
+    mtext(side = 3, text = plot.settings$mtext)
+
+  }
+
+  ##add stat.text
+  if (summary.pos != "sub") {
+
+    valid_keywords <-
+      c(
+        "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right", "center"
+      )
+
+    if (any(
+      summary.pos %in% valid_keywords
+    )) {
+      legend(summary.pos, legend = stat.text, bty = "n")
+
+    }else{
+      warning_text <- paste0("Value provided for 'summary.pos' is not a valid keyword, valid keywords are:",
+                             paste(valid_keywords, collapse = ", "))
+      warning(warning_text)
+
+    }
+
+  }
+
+}
diff --git a/R/read_BIN2R.R b/R/read_BIN2R.R
new file mode 100644
index 0000000..52beeb9
--- /dev/null
+++ b/R/read_BIN2R.R
@@ -0,0 +1,1472 @@
+#' Import Risoe BIN-file into R
+#'
+#' Import a *.bin or a *.binx file produced by Risoe DA15 and DA20 TL/OSL
+#' readers into R.
+#'
+#' The binary data file is parsed byte by byte following the data structure
+#' published in the Appendices of the Analyst manual p. 42.\cr\cr For the
+#' general BIN-file structure, the reader is referred to the Risoe website:
+#' \code{http://www.nutech.dtu.dk/}
+#'
+#' @param file \code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+#' BIN/BINX file. If input is a \code{list} it should comprise only \code{character}s representing
+#' each valid path and BIN/BINX-file names.
+#' Alternatively the input character can be just a directory (path); in this case
+#' the function tries to detect and import all BIN/BINX files found in the directory.
+#'
+#' @param show.raw.values \link{logical} (with default): shows raw values from
+#' BIN file for \code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} without
+#' translation in characters. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param n.records \link{numeric} (optional): limits the number of imported
+#' records. Can be used in combination with \code{show.record.number} for
+#' debugging purposes, e.g. corrupt BIN-files. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param zero_data.rm \code{\link{logical}} (with default): remove erroneous data with no count
+#' values. As such data are usually not needed for the subsequent data analysis, they are removed
+#' by default. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param duplicated.rm \code{\link{logical}} (with default): remove duplicated entries if \code{TRUE}.
+#' This may happen due to an erroneously produced BIN/BINX-file. This option compares only
+#' predecessor and successor. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param position \code{\link{numeric}} (optional): imports only the selected position. Note:
+#' the import performance will not benefit from any selection made here.
+#' Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param fastForward \code{\link{logical}} (with default): if \code{TRUE}, for
+#' more efficient data processing only a list of \code{RLum.Analysis} objects is returned instead
+#' of a \link{Risoe.BINfileData-class} object. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param show.record.number \link{logical} (with default): shows record number
+#' of the imported record, for debugging usage only. Can be provided as \code{list} if \code{file} is a \code{list}.
+#'
+#' @param txtProgressBar \link{logical} (with default): enables or disables
+#' \code{\link{txtProgressBar}}.
+#'
+#' @param forced.VersionNumber \code{\link{integer}} (optional): allows one to bypass the
+#' version number check in the function by supplying an own value for cases where the
+#' BIN-file version is not supported. Can be provided as \code{list} if \code{file} is a \code{list}.\cr
+#' Note: Use this at your own risk; only supported BIN-file versions have been tested.
+#'
+#' @param pattern \code{\link{character}} (optional): argument that is used if only a path is provided.
+#' The argument will then be passed to the function \code{\link{list.files}} used internally to
+#' construct a \code{list} of wanted files
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables verbose mode
+#'
+#' @param \dots further arguments that will be passed to the function
+#' \code{\link{Risoe.BINfileData2RLum.Analysis}}. Please note that any matching argument
+#' automatically sets \code{fastForward = TRUE}
+#'
+#' @return Returns an S4 \link{Risoe.BINfileData-class} object containing two
+#' slots:\cr \item{METADATA}{A \link{data.frame} containing all variables
+#' stored in the bin-file.} \item{DATA}{A \link{list} containing a numeric
+#' \link{vector} of the measured data. The ID corresponds to the record ID in
+#' METADATA.}\cr
+#'
+#' If \code{fastForward = TRUE} a list of \code{\linkS4class{RLum.Analysis}} object is returned. The
+#' internal coercing is done using the function \code{\link{Risoe.BINfileData2RLum.Analysis}}
+#'
+#'
+#' @note The function works for BIN/BINX-format versions 03, 04, 06, 07 and 08. The
+#' version number depends on the used Sequence Editor.\cr\cr
+#'
+#' \bold{ROI data sets introduced with BIN-file version 8 are not supported and skipped during
+#' import.}
+#'
+#'
+#' @section Function version: 0.15.0
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France), Margret C. Fuchs, HZDR Freiberg, (Germany)
+#'
+#'
+#' @seealso \code{\link{write_R2BIN}}, \code{\linkS4class{Risoe.BINfileData}},
+#' \code{\link[base]{readBin}}, \code{\link{merge_Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\link[utils]{txtProgressBar}}, \code{\link{list.files}}
+#'
+#'
+#' @references
+#' DTU Nutech, 2016. The Sequence Editor, User's Manual, February 2016.
+#' \url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+#'
+#'
+#' @keywords IO
+#'
+#'
+#' @examples
+#'
+#'
+#' ##(1) import Risoe BIN-file to R (uncomment for usage)
+#'
+#' #FILE <- file.choose()
+#' #temp <- read_BIN2R(FILE)
+#' #temp
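+#'
+#' ##(2) import all BIN/BINX-files in a directory as a list of RLum.Analysis
+#' ##objects; a sketch only, the path is a placeholder (uncomment for usage)
+#' #temp <- read_BIN2R("[your path]", fastForward = TRUE)
+#' #temp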
+#'
+#' @export
+read_BIN2R <- function(
+  file,
+  show.raw.values = FALSE,
+  position = NULL,
+  n.records = NULL,
+  zero_data.rm = TRUE,
+  duplicated.rm = FALSE,
+  fastForward = FALSE,
+  show.record.number = FALSE,
+  txtProgressBar = TRUE,
+  forced.VersionNumber = NULL,
+  pattern = NULL,
+  verbose = TRUE,
+  ...
+){
+
+  # Self Call -----------------------------------------------------------------------------------
+  # Option (a): Input is a list, every element in the list will be treated as a file connection,
+  # so that many files can be read in at the same time
+  # Option (b): The input is just a path, the function tries to grep ALL BIN/BINX files in the
+  # directory and import them; if this is detected, we proceed as a list
+
+  if (is(file, "character")) {
+    if (is.null(pattern)) {
+      ##If this is not really a path we skip this here
+      if (dir.exists(file) & length(dir(file)) > 0) {
+        if (verbose) {
+          cat(
+            "[read_BIN2R()] Directory detected, trying to extract '*.bin'/'*.binx' files ...\n"
+          )
+        }
+        ##grep all BIN/BINX-files (case-insensitive, anchored at the file ending)
+        file <-
+          as.list(file.path(file, dir(
+            file, recursive = FALSE, pattern = "\\.binx?$", ignore.case = TRUE
+          )))
+
+      }
+
+
+    }else{
+      file <- as.list(list.files(file, pattern = pattern, full.names = TRUE, recursive = TRUE))
+
+
+    }
+
+  }
+
+  if (is(file, "list")) {
+
+    ##extend list of parameters
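+    ##(each per-file argument is recycled to the length of 'file';
+    ##scalar values are first wrapped in a list)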
+
+    ##position
+    position <- if(is(position, "list")){
+      rep(position, length = length(file))
+
+    }else{
+      rep(list(position), length = length(file))
+
+    }
+
+    ##n.records
+    n.records <- if(is(n.records, "list")){
+      rep(n.records, length = length(file))
+
+    }else{
+      rep(list(n.records), length = length(file))
+
+    }
+
+    ##zero_data.rm
+    zero_data.rm <- if(is(zero_data.rm, "list")){
+      rep(zero_data.rm, length = length(file))
+
+    }else{
+      rep(list(zero_data.rm), length = length(file))
+
+    }
+
+    ##duplicated.rm
+    duplicated.rm <- if(is(duplicated.rm, "list")){
+      rep(duplicated.rm, length = length(file))
+
+    }else{
+      rep(list(duplicated.rm), length = length(file))
+
+    }
+
+    ## show.raw.values
+    show.raw.values <- if(is(show.raw.values, "list")){
+      rep(show.raw.values, length = length(file))
+
+    }else{
+      rep(list(show.raw.values), length = length(file))
+
+    }
+
+    ## show.record.number
+    show.record.number <- if(is(show.record.number, "list")){
+      rep(show.record.number, length = length(file))
+
+    }else{
+      rep(list(show.record.number), length = length(file))
+
+    }
+
+    ##forced.VersionNumber
+    forced.VersionNumber <- if(is(forced.VersionNumber, "list")){
+      rep(forced.VersionNumber, length = length(file))
+
+    }else{
+      rep(list(forced.VersionNumber), length = length(file))
+
+    }
+
+    temp.return <- lapply(1:length(file), function(x) {
+      temp <- read_BIN2R(
+        file = file[[x]],
+        fastForward = fastForward,
+        position = position[[x]],
+        n.records = n.records[[x]],
+        zero_data.rm = zero_data.rm[[x]],
+        duplicated.rm = duplicated.rm[[x]],
+        show.raw.values = show.raw.values[[x]],
+        show.record.number = show.record.number[[x]],
+        txtProgressBar = txtProgressBar,
+        forced.VersionNumber = forced.VersionNumber[[x]],
+        verbose = verbose,
+        ...
+      )
+
+    })
+
+    ##return
+    if (fastForward) {
+      return(unlist(temp.return, recursive = FALSE))
+
+    }else{
+      return(temp.return)
+
+    }
+
+  }
+
+
+
+  # Integrity checks ------------------------------------------------------
+
+  ##check if file exists
+  if(!file.exists(file)){
+
+    stop("[read_BIN2R()] File does not exists!")
+
+  }
+
+  ##check if file is a BIN or BINX file
+  if(!any(c("BIN", "BINX", "bin", "binx") %in% tail(
+    unlist(strsplit(file, split = "\\.")), n = 1))){
+
+    stop("[read_BIN2R()] Input is not a file or not of type 'BIN' or 'BINX'!")
+
+  }
+
+  # Config ------------------------------------------------------------------
+
+  ##set supported BIN format version
+  VERSION.supported <- as.raw(c(03, 04, 06, 07, 08))
+
+
+  # Short file parsing to get number of records -------------------------------------------------
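+  ## first pass: only walk over the record headers to count the records and
+  ## check the version bytes; this allows pre-allocating the output below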
+
+  #open connection
+  con<-file(file, "rb")
+
+  ##get information about file size
+  file.size <- file.info(file)
+
+  ##read data up to the end of con
+
+  ##set ID
+  temp.ID <- 0
+
+
+  ##start for BIN-file check up
+  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="little"))>0) {
+
+     ##force version number
+    if(!is.null(forced.VersionNumber)){
+      temp.VERSION <- as.raw(forced.VersionNumber)
+    }
+
+    ##stop input if wrong VERSION
+    if((temp.VERSION%in%VERSION.supported) == FALSE){
+
+      if(temp.ID > 0){
+
+        if(is.null(n.records)){
+          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. Import limited to the first ", temp.ID-1," records."))
+
+        }else{
+          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. 'n.records' reset to ", temp.ID-1,"."))
+
+        }
+
+        ##set or reset n.records
+        n.records <- temp.ID-1
+        break()
+
+      }else{
+        ##show error message
+        error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file is currently not supported! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="")
+
+        ##close connection
+        close(con)
+
+        ##show error
+        stop(error.text)
+
+      }
+
+    }
+
+    #empty byte position
+    EMPTY<-readBin(con, what="raw", 1, size=1, endian="little")
+
+    if(temp.VERSION == 06 | temp.VERSION == 07 | temp.VERSION == 08){
+
+      ##GET record LENGTH
+      temp.LENGTH  <- readBin(con, what="int", 1, size=4, endian="little")
+
+      STEPPING <- readBin(con, what="raw", temp.LENGTH-6, size=1, endian="litte")
+
+    }else{
+
+      ##GET record LENGTH
+      temp.LENGTH  <- readBin(con, what="int", 1, size=2, endian="little")
+      STEPPING <- readBin(con, what="raw", temp.LENGTH-4, size=1, endian="litte")
+
+    }
+
+    temp.ID<-temp.ID+1
+
+    if(!is.null(n.records) && temp.ID == n.records){
+      break()
+
+    }
+
+  }
+
+  ##close con
+  close(con)
+
+  ##set n.records
+  if(is.null(n.records)){
+    n.records <- temp.ID
+
+  }
+  rm(temp.ID)
+
+
+
+# Set Lookup tables  --------------------------------------------------------------------------
+
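+  ##the BIN/BINX format stores LTYPE, DTYPE and LIGHTSOURCE as integer codes;
+  ##the tables below translate these codes into characters
+  ##(no translation takes place if 'show.raw.values = TRUE')
+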
+  ##LTYPE
+  LTYPE.lookup <- c(
+    "0" = "TL",
+    "1" = "OSL",
+    "2" = "IRSL",
+    "3" = "M-IR",
+    "4" = "M-VIS",
+    "5" = "TOL",
+    "6" = "TRPOSL",
+    "7" = "RIR",
+    "8" = "RBR",
+    "9" = "USER",
+    "10" = "POSL",
+    "11" = "SGOSL",
+    "12" = "RL",
+    "13" = "XRF"
+  )
+
+  ##DTYPE
+  DTYPE.lookup <-
+    c(
+      "0" = "Natural",
+      "1" = "N+dose",
+      "2" = "Bleach",
+      "3" = "Bleach+dose",
+      "4" = "Natural (Bleach)",
+      "5" = "N+dose (Bleach)",
+      "6" = "Dose",
+      "7" = "Background"
+    )
+
+  ##LIGHTSOURCE
+  LIGHTSOURCE.lookup <- c(
+    "0" = "None",
+    "1" = "Lamp",
+    "2" = "IR diodes/IR Laser",
+    "3" = "Calibration LED",
+    "4" = "Blue Diodes",
+    "5" = "White light",
+    "6" = "Green laser (single grain)",
+    "7" = "IR laser (single grain)"
+  )
+
+
+  ##PRESET VALUES
+  temp.CURVENO <- NA
+  temp.FNAME <- NA
+  temp.MEASTEMP <- NA
+  temp.IRR_UNIT <- NA
+  temp.IRR_DOSERATE <- NA
+  temp.IRR_DOSERATEERR <- NA
+  temp.TIMESINCEIRR <- NA
+  temp.TIMETICK <- NA
+  temp.ONTIME <- NA
+  temp.OFFTIME <- NA
+  temp.STIMPERIOD <- NA
+  temp.GATE_ENABLED <- raw(length = 1)
+  temp.ENABLE_FLAGS <- raw(length = 1)
+  temp.GATE_START <- NA
+  temp.GATE_STOP <- NA
+  temp.GATE_END <- NA
+  temp.PTENABLED <- raw(length = 1)
+  temp.DTENABLED <- raw(length = 1)
+  temp.DEADTIME <- NA
+  temp.MAXLPOWER <- NA
+  temp.XRF_ACQTIME <- NA
+  temp.XRF_HV <- NA
+  temp.XRF_CURR <- NA
+  temp.XRF_DEADTIMEF <- NA
+  temp.DETECTOR_ID <- NA
+  temp.LOWERFILTER_ID <- NA
+  temp.UPPERFILTER_ID <- NA
+  temp.ENOISEFACTOR <- NA
+  temp.SEQUENCE <- NA
+  temp.GRAIN <- NA
+  temp.GRAINNUMBER <- NA
+  temp.LIGHTPOWER <- NA
+  temp.LPOWER <- NA
+  temp.RECTYPE <- 0
+  temp.MARKPOS_X1 <- NA
+  temp.MARKPOS_Y1 <- NA
+  temp.MARKPOS_X2 <- NA
+  temp.MARKPOS_Y2 <- NA
+  temp.MARKPOS_X3 <- NA
+  temp.MARKPOS_Y3 <- NA
+  temp.EXTR_START <- NA
+  temp.EXTR_END <- NA
+
+  ##SET length of entire record
+  n.length <- n.records
+
+  ##initialise data.table
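+  ##(pre-allocated with one row per record; fields are later filled by
+  ##reference via data.table's `:=` operator)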
+  results.METADATA <- data.table::data.table(
+
+    ID = integer(length = n.length),
+    SEL = logical(length = n.length),
+    VERSION = numeric(length = n.length),
+    LENGTH = integer(length = n.length),
+    PREVIOUS = integer(length = n.length),
+    NPOINTS = integer(length = n.length),
+    RECTYPE = integer(length = n.length),
+
+    RUN = integer(length = n.length),
+    SET = integer(length = n.length),
+    POSITION = integer(length = n.length),
+    GRAIN = integer(length = n.length),
+    GRAINNUMBER = integer(length = n.length),
+    CURVENO = integer(length = n.length),
+    XCOORD = integer(length = n.length),
+    YCOORD = integer(length = n.length),
+    SAMPLE = character(length = n.length),
+    COMMENT = character(length = n.length),
+
+    SYSTEMID = integer(length = n.length),
+    FNAME = character(length = n.length),
+    USER = character(length = n.length),
+    TIME = character(length = n.length),
+    DATE = character(length = n.length),
+
+    DTYPE = character(length = n.length),
+    BL_TIME = numeric(length = n.length),
+    BL_UNIT = integer(length = n.length),
+    NORM1 = numeric(length = n.length),
+    NORM2 = numeric(length = n.length),
+    NORM3 = numeric(length = n.length),
+    BG = numeric(length = n.length),
+    SHIFT = integer(length = n.length),
+    TAG = integer(length = n.length),
+
+    LTYPE = character(length = n.length),
+    LIGHTSOURCE = character(length = n.length),
+    LPOWER = numeric(length = n.length),
+    LIGHTPOWER = numeric(length = n.length),
+    LOW = numeric(length = n.length),
+    HIGH = numeric(length = n.length),
+    RATE = numeric(length = n.length),
+    TEMPERATURE = numeric(length = n.length),
+    MEASTEMP = numeric(length = n.length),
+    AN_TEMP = numeric(length = n.length),
+    AN_TIME = numeric(length = n.length),
+    TOLDELAY = integer(length = n.length),
+    TOLON = integer(length = n.length),
+    TOLOFF = integer(length = n.length),
+    IRR_TIME = numeric(length = n.length),
+    IRR_TYPE = integer(length = n.length),
+    IRR_UNIT = integer(length = n.length),
+    IRR_DOSERATE = numeric(length = n.length),
+    IRR_DOSERATEERR = numeric(length = n.length),
+    TIMESINCEIRR = numeric(length = n.length),
+    TIMETICK = numeric(length = n.length),
+    ONTIME = numeric(length = n.length),
+    OFFTIME = numeric(length = n.length),
+    STIMPERIOD = integer(length = n.length),
+    GATE_ENABLED = numeric(length = n.length),
+    ENABLE_FLAGS = numeric(length = n.length),
+    GATE_START  = numeric(length = n.length),
+    GATE_STOP = numeric(length = n.length),
+    PTENABLED = numeric(length = n.length),
+    DTENABLED = numeric(length = n.length),
+    DEADTIME = numeric(length = n.length),
+    MAXLPOWER = numeric(length = n.length),
+    XRF_ACQTIME = numeric(length = n.length),
+    XRF_HV = numeric(length = n.length),
+    XRF_CURR = numeric(length = n.length),
+    XRF_DEADTIMEF = numeric(length = n.length),
+
+    DETECTOR_ID = integer(length = n.length),
+    LOWERFILTER_ID = integer(length = n.length),
+    UPPERFILTER_ID = integer(length = n.length),
+    ENOISEFACTOR = numeric(length = n.length),
+    MARKPOS_X1 = numeric(length = n.length),
+    MARKPOS_Y1 = numeric(length = n.length),
+    MARKPOS_X2 = numeric(length = n.length),
+    MARKPOS_Y2 = numeric(length = n.length),
+    MARKPOS_X3 = numeric(length = n.length),
+    MARKPOS_Y3 = numeric(length = n.length),
+    EXTR_START = numeric(length = n.length),
+    EXTR_END = numeric(length = n.length),
+
+    SEQUENCE = character(length = n.length)
+
+  ) #end set data table
+
+
+  #set variable for DPOINTS handling
+  results.DATA<-list()
+
+  ##set list for RESERVED values
+  results.RESERVED <- rep(list(list()), n.length)
+
+  # Open Connection ---------------------------------------------------------
+
+  ##show warning if version number check has been cheated
+
+  if(!is.null(forced.VersionNumber)){
+    warning("Argument 'forced.VersionNumber' has been used. BIN-file version might be not supported!")
+  }
+
+  #open connection
+  con<-file(file, "rb")
+
+  ##get information about file size
+  file.size<-file.info(file)
+
+  ##output
+  if(verbose){cat(paste("\n[read_BIN2R()]\n\t >> ",file,sep=""), fill=TRUE)}
+
+  ##set progressbar
+  if(txtProgressBar & verbose){
+    pb<-txtProgressBar(min=0,max=file.size$size, char="=", style=3)
+  }
+
+  ##read data up to the end of con
+
+  ##set ID
+  temp.ID <- 0
+
+
+  # LOOP --------------------------------------------------------------------
+
+  ##start loop for import BIN data
+  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="little"))>0) {
+
+    ##force version number
+    if(!is.null(forced.VersionNumber)){
+      temp.VERSION <- as.raw(forced.VersionNumber)
+    }
+
+    ##stop input if wrong VERSION
+    if((temp.VERSION%in%VERSION.supported) == FALSE){
+
+      ##close connection
+      close(con)
+
+      ##show error message
+      error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file is currently not supported! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="")
+
+      stop(error.text)
+
+    }
+
+    ##print record ID for debugging purposes
+    if(verbose){
+      if(show.record.number == TRUE){
+
+
+
+        cat(temp.ID,",", sep = "")
+        if(temp.ID%%10==0){
+          cat("\n")
+        }
+      }
+   }
+
+
+    #empty byte position
+    EMPTY<-readBin(con, what="raw", 1, size=1, endian="little")
+
+    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    # BINX FORMAT SUPPORT -----------------------------------------------------
+    if(temp.VERSION == 06 | temp.VERSION == 07 | temp.VERSION == 08){
+
+      ##(1) Header size and structure
+      ##LENGTH, PREVIOUS, NPOINTS, LTYPE
+      temp <- readBin(con, what="int", 3, size=4, endian="little")
+
+      temp.LENGTH <- temp[1]
+      temp.PREVIOUS <- temp[2]
+      temp.NPOINTS <- temp[3]
+
+      ##for temp.VERSION == 08
+      ##RECTYPE
+      if(temp.VERSION == 08){
+        temp.RECTYPE <- readBin(con, what="int", 1, size=1, endian="little", signed = FALSE)
+        if(temp.RECTYPE == 128){
+          STEPPING<-readBin(con, what="raw", temp.LENGTH)
+
+          warning("[read_BIN2R()] ROI definition in data set detected, but currently not supported, skipped!", call. = FALSE)
+
+          next()
+        }
+      }
+
+
+      ##(2) Sample characteristics
+      ##RUN, SET, POSITION, GRAINNUMBER, CURVENO, XCOORD, YCOORD
+      temp <- readBin(con, what="int", 7, size=2, endian="little")
+
+      temp.RUN <- temp[1]
+      temp.SET <- temp[2]
+      temp.POSITION <- temp[3]
+      temp.GRAINNUMBER <- temp[4]
+      temp.CURVENO <- temp[5]
+      temp.XCOORD <- temp[6]
+      temp.YCOORD <- temp[7]
+
+      ##SAMPLE, COMMENT
+      ##SAMPLE
+      SAMPLE_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.SAMPLE<-readChar(con, SAMPLE_SIZE, useBytes=TRUE)
+      #set to 20 (manual)
+
+      #step forward in con
+      if(20-c(SAMPLE_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (20-c(SAMPLE_SIZE)),
+                          size=1, endian="little")
+      }
+
+      ##COMMENT
+      COMMENT_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.COMMENT<-readChar(con, COMMENT_SIZE, useBytes=TRUE) #set to 80 (manual)
+
+      #step forward in con
+      if(80-c(COMMENT_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (80-c(COMMENT_SIZE)),
+                          size=1, endian="little")
+      }
+
+      ##(3) Instrument and sequence characteristic
+      ##SYSTEMID
+      temp.SYSTEMID <- readBin(con, what="int", 1, size=2, endian="little")
+
+      ##FNAME
+      FNAME_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##correct for 0 file name length
+      if(length(FNAME_SIZE)>0){
+        temp.FNAME<-readChar(con, FNAME_SIZE, useBytes=TRUE) #set to 100 (manual)
+      }else{
+        FNAME_SIZE <- 0
+      }
+
+      #step forward in con
+      if(100-c(FNAME_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (100-c(FNAME_SIZE)),
+                          size=1, endian="little")
+      }
+
+      ##USER
+      USER_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##correct for 0 user size length
+      if (length(USER_SIZE) > 0) {
+        temp.USER <-
+          readChar(con, USER_SIZE, useBytes = TRUE) #set to 30 (manual)
+      }else{
+        USER_SIZE <- 0
+
+      }
+
+      #step forward in con
+      if(30-c(USER_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (30-c(USER_SIZE)),
+                          size=1, endian="little")
+      }
+
+      ##TIME
+      TIME_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##time size corrections for wrong time formats; set n to 6 for all values
+      ##according to the handbook of Geoff Duller, 2007
+      if(length(TIME_SIZE)>0){
+        temp.TIME<-readChar(con, TIME_SIZE, useBytes=TRUE)
+      }else{
+        TIME_SIZE <- 0
+
+      }
+
+      if(6-TIME_SIZE>0){
+
+        STEPPING<-readBin(con, what="raw", (6-TIME_SIZE),
+                          size=1, endian="little")
+      }
+
+
+      ##DATE
+      DATE_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##date size corrections for wrong date formats; set n to 6 for all values
+      ##according to the handbook of Geoff Duller, 2007
+      DATE_SIZE<-6
+      temp.DATE<-readChar(con, DATE_SIZE, useBytes=TRUE)
+
+
+      ##(4) Analysis
+
+      ##DTYPE
+      temp.DTYPE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##BL_TIME
+      temp.BL_TIME<-readBin(con, what="double", 1, size=4, endian="little")
+
+      ##BL_UNIT
+      temp.BL_UNIT<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##NORM1, NORM2, NORM3, BG
+      temp <- readBin(con, what="double", 4, size=4, endian="little")
+
+      temp.NORM1 <- temp[1]
+      temp.NORM2 <- temp[2]
+      temp.NORM3 <- temp[3]
+      temp.BG <- temp[4]
+
+      ##SHIFT
+      temp.SHIFT<- readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##TAG
+      temp.TAG <- readBin(con, what="int", 1, size=1, endian="little")
+
+      ##RESERVED
+      temp.RESERVED1 <-readBin(con, what="raw", 20, size=1, endian="little")
+
+      ##(5) Measurement characteristics
+
+      ##LTYPE
+      temp.LTYPE <- readBin(con, what="int", 1, size=1, endian="little")
+
+      ##LIGHTSOURCE
+      temp.LIGHTSOURCE <- readBin(con, what="int", 1, size=1, endian="little")
+
+      ##LIGHTPOWER, LOW, HIGH, RATE
+      temp <- readBin(con, what="double", 4, size=4, endian="little")
+
+      temp.LIGHTPOWER <- temp[1]
+      temp.LOW <- temp[2]
+      temp.HIGH <- temp[3]
+      temp.RATE <- temp[4]
+
+      ##TEMPERATURE
+      temp.TEMPERATURE <- readBin(con, what="int", 1, size=2, endian="little")
+
+      ##MEASTEMP
+      temp.MEASTEMP <- readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##AN_TEMP
+      temp.AN_TEMP <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##AN_TIME
+      temp.AN_TIME <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##DELAY, ON, OFF
+      temp <- readBin(con, what="int", 3, size=2, endian="little")
+
+      temp.TOLDELAY <- temp[1]
+      temp.TOLON <- temp[2]
+      temp.TOLOFF <- temp[3]
+
+      ##IRR_TIME
+      temp.IRR_TIME <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##IRR_TYPE
+      temp.IRR_TYPE <- readBin(con, what="int", 1, size=1, endian="little")
+
+      ##IRR_DOSERATE
+      temp.IRR_DOSERATE <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##IRR_DOSERATEERR
+      temp.IRR_DOSERATEERR <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##TIMESINCEIRR
+      temp.TIMESINCEIRR <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##TIMETICK
+      temp.TIMETICK <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ##ONTIME
+      temp.ONTIME <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##STIMPERIOD
+      temp.STIMPERIOD <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##GATE_ENABLED
+      temp.GATE_ENABLED <- readBin(con, what="raw", 1, size=1, endian="little")
+
+      ##GATE_START
+      temp.GATE_START <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##GATE_STOP
+      temp.GATE_STOP <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##PTENABLED
+      temp.PTENABLED <- readBin(con, what="raw", 1, size=1, endian="little")
+
+      ##DTENABLED
+      temp.DTENABLED <- readBin(con, what="raw", 1, size=1, endian="little")
+
+      ##DEADTIME, MAXLPOWER, XRF_ACQTIME, XRF_HV
+      temp <- readBin(con, what="double", 4, size=4, endian="little")
+
+      temp.DEADTIME <- temp[1]
+      temp.MAXLPOWER <- temp[2]
+      temp.XRF_ACQTIME <- temp[3]
+      temp.XRF_HV <- temp[4]
+
+      ##XRF_CURR
+      temp.XRF_CURR <- readBin(con, what="integer", 1, size=4, endian="little")
+
+      ##XRF_DEADTIMEF
+      temp.XRF_DEADTIMEF <- readBin(con, what="double", 1, size=4, endian="little")
+
+      ###Account for differences between V6 and V7
+      if(temp.VERSION == 06){
+
+        ##RESERVED
+        temp.RESERVED2<-readBin(con, what="raw", 24, size=1, endian="little")
+
+
+      }else{
+
+        ##DETECTOR_ID
+        temp.DETECTOR_ID <- readBin(con, what="int", 1, size=1, endian="little")
+
+        ##LOWERFILTER_ID, UPPERFILTER_ID
+        temp <- readBin(con, what="int", 2, size=2, endian="little")
+
+        temp.LOWERFILTER_ID <- temp[1]
+        temp.UPPERFILTER_ID <- temp[2]
+
+        ##ENOISEFACTOR
+        temp.ENOISEFACTOR <- readBin(con, what="double", 1, size=4, endian="little")
+
+        ##CHECK FOR VERSION 08
+        if(temp.VERSION == 07){
+
+           ##RESERVED for version 07
+          temp.RESERVED2<-readBin(con, what="raw", 15, size=1, endian="little")
+
+        }else{
+
+          ##MARKER_POSITION
+          temp <- readBin(con, what="double", 6, size=4, endian="little")
+
+            temp.MARKPOS_X1 <- temp[1]
+            temp.MARKPOS_Y1 <- temp[2]
+            temp.MARKPOS_X2 <- temp[3]
+            temp.MARKPOS_Y2 <- temp[4]
+            temp.MARKPOS_X3 <- temp[5]
+            temp.MARKPOS_Y3 <- temp[6]
+
+
+          ###EXTR_START, EXTR_END
+          temp <- readBin(con, what="double", 2, size=4, endian="little")
+
+            temp.EXTR_START <- temp[1]
+            temp.EXTR_END <- temp[2]
+
+          temp.RESERVED2<-readBin(con, what="raw", 42, size=1, endian="little")
+
+        }
+
+
+      }
+
+      #DPOINTS
+      temp.DPOINTS<-readBin(con, what="integer", temp.NPOINTS, size=4, endian="little")
+
+    }else if(temp.VERSION == 04 | temp.VERSION == 03){
+      ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+      ##START BIN FILE FORMAT SUPPORT  (vers. 03 and 04)
+      ##LENGTH, PREVIOUS, NPOINTS, LTYPE
+
+      temp <- readBin(con, what="int", 3, size=2, endian="little")
+
+      temp.LENGTH <- temp[1]
+      temp.PREVIOUS <- temp[2]
+      temp.NPOINTS <- temp[3]
+
+      ##LTYPE
+      temp.LTYPE<-readBin(con, what="int", 1, size=1, endian="little")
+
+
+      ##LOW, HIGH, RATE
+      temp <- readBin(con, what="double", 3, size=4, endian="little")
+
+      temp.LOW <- temp[1]
+      temp.HIGH <- temp[2]
+      temp.RATE <- temp[3]
+
+
+      temp.TEMPERATURE<-readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##XCOORD, YCOORD, TOLDELAY, TOLON, TOLOFF
+      temp <- readBin(con, what="integer", 5, size=2, endian="little")
+
+      temp.XCOORD <- temp[1]
+      temp.YCOORD <- temp[2]
+      temp.TOLDELAY <- temp[3]
+      temp.TOLON <- temp[4]
+      temp.TOLOFF <- temp[5]
+
+
+      ##POSITION
+      temp.POSITION<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##RUN
+      temp.RUN<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##TIME
+      TIME_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+
+      ##time size corrections for wrong time formats; set n to 6 for all values
+      ##according to the handbook of Geoff Duller, 2007
+      TIME_SIZE<-6
+      temp.TIME<-readChar(con, TIME_SIZE, useBytes=TRUE)
+
+
+      ##DATE
+      DATE_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##date size corrections for wrong date formats; set n to 6 for all values
+      ##according to the handbook of Geoff Duller, 2007
+      DATE_SIZE<-6
+      temp.DATE<-readChar(con, DATE_SIZE, useBytes=TRUE)
+
+
+      ##SEQUENCE
+      SEQUENCE_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.SEQUENCE<-readChar(con, SEQUENCE_SIZE, useBytes=TRUE)
+
+      #step forward in con
+      if(8-SEQUENCE_SIZE>0){
+        STEPPING<-readBin(con, what="raw", (8-c(SEQUENCE_SIZE)),size=1, endian="little")
+      }
+
+
+      ##USER
+      USER_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.USER<-readChar(con, USER_SIZE, useBytes=FALSE)
+
+      #step forward in con
+      if(8-c(USER_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (8-c(USER_SIZE)), size=1, endian="little")
+      }
+
+      ##DTYPE
+      temp.DTYPE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##IRR_TIME
+      temp.IRR_TIME<-readBin(con, what="double", 1, size=4, endian="little")
+
+      ##IRR_TYPE
+      temp.IRR_TYPE<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##IRR_UNIT
+      temp.IRR_UNIT<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##BL_TIME
+      temp.BL_TIME<-readBin(con, what="double", 1, size=4, endian="little")
+
+      ##BL_UNIT
+      temp.BL_UNIT<-readBin(con, what="int", 1, size=1, endian="little")
+
+      ##AN_TEMP, AN_TIME, NORM1, NORM2, NORM3, BG
+      temp <- readBin(con, what="double", 6, size=4, endian="little")
+
+      temp.AN_TEMP <- temp[1]
+      temp.AN_TIME <- temp[2]
+      temp.NORM1 <- temp[3]
+      temp.NORM2 <- temp[4]
+      temp.NORM3 <- temp[5]
+      temp.BG <- temp[6]
+
+      ##SHIFT
+      temp.SHIFT<-readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##SAMPLE
+      SAMPLE_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.SAMPLE<-readChar(con, SAMPLE_SIZE, useBytes=TRUE) #set to 20 (manual)
+
+      #step forward in con
+      if(20-c(SAMPLE_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (20-c(SAMPLE_SIZE)), size=1, endian="little")
+      }
+
+      ##COMMENT
+      COMMENT_SIZE<-readBin(con, what="int", 1, size=1, endian="little")
+      temp.COMMENT<-readChar(con, COMMENT_SIZE, useBytes=TRUE) #set to 80 (manual)
+
+      #step forward in con
+      if(80-c(COMMENT_SIZE)>0){
+        STEPPING<-readBin(con, what="raw", (80-c(COMMENT_SIZE)), size=1, endian="little")
+      }
+
+      ##LIGHTSOURCE, SET, TAG
+      temp <- readBin(con, what="int", 3, size=1, endian="little")
+
+      temp.LIGHTSOURCE <- temp[1]
+      temp.SET <- temp[2]
+      temp.TAG <- temp[3]
+
+
+      ##GRAIN
+      temp.GRAIN<-readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##LPOWER
+      temp.LPOWER<-readBin(con, what="double", 1, size=4, endian="little")
+
+      ##SYSTEMID
+      temp.SYSTEMID<-readBin(con, what="integer", 1, size=2, endian="little")
+
+      ##Unfortunately an inconsistent BIN-file structure forces a differentiation ...
+      if(temp.VERSION == 03){
+
+        ##RESERVED
+        temp.RESERVED1<-readBin(con, what="raw", 36, size=1, endian="little")
+
+        ##ONTIME, OFFTIME
+        temp <- readBin(con, what="double", 2, size=4, endian="little")
+
+        temp.ONTIME <- temp[1]
+        temp.OFFTIME <- temp[2]
+
+
+        ##Enable flags  #GateEnabled for v 06
+        temp.ENABLE_FLAGS <- readBin(con, what="raw", 1, size=1, endian="little")
+        temp.GATE_ENABLED <- temp.ENABLE_FLAGS
+
+        ##ONGATEDELAY, OFFGATEDELAY
+        temp <- readBin(con, what="double", 2, size=4, endian="little")
+
+        temp.GATE_START <- temp[1]
+        temp.GATE_STOP <- temp[2]
+
+        ##RESERVED
+        temp.RESERVED2<-readBin(con, what="raw", 1, size=1, endian="little")
+
+
+      }else{
+
+        ##RESERVED
+        temp.RESERVED1<-readBin(con, what="raw", 20, size=1, endian="little")
+
+        ##CURVENO
+        temp.CURVENO <- readBin(con, what="integer", 1, size=2, endian="little")
+
+        ##TIMETICK
+        temp.TIMETICK <- readBin(con, what="double", 1, size=4, endian="little")
+
+        ##ONTIME, STIMPERIOD
+        temp <- readBin(con, what="integer", 2, size=4, endian="little")
+
+        temp.ONTIME <- temp[1]
+        temp.STIMPERIOD <- temp[2]
+
+        ##GATE_ENABLED
+        temp.GATE_ENABLED <- readBin(con, what="raw", 1, size=1, endian="little")
+
+        ##ONGATEDELAY, OFFGATEDELAY
+        temp <- readBin(con, what="double", 2, size=4, endian="little")
+
+        temp.GATE_START <- temp[1]
+        temp.GATE_END <- temp[2]
+        temp.GATE_STOP <- temp.GATE_END
+
+        ##PTENABLED
+        temp.PTENABLED <- readBin(con, what="raw", 1, size=1, endian="little")
+
+        ##RESERVED
+        temp.RESERVED2<-readBin(con, what="raw", 10, size=1, endian="little")
+
+      }
+
+      #DPOINTS
+      temp.DPOINTS<-readBin(con, what="integer", temp.NPOINTS, size=4, endian="little")
+
+
+    }else{
+
+      stop("[read_BIN2R()] Unsupported BIN/BINX-file version.")
+
+    }
+
+    #endif:format support
+    ##END BIN FILE FORMAT SUPPORT
+    ## ==========================================================================#
+
+    #SET UNIQUE ID
+    temp.ID <- temp.ID+1
+
+     ##update progress bar
+    if(txtProgressBar & verbose){
+      setTxtProgressBar(pb, seek(con,origin="current"))
+    }
+
+    ##set for equal values with different names
+    if(!is.na(temp.GRAINNUMBER)){temp.GRAIN <- temp.GRAINNUMBER}
+    if(!is.na(temp.GRAIN)){temp.GRAINNUMBER <- temp.GRAIN}
+
+    if(!is.na(temp.LIGHTPOWER)){temp.LPOWER <- temp.LIGHTPOWER}
+    if(!is.na(temp.LPOWER)){temp.LIGHTPOWER <- temp.LPOWER}
+
+    temp.SEL <- temp.TAG == 1
+
+    ##replace values in the data.table with values
+    results.METADATA[temp.ID, `:=` (
+      ID = temp.ID,
+      SEL = temp.SEL,
+      VERSION = as.numeric(temp.VERSION),
+      LENGTH = temp.LENGTH,
+      PREVIOUS = temp.PREVIOUS,
+      NPOINTS = temp.NPOINTS,
+      RECTYPE = temp.RECTYPE,
+      RUN = temp.RUN,
+      SET = temp.SET,
+      POSITION = temp.POSITION,
+      GRAIN = temp.GRAIN,
+      GRAINNUMBER = temp.GRAINNUMBER,
+      CURVENO = temp.CURVENO,
+      XCOORD = temp.XCOORD,
+      YCOORD = temp.YCOORD,
+      SAMPLE = temp.SAMPLE,
+      COMMENT = temp.COMMENT,
+      SYSTEMID = temp.SYSTEMID,
+      FNAME = temp.FNAME,
+      USER = temp.USER,
+      TIME = temp.TIME,
+      DATE = temp.DATE,
+      DTYPE = as.character(temp.DTYPE),
+      BL_TIME = temp.BL_TIME,
+      BL_UNIT = temp.BL_UNIT,
+      NORM1 = temp.NORM1,
+      NORM2 = temp.NORM2,
+      NORM3 = temp.NORM3,
+      BG = temp.BG,
+      SHIFT = temp.SHIFT,
+      TAG = temp.TAG,
+      LTYPE = as.character(temp.LTYPE),
+      LIGHTSOURCE = as.character(temp.LIGHTSOURCE),
+      LPOWER = temp.LPOWER,
+      LIGHTPOWER = temp.LIGHTPOWER,
+      LOW = temp.LOW,
+      HIGH = temp.HIGH,
+      RATE = temp.RATE,
+      TEMPERATURE = temp.TEMPERATURE,
+      MEASTEMP = temp.MEASTEMP,
+      AN_TEMP = temp.AN_TEMP,
+      AN_TIME = temp.AN_TIME,
+      TOLDELAY = temp.TOLDELAY,
+      TOLON = temp.TOLON,
+      TOLOFF = temp.TOLOFF,
+      IRR_TIME = temp.IRR_TIME,
+      IRR_TYPE = temp.IRR_TYPE,
+      IRR_UNIT = temp.IRR_UNIT,
+      IRR_DOSERATE = temp.IRR_DOSERATE,
+      IRR_DOSERATEERR = temp.IRR_DOSERATEERR,
+      TIMESINCEIRR = temp.TIMESINCEIRR,
+      TIMETICK = temp.TIMETICK,
+      ONTIME = temp.ONTIME,
+      OFFTIME = temp.OFFTIME,
+      STIMPERIOD = temp.STIMPERIOD,
+      GATE_ENABLED = as.numeric(temp.GATE_ENABLED),
+      ENABLE_FLAGS = as.numeric(temp.ENABLE_FLAGS),
+      GATE_START = temp.GATE_START,
+      GATE_STOP = temp.GATE_STOP,
+      PTENABLED = as.numeric(temp.PTENABLED),
+      DTENABLED = as.numeric(temp.DTENABLED),
+      DEADTIME = temp.DEADTIME,
+      MAXLPOWER = temp.MAXLPOWER,
+      XRF_ACQTIME = temp.XRF_ACQTIME,
+      XRF_HV = temp.XRF_HV,
+      XRF_CURR = temp.XRF_CURR,
+      XRF_DEADTIMEF = temp.XRF_DEADTIMEF,
+      DETECTOR_ID = temp.DETECTOR_ID,
+      LOWERFILTER_ID = temp.LOWERFILTER_ID,
+      UPPERFILTER_ID = temp.UPPERFILTER_ID,
+      ENOISEFACTOR = temp.ENOISEFACTOR,
+      MARKPOS_X1 = temp.MARKPOS_X1,
+      MARKPOS_Y1 = temp.MARKPOS_Y1,
+      MARKPOS_X2 = temp.MARKPOS_X2,
+      MARKPOS_Y2 = temp.MARKPOS_Y2,
+      MARKPOS_X3 = temp.MARKPOS_X3,
+      MARKPOS_Y3 = temp.MARKPOS_Y3,
+      EXTR_START = temp.EXTR_START,
+      EXTR_END = temp.EXTR_END,
+      SEQUENCE = temp.SEQUENCE
+
+    )]
+
+    results.DATA[[temp.ID]] <- temp.DPOINTS
+
+    results.RESERVED[[temp.ID]][[1]] <- temp.RESERVED1
+    results.RESERVED[[temp.ID]][[2]] <- temp.RESERVED2
+
+    ##BREAK
+    ##stop loop if record limit is reached
+    if (!is.null(n.records)) {
+      if (n.records == temp.ID) {
+        break()
+      }
+
+    }
+
+    ##reset values
+    temp.GRAINNUMBER <- NA
+    temp.GRAIN <- NA
+
+
+  }#endwhile::end loop
+
+  ##close con
+  close(con)
+
+  ##close
+  if(txtProgressBar & verbose){close(pb)}
+
+  ##output
+  if(verbose){cat(paste("\t >> ",temp.ID," records have been read successfully!\n\n", sep=""))}
+
+  # Further limitation --------------------------------------------------------------------------
+  if(!is.null(position)){
+
+    ##check whether the position is valid at all
+    if (all(position %in% results.METADATA[["POSITION"]])) {
+
+      results.METADATA <- results.METADATA[which(results.METADATA[["POSITION"]] %in% position),]
+      results.DATA <- results.DATA[results.METADATA[["ID"]]]
+
+        ##re-calculate ID ... otherwise it will not match
+        results.METADATA[["ID"]] <- 1:length(results.DATA )
+
+        ##show a message
+        message("[read_BIN2R()] The record index has been recalculated!")
+
+
+    }else{
+      valid.position <-
+        paste(unique(results.METADATA[["POSITION"]]), collapse = ", ")
+      warning(
+        paste0(
+          "Position limitation omitted. At least one position number is not valid, valid position numbers are: ", valid.position
+        )
+      )
+    }
+
+  }
+
+
+  ##check for position that have no data at all (error during the measurement)
+  if(zero_data.rm){
+    zero_data.check <- which(sapply(results.DATA, length) == 0)
+
+    ##remove records if there is something to remove
+    if(length(zero_data.check) != 0){
+      results.METADATA <- results.METADATA[-zero_data.check, ]
+      results.DATA[zero_data.check] <- NULL
+
+      ##recalculate record index
+      results.METADATA[["ID"]] <- 1:nrow(results.METADATA)
+
+      warning(
+        paste0(
+          "[read_BIN2R()] zero data records detected and removed: ",
+          paste(zero_data.check, collapse = ", "),
+          ". Record index re-calculated."
+        )
+      )
+
+    }
+
+  }
+
+  ##check for duplicated entries and remove them if wanted, but only if we have more than one record
+  if (n.records > 1) {
+    duplication.check <- suppressWarnings(which(c(
+      0, vapply(
+        2:length(results.DATA),
+        FUN = function(x) {
+          all(results.DATA[[x - 1]] == results.DATA[[x]])
+        },
+        FUN.VALUE = 1
+      )
+    ) == 1))
+
+    if (length(duplication.check) != 0) {
+      if (duplicated.rm) {
+        ##remove records
+        results.METADATA <- results.METADATA[-duplication.check, ]
+        results.DATA[duplication.check] <- NULL
+
+        ##recalculate record index
+        results.METADATA[["ID"]] <- 1:nrow(results.METADATA)
+
+        ##message
+        if(verbose) {
+          message(
+            paste0(
+              "[read_BIN2R()] duplicated record(s) detected and removed: ",
+              paste(duplication.check, collapse = ", "),
+              ". Record index re-calculated."
+            )
+          )
+        }
+
+      } else{
+        warning(
+          paste0(
+            "[read_BIN2R()] duplicated record(s) detected: ",
+            paste(duplication.check, collapse = ", "),
+            ". \n\n >> You should consider 'duplicated.rm = TRUE'."
+          )
+        )
+
+      }
+
+    }
+
+  }
+
+  ##produce S4 object for output
+  object <- set_Risoe.BINfileData(
+    METADATA = results.METADATA,
+    DATA = results.DATA,
+    .RESERVED =  results.RESERVED)
+
+  # Convert Translation Matrix Values ---------------------------------------
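+  ##map the stored integer codes to the character representations defined in
+  ##the lookup tables above (unless raw values were requested)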
+
+  if (!show.raw.values) {
+    ##LIGHTSOURCE CONVERSION
+    object at METADATA[["LIGHTSOURCE"]] <-
+      unname(LIGHTSOURCE.lookup[object at METADATA[["LIGHTSOURCE"]]])
+
+    ##LTYPE CONVERSION
+    object at METADATA[["LTYPE"]] <-
+      unname(LTYPE.lookup[object at METADATA[["LTYPE"]]])
+
+    ##DTYPE CONVERSION
+    object at METADATA[["DTYPE"]] <-
+      unname(DTYPE.lookup[object at METADATA[["DTYPE"]]])
+
+    ##CHECK for oddly set LTYPEs, this may happen in old BIN-file versions
+    if (object@METADATA[["VERSION"]][1] == 3) {
+      object@METADATA[["LTYPE"]] <-
+        sapply(1:length(object@METADATA[["LTYPE"]]), function(x) {
+          if (object@METADATA[["LTYPE"]][x] == "OSL" &
+              object@METADATA[["LIGHTSOURCE"]][x] == "IR diodes/IR Laser") {
+            return("IRSL")
+
+          } else{
+            return(object@METADATA[["LTYPE"]][x])
+
+          }
+
+        })
+
+    }
+
+    ##TIME CONVERSION, do not do for odd time formats as this could cause problems during export
+    if (TIME_SIZE == 6) {
+      object at METADATA[["TIME"]] <-
+        format(strptime(as.character(object at METADATA[["TIME"]]), "%H%M%S"), "%H:%M:%S")
+
+    }
+
+  }
+
+  ## check for empty BIN-files names ... if so, set the name of the file as BIN-file name
+  ## This can happen if the user uses different equipment
+  if(all(is.na(object@METADATA[["FNAME"]]))){
+    object@METADATA[["FNAME"]] <- strsplit(x = basename(file), split = ".", fixed = TRUE)[[1]][1]
+
+
+  }
+
+
+  # Fast Forward --------------------------------------------------------------------------------
+  ## set fastForward to TRUE if one of this arguments is used
+  if(any(names(list(...)) %in% names(formals(Risoe.BINfileData2RLum.Analysis))[-1]) &
+     fastForward == FALSE) {
+    fastForward <- TRUE
+    warning("[read_BIN2R()] automatically reset 'fastForward = TRUE'")
+
+  }
+
+  ##return values
+  ##with fast fastForward they will be converted directly to a list of RLum.Analysis objects
+  if(fastForward){
+     object <- Risoe.BINfileData2RLum.Analysis(object, ...)
+
+
+     ##because we expect a list
+     if(!is(object, "list")){
+       object <- list(object)
+
+     }
+
+
+  }
+
+
+   return(object)
+
+
+}
diff --git a/R/read_Daybreak2R.R b/R/read_Daybreak2R.R
new file mode 100644
index 0000000..229dac3
--- /dev/null
+++ b/R/read_Daybreak2R.R
@@ -0,0 +1,261 @@
+#' Import Daybreak ASCII data into R
+#'
+#' Import a *.txt (ASCII) file produced by a Daybreak reader into R.
+#'
+#' @param file \code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+#' file to be imported. Alternatively a list of file names can be provided or just the path of a folder
+#' containing measurement data. Please note that the rather common file extension (txt) is likely
+#' to lead to function failures during import when just a path is provided.
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback
+#'
+#' @param txtProgressBar \code{\link{logical}} (with default): enables or disables
+#' \code{\link{txtProgressBar}}.
+#'
+#' @return  A list of \code{\linkS4class{RLum.Analysis}} objects (each per position) is provided.
+#'
+#' @note \bold{[BETA VERSION]} This function version still needs to be properly tested.
+#'
+#' @section Function version: 0.2.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)\cr Based on a suggestion by Willian Amidon and Andrew Louis Gorin.
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' @references -
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' ## minimal usage sketch, a Daybreak *.txt file is required (uncomment for usage)
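+#' #FILE <- file.choose()
+#' #temp <- read_Daybreak2R(FILE)
+#' #temp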
+#'
+#' @export
+read_Daybreak2R <- function(
+  file,
+  verbose = TRUE,
+  txtProgressBar = TRUE
+){
+
+  ##TODO
+  ## - run tests
+  ## - check where the warning messages are coming from
+  ## - implement further integrity tests
+
+  # Self Call -----------------------------------------------------------------------------------
+  # Option (a): Input is a list, every element in the list will be treated as a file connection,
+  # so that many files can be read in at the same time
+  # Option (b): The input is just a path, the function tries to grep ALL Daybreak *.txt files in the
+  # directory and import them; if this is detected, we proceed as a list
+
+  if(is(file, "character")) {
+
+    ##If this is not really a path we skip this here
+    if (dir.exists(file) & length(dir(file)) > 0) {
+      if(verbose){
+        cat("[read_Daybreak2R()] Directory detected, trying to extract '*.txt' files ...\n")
+      }
+
+      ##grep all *.txt files (anchored at the file ending)
+      file <-
+        as.list(file.path(file, dir(
+          file, recursive = FALSE, pattern = "\\.txt$"
+        )))
+
+    }
+
+  }
+
+  ##if the input is already a list
+  if (is(file, "list")) {
+    temp.return <- lapply(1:length(file), function(x) {
+      read_Daybreak2R(
+        file = file[[x]],
+        verbose = verbose,
+        txtProgressBar = txtProgressBar
+      )
+    })
+
+    ##return
+    return(temp.return)
+
+  }
+
+
+
+  # Integrity checks ----------------------------------------------------------------------------
+
+  ##check if file exists
+  if(!file.exists(file)){
+    stop("[read_Daybreak2R()] file name doesn't seem to exist.")
+
+  }
+
+
+  # Read ASCII file -----------------------------------------------------------------------------
+
+  ##read file
+  file2read <- readLines(file)
+
+  ##(0) get rid of all the empty lines
+  file2read <- file2read[file2read != ""]
+
+  ##(1)
+  ##get all rows with the term "[NewRecord]" - that's what we are interested in and it defines
+  ##the number of elements we need
+  records.row_number <- grep(pattern = "\\[NewRecord\\]", x = file2read)
+
+  ##(2)
+  ##make a list ... this is not strictly needed but it makes things easier
+  data.list <- lapply(1:length(records.row_number), function(x) {
+
+    ##grep each element
+    if (!is.na(records.row_number[x + 1])) {
+      return(file2read[records.row_number[x]:(records.row_number[x + 1] - 1)])
+
+    }else{
+      return(file2read[records.row_number[x]:length(file2read)])
+
+    }
+
+  })
+
+    ##clear memory
+    rm(file2read)
+
+
+  ##TERMINAL FEEDBACK
+  if(verbose){
+    cat("\n[read_Daybreak2R()]")
+    cat(paste("\n >> Importing:", file[1],"\n"))
+  }
+
+  ##PROGRESS BAR
+  if(txtProgressBar & verbose){
+    pb <- txtProgressBar(min=0,max=length(data.list), char = "=", style=3)
+  }
+
+  ##(3)
+  ##Loop over the list to create RLum.Data.Curve objects
+  RLum.Data.Curve.list <- lapply(1:length(data.list), function(x){
+
+
+    ##get length of record
+    record.length <- length(data.list[[x]])
+
+    ##get header length until the argument 'Points'
+    header.length <- grep(pattern = "Points", x = data.list[[x]])
+
+    if(length(header.length)>0){
+      temp.meta_data <- unlist(strsplit(data.list[[x]][2:header.length], split = "=", fixed = TRUE))
+
+    }else{
+      temp.meta_data <- unlist(strsplit(data.list[[x]][2:length(data.list[[x]])], split = "=", fixed = TRUE))
+
+    }
+
+    ##get list names for the info element list
+    info.names <- temp.meta_data[seq(1,length(temp.meta_data), by = 2)]
+
+    ##info elements
+    info <- as.list(temp.meta_data[seq(2,length(temp.meta_data), by = 2)])
+    names(info) <- info.names
+
+    ##add position, which is 'Disk'
+    info <- c(info, position = as.integer(info$Disk))
+
+    if(length(header.length)>0){
+      ##get measurement data
+      temp.data <- unlist(strsplit(unlist(strsplit(
+        data.list[[x]][12:length(data.list[[x]])], split = "="
+      )), split = ";"))
+
+      ##grep only data of interest
+      point.x <-
+        suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(2, length(temp.data), by = 4)])))
+      point.y <-
+        suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(3,length(temp.data), by = 4)])))
+
+
+      ##combine it into a matrix
+      data <- matrix(c(point.x,point.y), ncol = 2)
+
+    }else{
+
+      ##we presume this should be irradiation ...
+      if ("IrradTime" %in% names(info)) {
+
+        point.x <- 1:as.numeric(info$IrradTime)
+        point.y <- rep(1, length(point.x))
+
+        data <- matrix(c(point.x,point.y), ncol = 2)
+
+      }
+
+    }
+
+    ##update progress bar
+    if (txtProgressBar & verbose) {
+      setTxtProgressBar(pb, x)
+    }
+
+    ##return RLum object
+    return(
+      set_RLum(
+        class = "RLum.Data.Curve",
+        originator = "read_Daybreak2R",
+        recordType = sub(" ", replacement = "_", x = info$DataType),
+        curveType = "measured",
+        data = data,
+        info = info
+      )
+    )
+
+  })
+
+  ##close ProgressBar
+  if(txtProgressBar & verbose){close(pb)}
+
+  ##(4)
+  ##Now we have to find out how many aliquots we have
+  positions.id <-  sapply(RLum.Data.Curve.list, function(x){
+
+    get_RLum(x, info.object = "position")
+
+  })
+
+  ##(5)
+  ##now combine everything into an RLum.Analysis object in accordance with the position number
+  RLum.Analysis.list <- lapply(unique(positions.id), function(x){
+
+    ##get list ids for position number
+    n <- which(positions.id == x)
+
+    ##make list
+    temp.list <- lapply(n, function(x){
+      RLum.Data.Curve.list[[x]]
+
+    })
+
+    ##put in RLum.Analysis object
+    object <- set_RLum(
+      class = "RLum.Analysis",
+      originator = "read_Daybreak2R",
+      protocol = "Custom",
+      records = temp.list
+    )
+
+    ##set parent id of records
+    object <- .set_pid(object)
+
+    return(object)
+
+
+  })
+
+  ##TERMINAL FEEDBACK
+  if(verbose){
+    cat(paste0("\n ",length(unlist(get_RLum(RLum.Analysis.list))), " records have been read successfully!\n"))
+  }
+
+  return(RLum.Analysis.list)
+}
diff --git a/R/read_SPE2R.R b/R/read_SPE2R.R
new file mode 100644
index 0000000..e1b3740
--- /dev/null
+++ b/R/read_SPE2R.R
@@ -0,0 +1,437 @@
+#' Import Princeton Instruments (TM) SPE-file into R
+#'
+#' Function imports Princeton Instruments (TM) SPE-files into R environment and
+#' provides \code{RLum} objects as output.
+#'
+#' Function provides an import routine for the Princeton Instruments SPE format.
+#' Import functionality is based on the file format description provided by
+#' Princeton Instruments and a MatLab script written by Carl Hall (see
+#' references).
+#'
+#' @param file \link{character} (\bold{required}): spe-file name (including
+#' path), e.g. \cr [WIN]: \code{read_SPE2R("C:/Desktop/test.spe")}, \cr
+#' [MAC/LINUX]: \code{read_SPE2R("/User/test/Desktop/test.spe")}
+#'
+#' @param output.object \code{\link{character}} (with default): set \code{RLum}
+#' output object.  Allowed types are \code{"RLum.Data.Spectrum"},
+#' \code{"RLum.Data.Image"} or \code{"matrix"}
+#'
+#' @param frame.range \code{\link{vector}} (optional): limit frame range, e.g.
+#' select first 100 frames by \code{frame.range = c(1,100)}
+#'
+#' @param txtProgressBar \link{logical} (with default): enables or disables
+#' \code{\link{txtProgressBar}}.
+#'
+#' @return Depending on the chosen option (\code{output.object}) the function
+#' returns three different types of objects:\cr
+#'
+#' \code{RLum.Data.Spectrum}\cr
+#'
+#' An object of type \code{\linkS4class{RLum.Data.Spectrum}} is returned.  Row
+#' sums are used to integrate all counts over one channel.
+#'
+#' \code{RLum.Data.Image}\cr
+#'
+#' An object of type \code{\linkS4class{RLum.Data.Image}} is returned.  For
+#' performance reasons the import is aborted for files containing more than 100
+#' frames. This limitation can be overridden manually by using the argument
+#' \code{frame.range}.
+#'
+#' \code{matrix}\cr
+#'
+#' Returns a matrix of the form: Rows = Channels, columns = Frames. For the
+#' transformation the function \code{\link{get_RLum}} is used,
+#' meaning that the same results can be obtained by using the function
+#' \code{\link{get_RLum}} on an \code{RLum.Data.Spectrum} or \code{RLum.Data.Image} object.
+#' @note \bold{The function does not test whether the input data are spectra or
+#' pictures for spatially resolved analysis!}\cr
+#'
+#' The function has been successfully tested for SPE format versions 2.x.
+#'
+#' \emph{Currently not all information provided by the SPE format is
+#' supported.}
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso \code{\link{readBin}}, \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\link[raster]{raster}}
+#'
+#' @references Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File
+#' Format Specification, Version 1.A,
+#' \url{ftp://ftp.princetoninstruments.com/Public/Manuals/Princeton\%20Instruments/SPE\%203.0\%20File\%20Format\%20Specification.pdf}
+#'
+#' Hall, C., 2012: readSPE.m.
+#' \url{http://www.mathworks.com/matlabcentral/fileexchange/35940-readspe/content/readSPE.m}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#'
+#' ## to run examples uncomment lines and run the code
+#'
+#' ##(1) Import data as RLum.Data.Spectrum object
+#' #file <- file.choose()
+#' #temp <- read_SPE2R(file)
+#' #temp
+#'
+#' ##(2) Import data as RLum.Data.Image object
+#' #file <- file.choose()
+#' #temp <- read_SPE2R(file, output.object = "RLum.Data.Image")
+#' #temp
+#'
+#' ##(3) Import data as matrix object
+#' #file <- file.choose()
+#' #temp <- read_SPE2R(file, output.object = "matrix")
+#' #temp
+#'
+#' ##(4) Export raw data to csv, if temp is a RLum.Data.Spectrum object
+#' # write.table(x = get_RLum(temp),
+#' #             file = "[your path and filename]",
+#' #             sep = ";", row.names = FALSE)
+#'
+#'
+#' @export
+read_SPE2R <- function(
+  file,
+  output.object = "RLum.Data.Image",
+  frame.range,
+  txtProgressBar = TRUE
+){
+
+  # Consistency check -------------------------------------------------------
+
+  ##check if file exists
+  if(file.exists(file) == FALSE){
+
+    stop("[read_SPE2R()] File not found!")
+
+  }
+
+  ##check file extension (compare the last element case-insensitively, so that
+  ##dots in the path do not break the check)
+  if(toupper(tail(strsplit(file, split = "\\.")[[1]], n = 1)) != "SPE"){
+
+    temp.text <- paste("[read_SPE2R()] Unsupported file format: *.",
+                       tail(strsplit(file, split = "\\.")[[1]], n = 1), sep = "")
+
+    stop(temp.text)
+
+  }
+
+
+  # Open Connection ---------------------------------------------------------
+
+  #open connection
+  con<-file(file, "rb")
+
+  # read header -------------------------------------------------------------
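+  ##the fields below follow the fixed-size binary header of the SPE 2.x format
+  ##(presumably 4100 bytes); stretches not needed here are skipped via 'stepping'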
+
+  temp <- readBin(con, what="int", 2, size=2, endian="little", signed = TRUE)
+  ControllerVersion <- temp[1] #Hardware version
+  LogicOutput <- temp[2] #Definition of Output BNC
+
+  temp <- readBin(con, what="int", 2, size=2, endian="little", signed = FALSE)
+  AmpHiCapLowNoise <- temp[1] #Amp Switching Mode
+  xDimDet <- temp[2] #Detector x dimension of chip.
+
+  #timing mode
+  mode <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  #alternative exposure, in sec.
+  exp_sec <- readBin(con, what="double", 1, size=4, endian="little")
+
+  temp <- readBin(con, what="int", 2, size=2, endian="little", signed = TRUE)
+  VChipXdim <- temp[1] # Virtual Chip X dim
+  VChipYdim <- temp[2] # Virtual Chip Y dim
+
+  #y dimension of CCD or detector.
+  yDimDet <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  #Date
+  Date <- readChar(con, 10, useBytes=TRUE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 4, size=1, endian="little", signed = TRUE)
+
+  #Old number of scans - should always be -1
+  noscan <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  #Detector Temperature Set
+  DetTemperature <- readBin(con, what="double", 1, size=4, endian="little")
+
+  # CCD/DiodeArray type
+  DetType <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  #actual # of pixels on x axis
+  xdim <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 64, size=1, endian="little", signed = TRUE)
+
+  ##experiment data type
+  ##0 = 32f (4 bytes)
+  ##1 = 32s (4 bytes)
+  ##2 = 16s (2 bytes)
+  ##3 = 16u (2 bytes)
+  ##8 = 32u (4 bytes)
+  datatype <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 546, size=1, endian="little")
+
+  #y dimension of raw data.
+  ydim <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE)
+
+  ##0=scrambled,1=unscrambled
+  scramble <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 4, size=1, endian="little")
+
+  #Number of scans (Early WinX)
+  lnoscan <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE)
+
+  #Number of Accumulations
+  lavgexp <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE)
+
+  ##Experiment readout time
+  ReadoutTime <- readBin(con, what="double", 1, size=4, endian="little")
+
+  #T/F Triggered Timing Option
+  TriggeredModeFlag <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 768, size=1, endian="little")
+
+  ##number of frames in file.
+  NumFrames <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE)
+
+  if(NumFrames > 100 & missing(frame.range) & output.object == "RLum.Data.Image"){
+
+    error.message <- paste0("[read_SPE2R()] Import aborted. This file contains more than 100 frames (", NumFrames, "). Use argument 'frame.range' to force import.")
+    stop(error.message)
+
+  }
+
+  ##set frame.range
+  if(missing(frame.range)){frame.range <- c(1,NumFrames)}
+
+  ##jump
+  stepping <- readBin(con, what="raw", 542, size=1, endian="little")
+
+  #file_header_ver
+  file_header_ver <- readBin(con, what="double", 1, size=4, endian="little")
+
+  ##jump
+  stepping <- readBin(con, what="raw", 1000, size=1, endian="little")
+
+  ##WinView_id - set to 19,088,743 (or 1234567 hex) (required for legacy reasons)
+  WinView_id <- readBin(con, what="integer", 1, size=4, endian="little", signed = TRUE)
+
+  ##jump
+  stepping <- readBin(con, what="raw", 1098, size=1, endian="little")
+
+  ##lastvalue - set to 21,845 (or 5555 hex) (required for legacy reasons)
+  lastvalue <- readBin(con, what="integer", 1, size=2, endian="little", signed = TRUE)
+
+
+  ##end header
+  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+  ##create info element list from data
+  temp.info <- list(ControllerVersion,
+                    LogicOutput,
+                    AmpHiCapLowNoise,
+                    xDimDet, yDimDet,
+                    xdim, ydim,
+                    VChipXdim, VChipYdim,
+                    Date,
+                    noscan,
+                    mode,  exp_sec,
+                    DetTemperature,
+                    DetType,
+                    datatype,
+                    scramble,
+                    lnoscan,
+                    lavgexp,
+                    ReadoutTime,
+                    TriggeredModeFlag,
+                    NumFrames,
+                    file_header_ver)
+
+  ##set name for list elements
+  names(temp.info) <- c("ControllerVersion", "LogicOutput", "AmpHiCapLowNoise", "xDimDet", "yDimDet",
+                        "xdim", "ydim", "VChipXdim", "VChipYdim", "Date", "noscan", "mode", "exp_sec",
+                        "DetTemperature", "DetType", "datatype", "scramble", "lnoscan", "lavgexp",
+                        "ReadoutTime", "TriggeredModeFlag", "NumFrames", "file_header_ver")
+
+  # read count value data ---------------------------------------------------
+  ##set functions
+
+  if(datatype  == 0){
+
+    read.data <- function(n.counts){
+      readBin(con, what="double", n.counts, size=4, endian="little")
+    }
+
+  }else if(datatype == 1){
+
+    read.data <- function(n.counts){
+      readBin(con, what="integer", n.counts, size=4, endian="little", signed = TRUE)
+    }
+
+  }else if(datatype == 2){
+
+    read.data <- function(n.counts){
+      readBin(con, what="integer", n.counts, size=2, endian="little", signed = TRUE)
+    }
+
+  }else if(datatype == 3){
+
+    read.data <- function(n.counts){
+
+      readBin(con, what="int", n.counts, size=2, endian="little", signed = FALSE)
+
+    }
+
+  }else if(datatype == 8){
+
+    read.data <- function(n.counts){
+      ##readBin() supports signed = FALSE only for sizes 1 and 2; read signed
+      ##32-bit integers instead and shift negative values into the unsigned range
+      ##(note: the bit pattern 0x80000000 maps to NA in R and cannot be recovered)
+      temp <- as.numeric(readBin(con, what="integer", n.counts, size=4, endian="little"))
+      temp[temp < 0] <- temp[temp < 0] + 2^32
+      temp
+    }
+
+  }else{
+
+    stop("[read_SPE2R()] Unknown 'datatype'.")
+
+  }
+
+
+  ##loop over all frames
+  ##output
+  cat(paste("\n[read_SPE2R.R]\n\t >> ",file,sep=""), fill=TRUE)
+
+  ##set progressbar
+  if(txtProgressBar==TRUE){
+    pb<-txtProgressBar(min=0,max=diff(frame.range)+1, char="=", style=3)
+  }
+
+  ##stepping for frame range
+  temp <- readBin(con, what = "raw", (min(frame.range)-1)*2, size = 1, endian = "little")
+
+  for(i in 1:(diff(frame.range)+1)){#NumFrames
+
+    temp.data <- matrix(read.data(n.counts = (xdim * ydim)),
+                        ncol = ydim,
+                        nrow = xdim)
+
+    if(exists("data.list") == FALSE){
+
+      data.list <- list(temp.data)
+
+    }else{
+
+      data.list <- c(data.list, list(temp.data))
+
+    }
+
+    ##update progress bar
+    if(txtProgressBar==TRUE){
+      setTxtProgressBar(pb, i)
+    }
+
+  }
+
+  ##close
+  if(txtProgressBar==TRUE){close(pb)
+
+                           ##output
+                           cat(paste("\t >> ",i," records have been read successfully!\n\n", sep=""))
+  }
+
+  # Output ------------------------------------------------------------------
+
+  if(output.object == "RLum.Data.Spectrum" | output.object == "matrix"){
+
+    ##to create a spectrum object the matrix has to be transposed and
+    ##the row sums are needed
+
+    data.spectrum.vector <- sapply(1:length(data.list), function(x){
+
+      rowSums(data.list[[x]])
+
+    })
+
+    ##split vector to matrix
+    data.spectrum.matrix <- matrix(data.spectrum.vector,
+                                   nrow = xdim,
+                                   ncol = length(data.list))
+
+    ##set column and row names
+    colnames(data.spectrum.matrix) <- as.character(1:ncol(data.spectrum.matrix))
+    rownames(data.spectrum.matrix) <- as.character(1:nrow(data.spectrum.matrix))
+
+
+    ##set output object
+    object <- set_RLum(
+      class = "RLum.Data.Spectrum",
+      originator = "read_SPE2R",
+      recordType = "Spectrum",
+      curveType = "measured",
+      data = data.spectrum.matrix,
+      info = temp.info)
+
+    ##optional matrix object
+    if(output.object == "matrix"){
+
+      object <- get_RLum(object)}
+
+
+  }else if(output.object == "RLum.Data.Image"){
+
+    ##combine to raster
+    data.raster.list <- lapply(1:length(data.list), function(x){
+
+      if(txtProgressBar==TRUE){
+
+        cat(paste("\r Converting to RasterLayer: ", x, "/",length(data.list), sep = ""))
+
+      }
+
+      raster::raster(t(data.list[[x]]),
+             xmn = 0, xmx = max(xdim),
+             ymn = 0, ymx = max(ydim))
+
+
+    })
+
+    ##Convert to raster brick
+    data.raster <- raster::brick(x = data.raster.list)
+
+    ##Create RLum.object
+    object <- set_RLum(
+      class = "RLum.Data.Image",
+      originator = "read_SPE2R",
+      recordType = "Image",
+      curveType = "measured",
+      data = data.raster,
+      info = temp.info)
+
+  }else{
+
+    stop("[read_SPE2R()] Chosen 'output.object' not supported. Please check manual!")
+
+  }
+
+  ##close con
+  close(con)
+
+
+  ##return values
+  return(object)
+
+}
diff --git a/R/read_XSYG2R.R b/R/read_XSYG2R.R
new file mode 100644
index 0000000..063ba49
--- /dev/null
+++ b/R/read_XSYG2R.R
@@ -0,0 +1,753 @@
+#' Import XSYG files to R
+#'
+#' Imports XSYG files produced by a Freiberg Instrument lexsyg reader into R.
+#'
+#' \bold{How does the import function work?}\cr\cr The function uses the
+#' \code{XML} package to parse the file structure. Each sequence is
+#' subsequently translated into an \code{\linkS4class{RLum.Analysis}}
+#' object.\cr\cr
+#'
+#' \bold{General structure XSYG format}\cr\cr \code{<?xml?}\cr \code{
+#' <Sample>}\cr \code{ <Sequence>}\cr \code{ <Record>}\cr \code{ <Curve
+#' name="first curve" />}\cr \code{ <Curve name="curve with data">}\cr \code{
+#' x0 , y0 ; x1 , y1 ; x2 , y2 ; x3 , y3}\cr \code{ </Curve>}\cr \code{
+#' </Record>}\cr \code{ </Sequence>}\cr \code{ </Sample>}\cr\cr So far, each
+#' XSYG file can only contain one \code{<Sample></Sample>}, but multiple
+#' sequences. \cr\cr Each record may comprise several curves.\cr\cr
+#'
+#' \bold{TL curve recalculation}\cr
+#'
+#' On the FI lexsyg device TL curves are recorded as time against count values.
+#' Temperature values are monitored on the heating plate and stored in a
+#' separate curve (time vs. temperature). If the option
+#' \code{recalculate.TL.curves = TRUE} is chosen, the time values for each TL
+#' curve are replaced by temperature values.\cr
+#'
+#' Practically, this means combining two matrices (Time vs. Counts and Time vs.
+#' Temperature) with different row numbers by their time values. Three cases
+#' are considered:
+#'
+#' HE: Heating element\cr PMT: Photomultiplier tube\cr Interpolation is done
+#' using the function \code{\link{approx}}\cr
+#'
+#' CASE (1): \code{nrow(matrix(PMT))} > \code{nrow(matrix(HE))} \cr
+#'
+#' Missing temperature values from the heating element are calculated using
+#' time values from the PMT measurement.\cr
+#'
+#' CASE (2): \code{nrow(matrix(PMT))} < \code{nrow(matrix(HE))} \cr
+#'
+#' Missing count values from the PMT are calculated using time values from the
+#' heating element measurement.\cr
+#'
+#' CASE (3): \code{nrow(matrix(PMT))} == \code{nrow(matrix(HE))} \cr
+#'
+#' A new matrix is produced using temperature values from the heating element
+#' and count values from the PMT. \cr
+#'
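+#' A minimal sketch of the CASE (1) interpolation, assuming two-column
+#' matrices \code{PMT} (time, counts) and \code{HE} (time, temperature);
+#' both names are purely illustrative:
+#'
+#' \preformatted{
+#' HE.i <- approx(x = HE[,1], y = HE[,2], xout = PMT[,1], rule = 2)
+#' TL   <- cbind(HE.i$y, PMT[,2])  # temperature vs. counts
+#' }
+#'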
+#' \emph{Note: Due to the recalculation of the temperature
+#' values based on values delivered by the heating element, it may happen that
+#' multiple count values exist for each temperature value, and temperature
+#' values may also decrease during heating, not only increase. }\cr
+#'
+#' \bold{Advanced file import}\cr
+#'
+#' To allow for a more efficient usage of the function, a directory can be passed
+#' as input instead of a single path to a file. In this case the function tries to
+#' detect all XSYG files found in the directory and imports them all. Internally,
+#' the function constructs a list of the XSYG files found in the directory. Please
+#' note that no recursive detection is supported, as this may lead to endless loops.
+#'
+#' @param file \code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+#' XSYG file. If input is a \code{list} it should comprise only \code{character}s representing each valid
+#' path and xsyg-file names. Alternatively, the input character can be just a directory (path);
+#' in this case the function tries to detect and import all xsyg files found in the directory.
+#'
+#' @param recalculate.TL.curves \link{logical} (with default): if set to
+#' \code{TRUE}, TL curves are returned as temperature against count values (see
+#' details for more information). Note: The option overwrites the time vs. count
+#' TL curve. Select \code{FALSE} to import the raw data delivered by the
+#' lexsyg. Works for TL curves and spectra.
+#'
+#' @param fastForward \code{\link{logical}} (with default): if \code{TRUE} for a
+#' more efficient data processing only a list of \code{RLum.Analysis} objects is returned.
+#'
+#' @param import \code{\link{logical}} (with default): if set to \code{FALSE}, only
+#' the XSYG file structure is shown.
+#'
+#' @param pattern \code{\link{regex}} (with default): optional regular expression if \code{file} is
+#' a link to a folder, to select just specific XSYG-files
+#'
+#' @param txtProgressBar \link{logical} (with default): enables \code{TRUE} or
+#' disables \code{FALSE} the progression bar during import
+#'
+#' @return \bold{Using the option \code{import = FALSE}}\cr\cr A list
+#' consisting of two elements is returned: \item{Sample}{\link{data.frame} with
+#' information on file.} \item{Sequences}{\link{data.frame} with information on
+#' the sequences stored in the XSYG file}.\cr\cr \bold{Using the option
+#' \code{import = TRUE} (default)} \cr\cr A list is provided, the list elements
+#' contain: \item{Sequence.Header}{\link{data.frame} with information on the
+#' sequence.} \item{Sequence.Object}{\code{\linkS4class{RLum.Analysis}}
+#' containing the curves.}
+#'
+#' @note This function is a beta version as the XSYG file format is not yet
+#' fully specified. Thus, further file operations (merge, export, write) should
+#' be done using the functions provided with the package \code{XML}.\cr
+#'
+#' \bold{So far, no image data import is provided!}\cr Corresponding values in
+#' the XSYG file are skipped.
+#'
+#'
+#' @section Function version: 0.5.7
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#'
+#' @seealso \code{XML}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Data.Curve}}, \code{\link{approx}}
+#'
+#'
+#' @references Grehl, S., Kreutzer, S., Hoehne, M., 2013. Documentation of the
+#' XSYG file format. Unpublished Technical Note. Freiberg, Germany \cr\cr
+#'
+#' \bold{Further reading} \cr\cr XML: \url{http://en.wikipedia.org/wiki/XML}
+#'
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#'
+#' ##(1) import XSYG file to R (uncomment for usage)
+#'
+#' #FILE <- file.choose()
+#' #temp <- read_XSYG2R(FILE)
+#'
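+#' ##(1b) import all XSYG files from a directory (uncomment for usage;
+#' ##     the path is purely illustrative)
+#'
+#' #temp <- read_XSYG2R("path/to/measurements/", pattern = "\\.xsyg$")
+#'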
+#' ##(2) additional examples for pure XML import using the package XML
+#' ##    (uncomment for usage)
+#'
+#'   ##import entire XML file
+#'   #FILE <- file.choose()
+#'   #temp <- XML::xmlRoot(XML::xmlTreeParse(FILE))
+#'
+#'   ##search for specific subnodes with curves containing 'OSL'
+#'   #getNodeSet(temp, "//Sample/Sequence/Record[@@recordType = 'OSL']/Curve")
+#'
+#' ##(3) How to extract single curves ... after import
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ##extract the OSL curves and plot the first one
+#' OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]]
+#'
+#' ##(4) How to see the structure of an object?
+#' structure_RLum(OSL.SARMeasurement$Sequence.Object)
+#'
+#'
+#' @export
+read_XSYG2R <- function(
+  file,
+  recalculate.TL.curves = TRUE,
+  fastForward = FALSE,
+  import = TRUE,
+  pattern = ".xsyg",
+  txtProgressBar = TRUE
+){
+
+  # Self Call -----------------------------------------------------------------------------------
+  # Option (a): Input is a list, every element in the list will be treated as file connection
+  # with that many file can be read in at the same time
+  # Option (b): The input is just a path, the function tries to grep ALL xsyg/XSYG files in the
+  # directory and import them, if this is detected, we proceed as list
+
+  if(is(file, "character")) {
+
+    ##If this is not really a path we skip this here
+    if (dir.exists(file) && length(dir(file)) > 0) {
+      message("[read_XSYG2R()] Directory detected, trying to extract '*.xsyg' files ...\n")
+
+      ##use file.path() to make sure directory and file names are separated correctly
+      file <- as.list(file.path(file, dir(
+        file, recursive = TRUE, pattern = pattern
+      )))
+
+    }
+
+  }
+
+  if (is(file, "list")) {
+    temp.return <- lapply(1:length(file), function(x) {
+      read_XSYG2R(
+        file = file[[x]],
+        recalculate.TL.curves = recalculate.TL.curves,
+        fastForward = fastForward,
+        import = import,
+        txtProgressBar = txtProgressBar
+      )
+    })
+
+    ##return
+    if (fastForward) {
+
+      if(import){
+        return(unlist(temp.return, recursive = FALSE))
+
+      }else{
+        return(as.data.frame(data.table::rbindlist(temp.return)))
+
+      }
+
+
+    }else{
+      return(temp.return)
+
+    }
+
+  }
+
+
+  # Consistency check -------------------------------------------------------
+
+
+  ##check if file exists
+  if(!file.exists(file)){
+
+    warning("[read_XSYG2R()] Wrong file name or file does not exist, nothing imported!")
+    return(NULL)
+  }
+
+  ##check if file is XML file
+  if(tail(unlist(strsplit(file, split = "\\.")), 1) != "xsyg" &
+     tail(unlist(strsplit(file, split = "\\.")), 1) != "XSYG" ){
+
+    warning("[read_XSYG2R()] File is not of type 'XSYG', nothing imported!")
+    return(NULL)
+
+  }
+
+  # (0) config --------------------------------------------------------------
+  #version.supported <- c("1.0")
+
+  #additional functions
+  ##get curve values
+  get_XSYG.curve.values <- function(curve.node){
+
+    ##Four steps
+    ##(1) split string into pairs of xy-values
+    ##(2) split string into the xy-values themselves
+    ##(3) convert to numeric
+    ##(4) create matrix
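+    ##
+    ## e.g., the string "0,10;0.1,12" becomes the matrix rbind(c(0, 10), c(0.1, 12))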
+
+   curve.node <- t(
+      vapply(
+        strsplit(
+          strsplit(
+            XML::xmlValue(curve.node), split = ";", fixed = TRUE)[[1]],
+          split = ",", fixed = TRUE),
+        FUN = as.numeric,
+        FUN.VALUE = c(1,1L)))
+
+  }
+
+  get_XSYG.spectrum.values <- function(curve.node){
+
+    ##1st grep wavelength table
+    wavelength <- XML::xmlAttrs(curve.node)["wavelengthTable"]
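+    ## (a ";"-separated string of wavelength values, e.g. "300.1;300.5;...")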
+
+    ##string split
+    wavelength <- as.numeric(unlist(strsplit(wavelength, split = ";", fixed = TRUE)))
+
+    ##2nd grep time values
+    curve.node <- unlist(strsplit(XML::xmlValue(curve.node), split = ";", fixed = TRUE))
+    curve.node <- unlist(strsplit(curve.node, split = ",", fixed = TRUE), recursive = FALSE)
+
+    curve.node.time <- as.numeric(curve.node[seq(1,length(curve.node),2)])
+
+    ##3rd grep count values
+    curve.node.count <- as.character(curve.node[seq(2,length(curve.node),2)])
+
+    ##remove from pattern...
+    curve.node.count <- do.call("gsub", list(pattern="[[]|[]]", replacement=" ",
+                                             x=curve.node.count))
+
+    ##4th combine to spectrum matrix
+    spectrum.matrix <- matrix(0,length(wavelength),length(curve.node.time))
+    spectrum.matrix <- sapply(1:length(curve.node.time), function(x){
+
+      as.numeric(unlist(strsplit(curve.node.count[x], "[|]")))
+
+    })
+
+
+    ##change row names (rows are wavelength)
+    rownames(spectrum.matrix) <- round(wavelength, digits=3)
+
+    ##change column names (columns are time/temp values)
+    colnames(spectrum.matrix) <- round(curve.node.time, digits=3)
+
+
+    return(spectrum.matrix)
+  }
+
+  # (1) Integrity tests -----------------------------------------------------
+
+  ##parse XML tree using the package XML
+  temp <- try(XML::xmlRoot(XML::xmlTreeParse(file, useInternalNodes = TRUE)), silent = TRUE)
+
+
+
+  ##show error
+  if(is(temp, "try-error") == TRUE){
+
+    warning("[read_XSYG2R()] XML file not readable, nothing imported!)")
+    return(NULL)
+
+  }
+
+  # (2) Further file processing ---------------------------------------------
+
+  ##==========================================================================##
+  ##SHOW STRUCTURE
+  if(import == FALSE){
+
+    ##sample information
+    temp.sample <- as.data.frame(XML::xmlAttrs(temp), stringsAsFactors = FALSE)
+
+    ##grep sequences files
+
+    ##set data.frame
+    temp.sequence.header <- data.frame(t(1:length(names(XML::xmlAttrs(temp[[1]])))),
+                                       stringsAsFactors = FALSE)
+
+    colnames(temp.sequence.header) <- names(XML::xmlAttrs(temp[[1]]))
+
+    ##fill information in data.frame
+    for(i in 1:XML::xmlSize(temp)){
+
+      temp.sequence.header[i,] <- t(XML::xmlAttrs(temp[[i]]))
+
+    }
+
+
+      ##additional option for fastForward == TRUE
+      if(fastForward){
+
+        ##change column header
+        temp.sample <- t(temp.sample)
+        colnames(temp.sample) <- paste0("sample::", colnames(temp.sample))
+        output <- cbind(temp.sequence.header, temp.sample)
+
+
+      }else{
+        output <-  list(Sample = temp.sample, Sequences = temp.sequence.header)
+
+
+      }
+
+    return(output)
+
+  }else{
+
+
+    ##==========================================================================##
+    ##IMPORT XSYG FILE
+
+    ##Display output
+    message(paste0("[read_XSYG2R()]\n  Importing: ",file))
+
+    ##PROGRESS BAR
+    if(txtProgressBar){
+      pb <- txtProgressBar(min=0,max=XML::xmlSize(temp), char = "=", style=3)
+    }
+
+    ##loop over the entire file, sequence by sequence
+    output <- lapply(1:XML::xmlSize(temp), function(x){
+
+      ##read sequence header
+      temp.sequence.header <- as.data.frame(XML::xmlAttrs(temp[[x]]), stringsAsFactors = FALSE)
+      colnames(temp.sequence.header) <- ""
+
+      ###-----------------------------------------------------------------------
+      ##LOOP
+      ##read records >> records are combined to one RLum.Analysis object
+      temp.sequence.object <- unlist(lapply(1:XML::xmlSize(temp[[x]]), function(i){
+
+        ##get recordType
+        temp.sequence.object.recordType <- try(XML::xmlAttrs(temp[[x]][[i]])["recordType"],
+                                               silent = TRUE)
+
+        ##the XSYG file might be broken due to a machine error during the measurement, this
+        ##control flow helps; if a try-error is observed NULL is returned
+        if(!inherits(temp.sequence.object.recordType, "try-error")){
+
+        ##correct record type depending on the stimulator
+        if(temp.sequence.object.recordType == "OSL"){
+
+          if(XML::xmlAttrs(temp[[x]][[i]][[
+            XML::xmlSize(temp[[x]][[i]])]])["stimulator"] == "ir_LED_850" |
+            XML::xmlAttrs(temp[[x]][[i]][[
+              XML::xmlSize(temp[[x]][[i]])]])["stimulator"] == "ir_LD_850"){
+
+            temp.sequence.object.recordType  <- "IRSL"
+
+          }
+
+        }
+
+
+        ##loop 3rd level
+        lapply(1:XML::xmlSize(temp[[x]][[i]]), function(j){
+
+          ##get values
+          temp.sequence.object.curveValue <- temp[[x]][[i]][[j]]
+
+          ##get curveType
+          temp.sequence.object.curveType <- as.character(
+            XML::xmlAttrs(temp[[x]][[i]][[j]])["curveType"])
+
+          ##get detector
+          temp.sequence.object.detector <- as.character(
+            XML::xmlAttrs(temp[[x]][[i]][[j]])["detector"])
+
+
+          ##get stimulator
+          temp.sequence.object.stimulator <- as.character(
+            XML::xmlAttrs(temp[[x]][[i]][[j]])["stimulator"])
+
+
+          ##get parentID
+          temp.sequence.object.parentID <- as.numeric(
+            XML::xmlAttrs(temp[[x]][[i]][[j]])["partentID"])
+
+          ##get additional information
+          temp.sequence.object.info <- as.list(XML::xmlAttrs(temp.sequence.object.curveValue))
+
+          ##add stimulator and detector and so on
+          temp.sequence.object.info <- c(temp.sequence.object.info,
+                                         partentID = temp.sequence.object.parentID,
+                                         position = as.integer(as.character(temp.sequence.header["position",])),
+                                         name = as.character(temp.sequence.header["name",]))
+
+
+
+
+          ## TL curve recalculation ============================================
+          if(recalculate.TL.curves == TRUE){
+
+            ##TL curve heating values are stored in the 3rd curve of every set
+            if(temp.sequence.object.recordType == "TL" && j == 1){
+
+              #grep values from PMT measurement or spectrometer
+              if("Spectrometer" %in% temp.sequence.object.detector == FALSE){
+
+                temp.sequence.object.curveValue.PMT <- get_XSYG.curve.values(
+                  temp[[x]][[i]][[j]])
+
+
+                ##round values (1 digit is technical resolution of the heating element)
+                temp.sequence.object.curveValue.PMT[,1] <- round(
+                  temp.sequence.object.curveValue.PMT[,1], digits = 1)
+
+                #grep values from heating element
+                temp.sequence.object.curveValue.heating.element <- get_XSYG.curve.values(
+                  temp[[x]][[i]][[3]])
+
+
+
+              }else{
+
+                temp.sequence.object.curveValue.spectrum <- get_XSYG.spectrum.values(
+                  temp.sequence.object.curveValue)
+
+                ##get time values which are stored in the column labels
+                temp.sequence.object.curveValue.spectrum.time <- as.numeric(
+                  colnames(temp.sequence.object.curveValue.spectrum))
+
+                ##round values (1 digit is technical resolution of the heating element)
+                temp.sequence.object.curveValue.spectrum.time <- round(
+                  temp.sequence.object.curveValue.spectrum.time, digits = 1)
+
+              }
+
+              #grep values from heating element
+              temp.sequence.object.curveValue.heating.element <- get_XSYG.curve.values(
+                temp[[x]][[i]][[3]])
+
+
+              if("Spectrometer" %in% temp.sequence.object.detector == FALSE){
+
+                #reduce matrix values to values of the detection
+                temp.sequence.object.curveValue.heating.element <-
+                  temp.sequence.object.curveValue.heating.element[
+                    temp.sequence.object.curveValue.heating.element[,1] >=
+                      min(temp.sequence.object.curveValue.PMT[,1]) &
+                      temp.sequence.object.curveValue.heating.element[,1] <=
+                      max(temp.sequence.object.curveValue.PMT[,1]),]
+
+              }else{
+
+                #reduce matrix values to values of the detection
+                temp.sequence.object.curveValue.heating.element <-
+                  temp.sequence.object.curveValue.heating.element[
+                    temp.sequence.object.curveValue.heating.element[,1] >=
+                      min(temp.sequence.object.curveValue.spectrum.time) &
+                      temp.sequence.object.curveValue.heating.element[,1] <=
+                      max(temp.sequence.object.curveValue.spectrum.time),]
+
+              }
+
+
+
+
+              ## calculate the corresponding heating rate; this only makes sense
+              ## for linear heating, therefore it has to be the maximum value
+
+              ##remove 0 values (not measured) and limit to peak
+              heating.rate.values <- temp.sequence.object.curveValue.heating.element[
+                temp.sequence.object.curveValue.heating.element[,2] > 0 &
+                  temp.sequence.object.curveValue.heating.element[,2] <=
+                  max(temp.sequence.object.curveValue.heating.element[,2]),]
+
+              heating.rate <- (heating.rate.values[length(heating.rate.values[,2]), 2] -
+                                 heating.rate.values[1,2])/
+                (heating.rate.values[length(heating.rate.values[,1]), 1] -
+                   heating.rate.values[1,1])
+
+
+              ##round values
+              heating.rate <- round(heating.rate, digits=1)
+
+              ##add to info element
+              temp.sequence.object.info <- c(temp.sequence.object.info,
+                                             RATE = heating.rate)
+
+
+              ##PERFORM RECALCULATION
+              ##check which object contains more data
+
+              if("Spectrometer" %in% temp.sequence.object.detector == FALSE){
+
+                ##CASE (1)
+                if(nrow(temp.sequence.object.curveValue.PMT) >
+                   nrow(temp.sequence.object.curveValue.heating.element)){
+
+                  temp.sequence.object.curveValue.heating.element.i <- approx(
+                    x = temp.sequence.object.curveValue.heating.element[,1],
+                    y = temp.sequence.object.curveValue.heating.element[,2],
+                    xout = temp.sequence.object.curveValue.PMT[,1],
+                    rule = 2)
+
+                  temperature.values <-
+                    temp.sequence.object.curveValue.heating.element.i$y
+
+                  count.values <-
+                    temp.sequence.object.curveValue.PMT[,2]
+
+                  ##CASE (2)
+                }else if((nrow(temp.sequence.object.curveValue.PMT) <
+                          nrow(temp.sequence.object.curveValue.heating.element))){
+
+                  temp.sequence.object.curveValue.PMT.i <- approx(
+                    x = temp.sequence.object.curveValue.PMT[,1],
+                    y = temp.sequence.object.curveValue.PMT[,2],
+                    xout = temp.sequence.object.curveValue.heating.element[,1],
+                    rule = 2)
+
+                  temperature.values <-
+                    temp.sequence.object.curveValue.heating.element[,2]
+
+                  count.values <- temp.sequence.object.curveValue.PMT.i$y
+
+                  ##CASE (3)
+                }else{
+
+                  temperature.values <-
+                    temp.sequence.object.curveValue.heating.element[,2]
+
+                  count.values <- temp.sequence.object.curveValue.PMT[,2]
+
+                }
+
+                ##combine as matrix
+                temp.sequence.object.curveValue <- as.matrix(cbind(
+                  temperature.values,
+                  count.values))
+
+                ##set curve identifier
+                temp.sequence.object.info$curveDescripter <- "Temperature [\u00B0C]; Counts [a.u.]"
+
+
+              }else{
+
+                ##CASE (1): a different approach is taken here. In contrast to
+                ##          the PMT measurements, the resolution is usually much,
+                ##          much lower for such measurements. Otherwise we would
+                ##          introduce some pseudo signals, as we have to take
+                ##          care of noise later on
+
+                if(length(temp.sequence.object.curveValue.spectrum.time) !=
+                   nrow(temp.sequence.object.curveValue.heating.element)){
+
+                  temp.sequence.object.curveValue.heating.element.i <- approx(
+                    x = temp.sequence.object.curveValue.heating.element[,1],
+                    y = temp.sequence.object.curveValue.heating.element[,2],
+                    xout = temp.sequence.object.curveValue.spectrum.time,
+                    rule = 2,
+                    ties = -2)
+
+                  temperature.values <-
+                    temp.sequence.object.curveValue.heating.element.i$y
+
+                  ##check for duplicated values and, if found, increase these values
+                  if(anyDuplicated(temperature.values)>0){
+
+                    temperature.values[which(duplicated(temperature.values))] <-
+                      temperature.values[which(duplicated(temperature.values))]+1
+
+                    warning("[read_XSYG2R()] Duplicated temperature values found and increased by 1 K")
+
+                  }
+
+
+
+
+                  ##CASE (2)  (equal)
+                }else{
+
+                  temperature.values <-
+                    temp.sequence.object.curveValue.heating.element[,2]
+
+                }
+
+                ##reset values of the matrix
+                colnames(temp.sequence.object.curveValue.spectrum) <- temperature.values
+                temp.sequence.object.curveValue <- temp.sequence.object.curveValue.spectrum
+
+                ##change curve descriptor
+                temp.sequence.object.info$curveDescripter <- "Temperature [\u00B0C]; Wavelength [nm]; Counts [1/ch]"
+
+              }
+
+
+
+
+            }##endif
+          }##endif recalculate.TL.curves == TRUE
+
+
+          ##Set RLum.Data objects
+          if("Spectrometer" %in% temp.sequence.object.detector == FALSE){
+
+            if(is(temp.sequence.object.curveValue, "matrix") == FALSE){
+
+              temp.sequence.object.curveValue <-
+                get_XSYG.curve.values(temp.sequence.object.curveValue)
+
+            }
+
+
+            set_RLum(
+              class = "RLum.Data.Curve",
+              originator = "read_XSYG2R",
+              recordType = paste(temp.sequence.object.recordType,
+                                 " (", temp.sequence.object.detector,")",
+                                 sep = ""),
+              curveType = temp.sequence.object.curveType,
+              data = temp.sequence.object.curveValue,
+              info = temp.sequence.object.info)
+
+          }else if("Spectrometer" %in% temp.sequence.object.detector == TRUE) {
+
+
+            if(is(temp.sequence.object.curveValue, "matrix") == FALSE){
+
+              temp.sequence.object.curveValue <-
+                get_XSYG.spectrum.values(temp.sequence.object.curveValue)
+
+            }
+
+
+            set_RLum(
+              class = "RLum.Data.Spectrum",
+              originator = "read_XSYG2R",
+              recordType = paste(temp.sequence.object.recordType,
+                                 " (",temp.sequence.object.detector,")",
+                                 sep = ""),
+              curveType = temp.sequence.object.curveType,
+              data = temp.sequence.object.curveValue,
+              info = temp.sequence.object.info)
+
+          }
+
+        })
+
+        }else{
+
+         return(NULL)
+
+        }##if-try condition
+
+      }),
+       use.names = FALSE)
+
+
+      ##if the XSYG file is broken we get NULL as list element
+      if (!is.null(temp.sequence.object)) {
+        ##set RLum.Analysis object
+        temp.sequence.object <-  set_RLum(
+          originator = "read_XSYG2R",
+          class = "RLum.Analysis",
+          records = temp.sequence.object,
+          protocol = as.character(temp.sequence.header["protocol",1])
+        )
+
+        ##set parent uid of the RLum.Analysis object as parent ID of the records
+        temp.sequence.object <- .set_pid(temp.sequence.object)
+
+        ##update progress bar
+        if (txtProgressBar) {
+          setTxtProgressBar(pb, x)
+        }
+
+
+        ##merge output and return values
+        if(fastForward){
+          return(temp.sequence.object)
+
+        }else{
+          return(list(Sequence.Header = temp.sequence.header, Sequence.Object = temp.sequence.object))
+
+        }
+
+      }else{
+        return(temp.sequence.object)
+
+      }
+
+    })##end loop for sequence list
+
+    ##close ProgressBar
+    if(txtProgressBar ){close(pb)}
+
+    ##show output information
+    if(length(output[sapply(output, is.null)]) == 0){
+
+      message(paste0("\t >> ", XML::xmlSize(temp), " sequence(s) loaded successfully.\n"))
+
+    }else{
+
+      message(paste0("\t >> ", XML::xmlSize(temp), " sequence(s) in file. ",
+                XML::xmlSize(temp) - length(output[sapply(output, is.null)]),
+                " sequence(s) loaded successfully.\n"))
+
+      warning(paste0(length(output[sapply(output, is.null)]), " incomplete sequence(s) removed."))
+
+    }
+
+    ##output
+    invisible(output)
+
+  }#end if
+
+  ##get rid of the NULL elements (as stated before ... invalid files)
+  return(output[!sapply(output,is.null)])
+
+}
diff --git a/R/replicate_RLum.R b/R/replicate_RLum.R
new file mode 100644
index 0000000..438e06c
--- /dev/null
+++ b/R/replicate_RLum.R
@@ -0,0 +1,26 @@
+#' General replication function for RLum S4 class objects
+#'
+#' Function replicates RLum S4 class objects and returns a list of these objects
+#'
+#' @param object an object of class \code{\linkS4class{RLum}} (\bold{required})
+#'
+#' @param times \code{\link{integer}} (optional): number of times each element is
+#' repeated
+#'
+#' @return Returns a \code{\link{list}} of the replicated objects
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso
+#' \code{\linkS4class{RLum}}
+#'
+#' @keywords utilities
+#'
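+#' @examples
+#'
+#' ## a minimal sketch (uncomment for usage): replicate the first curve
+#' ## of the example dataset twice
+#' #data(ExampleData.RLum.Analysis, envir = environment())
+#' #curve <- get_RLum(IRSAR.RF.Data)[[1]]
+#' #replicate_RLum(curve, times = 2)
+#'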
+#' @export
+setGeneric("replicate_RLum", function (object, times = NULL) {
+   standardGeneric("replicate_RLum")
+})
+
diff --git a/R/report_RLum.R b/R/report_RLum.R
new file mode 100644
index 0000000..82aa91e
--- /dev/null
+++ b/R/report_RLum.R
@@ -0,0 +1,742 @@
+#' Create an HTML report for (RLum) objects
+#'
+#' This function creates an HTML report for a given object, listing its complete
+#' structure and content. The object itself is saved as a serialised .Rds file.
+#' The report file serves both as a convenient way of browsing through objects with
+#' complex data structures and as a means of properly documenting and saving
+#' objects.
+#'
+#' The HTML report is created with \code{\link[rmarkdown]{render}} and has the
+#' following structure:
+#' 
+#' \tabular{ll}{
+#'  \bold{Section} \tab \bold{Description} \cr
+#'  \code{Header} \tab A summary of general characteristics of the object \cr
+#'  \code{Object content} \tab A comprehensive list of the complete structure
+#'  and content of the provided object. \cr
+#'  \code{Object structure} \tab Summary of the object's structure given as a table \cr
+#'  \code{File} \tab Information on the saved RDS file \cr
+#'  \code{Session Info} \tab Captured output from sessionInfo() \cr
+#'  \code{Plots} \tab (optional) For \code{RLum-class} objects a variable number of plots \cr
+#' }
+#'
+#' The structure of the report can be controlled individually by providing one or more of the
+#' following arguments (all \code{logical}):
+#' 
+#' \tabular{ll}{
+#' \bold{Argument} \tab \bold{Description} \cr
+#' \code{header} \tab Hide or show general information on the object \cr
+#' \code{main} \tab Hide or show the object's content \cr
+#' \code{structure} \tab Hide or show object's structure \cr
+#' \code{rds} \tab Hide or show information on the saved RDS file \cr
+#' \code{session} \tab Hide or show the session info \cr
+#' \code{plot} \tab Hide or show the plots (depending on object) \cr
+#' }
+#' 
+#' Note that these arguments have higher precedence than \code{compact}.
+#'
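+#' A brief sketch of how the components can be toggled, assuming an object
+#' \code{x} to be reported on:
+#'
+#' \preformatted{
+#' report_RLum(object = x, structure = FALSE, session = FALSE)
+#' }
+#'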
+#' Further options that can be provided via the \code{...} argument:
+#' 
+#' \tabular{ll}{
+#' \bold{Argument} \tab \bold{Description} \cr
+#' \code{short_table} \tab If \code{TRUE} only show the first and last 5 rows of long tables. \cr
+#' \code{theme} \tab Specifies the Bootstrap
+#' theme to use for the report. Valid themes include "default", "cerulean", "journal", "flatly", 
+#' "readable", "spacelab", "united", "cosmo", "lumen", "paper", "sandstone", "simplex", and "yeti". \cr
+#' \code{highlight} \tab Specifies the syntax highlighting
+#'  style. Supported styles include "default", "tango", "pygments", "kate", "monochrome", 
+#'  "espresso", "zenburn", "haddock", and "textmate". \cr
+#' \code{css} \tab \code{TRUE} or \code{FALSE} to enable/disable custom CSS styling \cr
+#' }
+#' 
+#' The following arguments can be used to customise the report via CSS (Cascading Style Sheets):
+#' 
+#' \tabular{ll}{
+#' \bold{Argument} \tab \bold{Description} \cr
+#' \code{font_family} \tab Define the font family of the HTML document (default: arial) \cr
+#' \code{headings_size} \tab Size of the <h1> to <h6> tags used to define HTML headings (default: 166\%). \cr
+#' \code{content_color} \tab Color of the object's content (default: #a72925). \cr
+#' }
+#' 
+#' Note that these arguments must all be of class \code{\link{character}} and follow standard CSS syntax.
+#' For exhaustive CSS styling you can provide a custom CSS file for argument \code{css.file}. 
+#' CSS styling can be turned off using \code{css = FALSE}.
+#'
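+#' A brief sketch of the CSS arguments (all values are illustrative):
+#'
+#' \preformatted{
+#' report_RLum(object = x, font_family = "helvetica",
+#'             headings_size = "140\%", content_color = "#2c3e50")
+#' }
+#'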
+#' @param object (\bold{required}): 
+#' The object to be reported on, preferably of any \code{RLum}-class.
+#' 
+#' @param file \code{\link{character}} (with default): 
+#' A character string naming the output file. If no filename is provided a 
+#' temporary file is created.
+#' 
+#' @param title \code{\link{character}} (with default):
+#' A character string specifying the title of the document.
+#' 
+#' @param compact \code{\link{logical}} (with default):
+#' When \code{TRUE} the following report components are hidden: 
+#' \code{@@.pid}, \code{@@.uid}, \code{'Object structure'}, \code{'Session Info'}
+#' and only the first and last 5 rows of long matrices and data frames are shown.
+#' See details.
+#' 
+#' @param timestamp \code{\link{logical}} (with default):
+#' \code{TRUE} to add a timestamp to the filename (suffix).
+#' 
+#' @param launch.browser \code{\link{logical}} (with default):
+#' \code{TRUE} to open the HTML file in the system's default web browser after
+#' it has been rendered.
+#' 
+#' @param css.file \code{\link{character}} (optional):
+#' Path to a CSS file to change the default styling of the HTML document.
+#' 
+#' @param quiet \code{\link{logical}} (with default):
+#' \code{TRUE} to suppress printing of the pandoc command line.
+#' 
+#' @param clean \code{\link{logical}} (with default): 
+#' \code{TRUE} to clean intermediate files created during rendering.
+#' 
+#' @param ... further arguments passed to or from other methods and to control
+#' the document's structure (see details).
+#' 
+#' @section Function version: 0.1.0
+#' 
+#' @author 
+#' Christoph Burow, University of Cologne (Germany) \cr
+#' 
+#' @note
+#' This function requires the R packages 'rmarkdown', 'pander' and 'rstudioapi'.
+#' 
+#' @seealso \code{\link[rmarkdown]{render}}, \code{\link[pander]{pander_return}},
+#' \code{\link[pander]{openFileInOS}}, \code{\link[rstudioapi]{viewer}},
+#' \code{\link{browseURL}}
+#' 
+#' @return
+#' Writes an HTML and a .Rds file.
+#' 
+#' @export
+#'
+#' @examples
+#' 
+#' \dontrun{
+#' ## Example: RLum.Results ----
+#' 
+#' # load example data
+#' data("ExampleData.DeValues")
+#' 
+#' # apply the MAM-3 age model and save results
+#' mam <- calc_MinDose(ExampleData.DeValues$CA1, sigmab = 0.2) 
+#' 
+#' # create the HTML report
+#' report_RLum(object = mam, file = "~/CA1_MAM.Rmd",
+#'             timestamp = FALSE,
+#'             title = "MAM-3 for sample CA1")
+#' 
+#' # when creating a report the input object is automatically saved to a
+#' # .Rds file (see saveRDS()).
+#' mam_report <- readRDS("~/CA1_MAM.Rds")
+#' all.equal(mam, mam_report)
+#' 
+#' 
+#' ## Example: Temporary file & Viewer/Browser ----
+#' 
+#' # (a)
+#' # Specifying a filename is not necessarily required. If no filename is provided,
+#' # the report is rendered in a temporary file. If you use the RStudio IDE, the
+#' # temporary report is shown in the interactive Viewer pane.
+#' report_RLum(object = mam)
+#' 
+#' # (b)
+#' # Additionally, you can view the HTML report in your system's default web browser.
+#' report_RLum(object = mam, launch.browser = TRUE)
+#' 
+#' 
+#' ## Example: RLum.Analysis ----
+#' 
+#' data("ExampleData.RLum.Analysis")
+#' 
+#' # create the HTML report (note that specifying a file
+#' # extension is not necessary)
+#' report_RLum(object = IRSAR.RF.Data, file = "~/IRSAR_RF")
+#' 
+#' 
+#' ## Example: RLum.Data.Curve ----
+#' 
+#' data.curve <- get_RLum(IRSAR.RF.Data)[[1]]
+#' 
+#' # create the HTML report
+#' report_RLum(object = data.curve, file = "~/Data_Curve")
+#' 
+#' ## Example: Any other object ----
+#' x <- list(x = 1:10, 
+#'           y = runif(10, -5, 5), 
+#'           z = data.frame(a = LETTERS[1:20], b = dnorm(0:9)),
+#'           NA)
+#' 
+#' report_RLum(object = x, file = "~/arbitray_list")
+#' }
+report_RLum <- function(object, 
+                        file  = tempfile(),
+                        title = "RLum.Report",
+                        compact = TRUE,
+                        timestamp = TRUE,
+                        launch.browser = FALSE,
+                        css.file = NULL,
+                        quiet = TRUE,
+                        clean = TRUE, 
+                        ...) {
+  
+  ## ------------------------------------------------------------------------ ##
+  ## PRE-CHECKS ----
+  
+  # check if required namespace(s) are available
+  if (!requireNamespace("rmarkdown", quietly = TRUE))
+    stop("Creating object reports requires the 'rmarkdown' package.",
+         " To install this package run 'install.packages('rmarkdown')' in your R console.", 
+         call. = FALSE)
+  if (!requireNamespace("pander", quietly = TRUE))
+    stop("Creating object reports requires the 'pander' package.",
+         " To install this package run 'install.packages('pander')' in your R console.", 
+         call. = FALSE)
+  if (!requireNamespace("rstudioapi", quietly = TRUE)) {
+    warning("Creating object reports requires the 'rstudioapi' package.",
+            " To install this package run 'install.packages('rstudioapi')' in your R console.", 
+            call. = FALSE)
+    isRStudio <- FALSE
+  } else {
+    isRStudio <- TRUE
+  }
+  
+  # check if files exist
+  if (!is.null(css.file))
+    if(!file.exists(css.file))
+      stop("Couldn't find the specified CSS file at '", css.file, "'", call. = FALSE)
+  
+  ## ------------------------------------------------------------------------ ##
+  ## STRUCTURE ----
+  structure <- list(header = TRUE,
+                    main = TRUE,
+                    structure = ifelse(compact, FALSE, TRUE),
+                    rds = TRUE,
+                    session = ifelse(compact, FALSE, TRUE),
+                    plot = TRUE)
+  
+  # specifying report components has higher precedence than the 'compact' arg
+  structure <- modifyList(structure, list(...))
+  
+  
+  ## OPTIONS ----
+  options <- list(short_table = ifelse(compact, TRUE, FALSE),
+                  theme = "cerulean",
+                  highlight = "haddock",
+                  css = TRUE)
+  
+  options <- modifyList(options, list(...))
+  
+  ## CSS DEFAULTS ----
+  css <- list(font_family = "arial",
+              headings_size = "166%",
+              content_color = "#a72925")
+  
+  css <- modifyList(css, list(...))
+  
+  ## ------------------------------------------------------------------------ ##
+  ## CREATE FILE ----
+  
+  isTemp <- missing(file)
+  
+  # make sure the filename ends with .Rmd extension
+  if (!grepl(".rmd$", file, ignore.case = TRUE))
+    file <- paste0(file, ".Rmd")
+  
+  # Timestamp: currently added as a suffix to the filename
+  # if we were to change it to a prefix, we need to first figure out the filename
+  # (i.e., separate it from the possible path) using the following regular 
+  # expression strsplit(string, "\\\\|\\\\\\\\|\\/|\\/\\/"). This looks for
+  # \, \\, /, // and the last element is the filename.
+  if (timestamp)
+    file <- gsub(".rmd$", paste0(format(Sys.time(), "_%Y%b%d"), ".Rmd"), file,
+                 ignore.case = TRUE)
+  
+  # sanitize file name
+  file <- gsub("\\\\", "\\/", file)
+  file.html <- gsub(".rmd$", ".html", file, ignore.case = TRUE)
+  file.rds <- gsub(".rmd$", ".Rds", file, ignore.case = TRUE)
+  
+  # Create and open the file
+  file.create(file)
+  tmp <- file(file, open = "w")
+  
+  # save RDS file
+  saveRDS(object, file.rds)
+  
+  ## ------------------------------------------------------------------------ ##
+  ## WRITE CONTENT ----
+  
+  # HEADER ----
+  writeLines("---", tmp)
+  writeLines("output:", tmp)
+  writeLines("  html_document:", tmp)
+  writeLines("    mathjax: null", tmp)
+  writeLines("    title: RLum.Report", tmp)
+  writeLines(paste("    theme:", options$theme), tmp)
+  writeLines(paste("    highlight:", options$highlight), tmp)
+  writeLines("    toc: true", tmp)
+  writeLines("    toc_float: true", tmp)
+  writeLines("    toc_depth: 6", tmp)
+  if (!is.null(css.file))
+    writeLines(paste("    css:", css.file), tmp)
+  writeLines("    md_extensions: -autolink_bare_uris", tmp)
+  writeLines("---", tmp)
+  
+  # CASCADING STYLE SHEETS ----
+  if (options$css) {
+    writeLines(paste0(
+      "<style>",
+      paste0("h1, h2, h3, h4, h5, h6 { font-size:", css$headings_size," } \n"),
+      paste0("#root { color: ", css$content_color," } \n"),
+      paste0("BODY { font-family:", css$font_family, " } \n"),
+      "</style>"
+    ),
+    tmp)
+  }
+  
+  # INFO ----
+  # check if Luminescence package is installed and get details
+  pkg <- as.data.frame(installed.packages(), row.names = FALSE)
+  if ("Luminescence" %in% pkg$Package)
+    pkg <- pkg[which(pkg$Package == "Luminescence"), ]
+  else
+    pkg <- data.frame(LibPath = "-", Version = "not installed", Built = "-")
+  
+  # Title
+  writeLines(paste("<div align='center'><h1>", title, "</h1></div>\n\n<hr>"), tmp) 
+  
+  # write information on R, Luminescence package, Object
+  if (structure$header) {
+    writeLines(paste("**Date:**", Sys.time(), "\n\n",
+                     "**R version:**", R.version.string, "\n\n",
+                     "**Luminescence package** \n\n",
+                     "**  » Path:**", pkg$LibPath, "\n\n",
+                     "**  » Version:**", pkg$Version, "\n\n",
+                     "**  » Built:**", pkg$Built, "\n\n",
+                     "**Object** \n\n",
+                     "**  » Created:**", 
+                     tryCatch(paste(paste(strsplit(object@.uid, '-|\\.')[[1]][1:3], collapse = "-"),
+                                    strsplit(object@.uid, '-|\\.')[[1]][4]),
+                              error = function(e) "-"), "\n\n",
+                     "**  » Class:**", class(object), "\n\n",
+                     "**  » Originator:**", 
+                     tryCatch(object@originator, error = function(e) "-"), "\n\n",
+                     "**  » Name:**", deparse(substitute(object)), "\n\n",
+                     "**  » Parent ID:**", 
+                     tryCatch(object@.pid, error = function(e) "-"), "\n\n",
+                     "**  » Unique ID:**", 
+                     tryCatch(object@.uid, error = function(e) "-"), "\n\n",
+                     "<hr>"),
+               tmp)
+    
+    if (isTemp) {
+      writeLines(paste("<a href=", paste0("file:///", file.html),
+                       "class='btn btn-primary' download>Save report</a>"), tmp)
+      writeLines(paste("<a href=", paste0("file:///", file.rds),
+                       "class='btn btn-primary' download>Save data</a> \n\n"), tmp)
+    }
+    
+  }#EndOf::Header
+  
+  # OBJECT ----
+  elements <- .struct_RLum(object, root = deparse(substitute(object)))
+  
+  if (structure$main) {
+    for (i in 1:nrow(elements)) {
+      
+      # SKIP ELEMENT?
+      # hide @.pid and @.uid if this is a shortened report (default)
+      if (elements$bud[i] %in% c(".uid", ".pid") && compact == TRUE)
+        next
+      
+      
+      # HEADER
+      short.name <- elements$bud[i]
+      links <- gsub("[^@$\\[]", "", as.character(elements$branch[i]))
+      type <- ifelse(nchar(links) == 0, "", substr(links, nchar(links), nchar(links)))
+      if (type == "[")
+        type = ""
+      
+      # HTML header level is determined by the elements depth in the object
+      # exception: first row is always the object's name and has depth zero
+      if (i == 1)
+        hlevel <- "#"
+      else
+        hlevel <- paste(rep("#", elements$depth[i]), collapse = "")
+      
+      # write header; number of dots represents depth in the object. because there
+      # may be duplicate header names, for each further occurrence of a name
+      # Zero-width non-joiner entities are added to the name (non visible)
+      writeLines(paste0(hlevel, " ",
+                        "<span style='color:#74a9d8'>",
+                        paste(rep("..", elements$depth[i]), collapse = ""),
+                        type,
+                        "</span>",
+                        paste(rep("‌", elements$bud.freq[i]), collapse = ""),
+                        short.name[length(short.name)],
+                        ifelse(elements$endpoint[i], "", "{#root}"),
+                        "\n\n"),
+                 tmp)
+      
+      # SUBHEADER
+      # contains information on Class, Length, Dimensions, Path
+      writeLines(paste0("<pre style='padding:0px;border:0px'>",
+                        "<span style='color:#428bca'>",
+                        " Class: </span>", elements$class[i],
+                        "<span style='color:#428bca'>",
+                        "   Length: </span>", elements$length[i],
+                        "<span style='color:#428bca'>",
+                        "   Dimensions: </span>", 
+                        ifelse(elements$row[i] != 0, paste0(elements$row[i], ", ", elements$col[i]), "-"),
+                        "<span style='color:#428bca'>",
+                        "\n Path: </span>", gsub("@", "<span>@</span>", elements$branch[i]),
+                        "</pre>",
+                        "\n\n"),
+                 tmp)
+      
+      # TABLE CONTENT
+      # the content of a branch is only printed if it was determined an endpoint
+      # in the objects structure
+      if (elements$endpoint[i]) {
+        table <- tryCatch(eval(parse(text = elements$branch[i])),
+                          error = function(e) {
+                            return(NULL)
+                          })
+        # exceptions: content may be NULL; convert raw to character to stay
+        # compatible with pander::pander
+        if (is.null(table) | length(table) == 0)
+          table <- "NULL"
+        if (any(class(table) == "raw"))
+          table <- as.character(table)
+        
+        # exception: surround objects of class "call" with <pre> tags to prevent
+        # HTML autoformatting
+        if (elements$class[i] == "call") {
+          table <- capture.output(table)
+          writeLines("<pre>", tmp)
+          for (i in 1:length(table))
+            writeLines(table[i], tmp)
+          writeLines("</pre>", tmp)
+          table <- NULL
+        }
+        
+        # shorten the table if it has more than 15 rows
+        if (options$short_table) {
+          if (is.matrix(table) || is.data.frame(table)) {
+            if (nrow(table) > 15) {
+              
+              writeLines(pander::pander_return(rbind(head(table, 5),
+                                                     tail(table, 5)),
+                                               caption = "shortened (only first and last five rows shown)"), tmp)
+              next
+              
+            }
+          }
+        }
+        
+        # write table using pander and end each table with a horizontal line
+        writeLines(pander::pander_return(table),
+                   tmp)
+        writeLines("\n\n<hr>", tmp)
+        
+      }
+    }
+  }#EndOf::Main
+  
+  # OBJECT STRUCTURE ----
+  if (structure$structure) {
+    writeLines(paste("\n\n# Object structure\n\n"), tmp)
+    
+    elements.html <- elements
+    elements.html$branch <- gsub("\\$", "$", elements$branch)
+    writeLines(pander::pander_return(elements.html, 
+                                     justify = paste(rep("l", ncol(elements)), collapse = "")),
+               tmp)
+    writeLines("\n\n", tmp)
+  }#EndOf::Structure
+    
+  if (structure$rds) {
+    # SAVE SERIALISED OBJECT (.rds file) ----
+    writeLines(paste("<hr># File \n\n"), tmp)
+    
+    writeLines(paste0("<code>",
+                      "<a href='", paste0("file:///", gsub("\\~\\/", "", file.rds)),"' download>",
+                      "Click here to access the data file", "</a>",
+                      "</code>"), tmp)
+    
+    writeLines(paste("\nThe R object was saved to <span style='color:#428bca'>", file.rds, "</span>.",
+                     "To import the object into your R session with the following command:",
+                     paste0("<pre>",
+                            "x <- readRDS('", file.rds, "')",
+                            "</pre>"),
+                     "**NOTE:** If you moved the file to another directory or",
+                     "renamed the file you need to change the path/filename in the",
+                     "code above accordingly!"),
+               tmp)
+  }#EndOf::File
+  
+  # SESSION INFO ----
+  if (structure$session) {
+    writeLines(paste("\n\n<hr># Session Info\n\n"), tmp)
+    sessionInfo <- capture.output(sessionInfo())
+    writeLines(paste(sessionInfo, collapse = "\n\n"),
+               tmp)
+  }
+  
+  # PLOTTING ----
+  if (structure$plot) {
+    isRLumObject <- length(grep("RLum", class(object)))
+    
+    if (is.list(object))
+      isRLumList <- all(sapply(object, function(x) inherits(x, "RLum.Data.Curve")))
+    else
+      isRLumList <- FALSE
+    
+    if (isRLumObject | isRLumList) {
+      
+      # mutual exclusivity: it is either a list or an RLum-Object 
+      if (isRLumList)
+        plotCommand <- "invisible(sapply(x, plot)) \n"
+      else
+        plotCommand <- "plot(x) \n"
+      
+      writeLines(paste("\n\n<hr># Plots\n\n"), tmp)
+      writeLines(paste0(
+        "```{r}\n",
+        "library(Luminescence) \n",
+        "x <- readRDS('", file.rds,"') \n",
+        plotCommand,
+        "```"),
+        tmp)
+      
+      if (inherits(object, "RLum.Results")) {
+        
+        # AGE MODELS ----
+        models <- c("calc_CommonDose",
+                    "calc_CentralDose",
+                    "calc_FiniteMixture",
+                    "calc_MinDose",
+                    "calc_MaxDose",
+                    "calc_IEU",
+                    "calc_FuchsLang2001")
+        
+        if (object@originator %in% models) {
+          writeLines(paste0(
+            "```{r}\n",
+            "plot_AbanicoPlot(x) \n",
+            "plot_Histogram(x) \n",
+            "plot_KDE(x) \n",
+            "plot_ViolinPlot(x) \n",
+            "```"),
+            tmp)
+        }
+      }
+      
+    }
+  }#EndOf::Plot
+  
+  ## ------------------------------------------------------------------------ ##
+  ## CLOSE & RENDER ----
+  close(tmp)
+  on.exit(closeAllConnections())
+  rmarkdown::render(file, clean = clean, quiet = quiet)
+  
+  ## ------------------------------------------------------------------------ ##
+  ## SHOW FILE -----
+  
+  # SHOW REPORT IN RSTUDIOS VIEWER PANE ----
+  if (isRStudio) {
+    if (isTemp) {
+      try(rstudioapi::viewer(file.html))
+    } else {
+      # The Viewer Pane only works for files in a session's temp directory
+      # see: https://support.rstudio.com/hc/en-us/articles/202133558-Extending-RStudio-with-the-Viewer-Pane
+      file.copy(file.html, file.path(tempdir(), "report.html"), overwrite = TRUE)
+      try(rstudioapi::viewer(file.path(tempdir(), "report.html")))
+    }
+  }
+  
+  # launch browser if desired
+  # browseURL() listens on localhost to show the file, with the problem that
+  # the download links don't work anymore. Hence, we try to open the file
+  # with pander::openFileInOS and use browseURL() only as a fallback
+  if (launch.browser) {
+    opened <- tryCatch(pander::openFileInOS(file.html), error = function(e) "error")
+    if (!is.null(opened))
+      try(browseURL(file.html))
+  }
+  
+  
+  ## ------------------------------------------------------------------------ ##
+  ## CLEANUP ----
+  
+  # note that 'clean' is also passed to rmarkdown::render
+  if (clean)
+    file.remove(file)
+  
+  invisible()
+}
+
+
+################################################################################
+##                                                                            ##
+##                        HELPER FUNCTIONS                                    ##
+##                                                                            ##
+################################################################################
+
+# ---------------------------------------------------------------------------- #
+# This is a recursive function that walks the object's structure and prints
+# all slots/elements along with their class, length and depth.
+# ---------------------------------------------------------------------------- #
+.tree_RLum <- function(x, root) {
+  
+  if (missing(root))
+    root <- deparse(substitute(x))
+  
+  ## S4 object -----
+  if (isS4(x)) {
+    
+    # print -----
+    cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"), sep = "|")
+    
+    for (slot in slotNames(x)) {
+      s4.root <- paste0(root, "@", slot)
+      .tree_RLum(slot(x, slot), root = s4.root)
+    }
+    invisible()
+    
+    ## List objects -----
+  } else if (inherits(x, "list") || (typeof(x) == "list" && !inherits(x, "data.frame"))) {
+    
+    if (!is.null(names(x)) && length(x) != 0) {
+      
+      # print -----
+      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"), sep = "|") 
+      
+      element <- names(x)
+      
+      for (i in 1:length(x)) {
+        
+        if (grepl(" ", element[i]))
+          element[i] <- paste0("`", element[i], "`")
+        
+        if (element[i] == "")
+          list.root <- paste0(root, "[[", i, "]]")
+        else
+          list.root <- paste0(root, "$", element[i])
+        
+        .tree_RLum(x[[i]], root = list.root)
+      }
+    } else if (length(x) != 0) {
+      
+      # print -----
+      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"), sep = "|") 
+      
+      element <- paste0("[[", seq(1, length(x),1), "]]")
+      
+      for (i in 1:length(x)) {
+        if (grepl(" ", element[i]))
+          element[i] <- paste0("`", element[i], "`")
+        
+        list.root <- paste0(root, element[i])
+        .tree_RLum(x[[i]], root = list.root)
+      }
+    } else if (length(x) == 0) {
+      
+      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"), sep = "|") 
+      
+    }
+    
+    invisible()
+    
+    ## Data frames -----
+  } else if (inherits(x, "data.frame")) { 
+    
+    if (any(sapply(x, function(col) { inherits(col, "matrix") } ))) {
+      
+      element <- names(x)
+      
+      for (i in 1:length(x)) {
+        if (grepl(" ", element[i]))
+          element[i] <- paste0("`", element[i], "`")
+        
+        list.root <- paste0(root, "$", element[[i]])
+        .tree_RLum(x[[i]], root = list.root)
+      }
+    } else {
+      # print ----
+      cat(c(root, .class(x), base::length(x), .depth(root), TRUE, .dimension(x), "\n"), sep = "|")
+    }
+    invisible()
+    
+    ## Last elements -----  
+  }  else {
+    
+    # print ----
+    cat(c(root, .class(x), base::length(x), .depth(root), TRUE, .dimension(x), "\n"), sep = "|") 
+    
+    invisible()
+  }
+}
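+
+# Example of a record emitted by .tree_RLum(): each node becomes one
+# pipe-separated line (branch|class|length|depth|endpoint|row|col) that is
+# later parsed by .struct_RLum(). For a hypothetical object 'x' with a list
+# slot 'records' holding two elements, the corresponding line would read:
+#   x@records|list|2|1|FALSE|0|0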
+
+# ---------------------------------------------------------------------------- #
+# a) Derive the depth in the structure tree by splitting the branch string at
+# the indicative accessors @, $, [[
+# b) Wrapper for dim() to cope with NULL values
+# c) Wrapper for class() that collapses the classes of an object
+# ---------------------------------------------------------------------------- #
+.depth <- function(x) {
+  length(strsplit(x, split = "\\$|@|\\[\\[")[[1]]) - 1
+}
+.dimension <- function(x) {
+  if (!is.null(dim(x)))
+    dim <- paste(dim(x), collapse = "|")
+  else
+    dim <- c(0, 0)
+}
+.class <- function(x) {
+  paste(class(x), collapse = "/")
+}
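+
+# Worked examples for the helpers above (illustrative only):
+#   .depth("x$a@b[[1]]")        # 3 - three accessors in the branch path
+#   .dimension(matrix(0, 2, 3)) # "2|3"
+#   .class(data.frame())        # "data.frame"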
+
+# ---------------------------------------------------------------------------- #
+# This function captures the output of the real worker .tree_RLum and returns
+# the structure of the object as a data.frame
+# ---------------------------------------------------------------------------- #
+.struct_RLum <- function(x, root) {
+  if (missing(root))
+    root <- deparse(substitute(x))
+  s <- capture.output(.tree_RLum(x, root = root))
+  df <- as.data.frame(do.call(rbind, strsplit(s, "|", fixed = TRUE)), stringsAsFactors = FALSE)
+  names(df) <- c("branch", "class", "length", "depth", "endpoint", "row", "col")
+  df$depth <- as.integer(df$depth)
+  df$length <- as.numeric(df$length)
+  df$endpoint <- as.logical(df$endpoint)
+  df$row <- as.integer(df$row)
+  df$col <- as.integer(df$col)
+  df$bud <- do.call(c, lapply(strsplit(df$branch, "\\$|@|\\[\\["), 
+                              function(x) x[length(x)]))
+  if (length(grep("]", df$bud)) != 0)
+    df$bud[grep("]", df$bud)] <- paste0("[[", df$bud[grep("]", df$bud)])
+  df$bud.freq <- NA # 1:nrow(df)
+  
+  # reorder data.frame
+  df <- df[ ,c("branch", "bud", "bud.freq", "class", 
+               "length", "depth", "row", "col", "endpoint")]
+  
+  # for the report we must not have identical last element names at the same
+  # depth (HTML cannot discriminate between #links of <h> headers)
+  ## TODO: this is highly inefficient for unnamed lists due to recurring indices
+  dlevel <- max(table(df$bud))
+  
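+  # pass over the table repeatedly: in pass i the first still unnumbered
+  # occurrence of every bud name gets frequency i - 1, so bud.freq counts
+  # how many identical names precede each entry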
+  for (i in 1:dlevel) {
+    unique.bud <- unique(df[is.na(df$bud.freq), ]$bud)
+    df[is.na(df$bud.freq), ][match(unique.bud, df[is.na(df$bud.freq), ]$bud), ]$bud.freq <- i - 1
+  }
+  
+  invisible(df)
+}
\ No newline at end of file
diff --git a/R/set_RLum.R b/R/set_RLum.R
new file mode 100644
index 0000000..996b8ce
--- /dev/null
+++ b/R/set_RLum.R
@@ -0,0 +1,77 @@
+#' General set function for RLum S4 class objects
+#'
+#' Function calls object-specific set functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the given class, the
+#' corresponding method to create an object from this class will be selected.
+#' Allowed additional arguments can be found in the documentations of the
+#' corresponding \code{\linkS4class{RLum}} class: \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}}, \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}
+#'
+#' @param class \code{\linkS4class{RLum}} (\bold{required}): name of the S4 class to
+#' create
+#'
+#' @param originator \code{\link{character}} (automatic): contains the name of the calling function
+#' (the function that produces this object); can be set manually.
+#'
+#' @param .uid \code{\link{character}} (automatic): sets a unique ID for this object
+#' using the internal C++ function \code{.create_UID}.
+#'
+#' @param .pid \code{\link{character}} (with default): option to provide a parent id for nesting
+#' at will.
+#'
+#' @param \dots further arguments that one might want to pass to the specific
+#' set method
+#'
+#' @return Returns an object of the specified class.
+#'
+#' @section Function version: 0.3.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#' ##produce empty objects from each class
+#' set_RLum(class = "RLum.Data.Curve")
+#' set_RLum(class = "RLum.Data.Spectrum")
+#' set_RLum(class = "RLum.Data.Image")
+#' set_RLum(class = "RLum.Analysis")
+#' set_RLum(class = "RLum.Results")
+#'
+#' ##produce a curve object with arbitrary curve values
+#' object <- set_RLum(
+#' class = "RLum.Data.Curve",
+#' curveType = "arbitrary",
+#' recordType = "OSL",
+#' data = matrix(c(1:100,exp(-c(1:100))),ncol = 2))
+#'
+#' ##plot this curve object
+#' plot_RLum(object)
+#'
+#' @export
+setGeneric("set_RLum", function (class, originator, .uid = .create_UID(), .pid = NA_character_, ... ) {
+  class(class) <- as.character(class)
+
+  if(missing(originator)) {
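+    ## derive the originator from the calling function one frame up:
+    ## sys.call(which = -1) returns that call, whose first element is a
+    ## 'name' object whenever the function was called by name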
+    if (is(sys.call(which = -1)[[1]], "name")) {
+      originator <- as.character(sys.call(which = -1)[[1]])
+    } else{
+      originator <- NA_character_
+    }
+  }
+
+  standardGeneric("set_RLum")
+})
+
diff --git a/R/set_Risoe.BINfileData.R b/R/set_Risoe.BINfileData.R
new file mode 100644
index 0000000..2409e71
--- /dev/null
+++ b/R/set_Risoe.BINfileData.R
@@ -0,0 +1,25 @@
+#' General set function for Risoe.BINfileData S4 class objects
+#'
+#' Function calls object-specific set functions for Risoe.BINfileData S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{Risoe.BINfileData}} objects.\cr Depending on the input, the
+#' corresponding set function will be selected. Allowed arguments can be found
+#' in the documentation of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
+#'
+#' @param METADATA \code{\link{data.frame}}: metadata of the BIN-file
+#' @param DATA \code{\link{list}}: measurement data (count values)
+#' @param .RESERVED \code{\link{list}}: reserved (unparsed) BIN-file fields
+#'
+#' @return Returns a \code{\linkS4class{Risoe.BINfileData}} object.
+#' @section Function version: 0.1
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#' @seealso
+#' \code{\linkS4class{Risoe.BINfileData}}
+#' @keywords utilities
+#' 
+#' @export
+setGeneric("set_Risoe.BINfileData", function(METADATA, DATA, .RESERVED) {
+  standardGeneric("set_Risoe.BINfileData")
+})
diff --git a/R/structure_RLum.R b/R/structure_RLum.R
new file mode 100644
index 0000000..04c8631
--- /dev/null
+++ b/R/structure_RLum.R
@@ -0,0 +1,43 @@
+#' General structure function for RLum S4 class objects
+#'
+#' Function calls object-specific structure functions for RLum S4 class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding structure function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum}
+#'
+#' @param \dots further arguments that one might want to pass to the specific
+#' structure method
+#'
+#' @return Returns a \code{data.frame} with structure of the object.
+#'
+#' @section Function version: 0.2.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{RLum.Data.Image}},
+#' \code{\linkS4class{RLum.Data.Spectrum}},
+#' \code{\linkS4class{RLum.Analysis}},
+#' \code{\linkS4class{RLum.Results}}
+#'
+#' @keywords utilities
+#'
+#' @examples
+#'
+#' ##load example data
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ##show structure
+#' structure_RLum(OSL.SARMeasurement$Sequence.Object)
+#'
+#' @export
+setGeneric("structure_RLum", function(object, ...) {
+  standardGeneric("structure_RLum")
+})
diff --git a/R/template_DRAC.R b/R/template_DRAC.R
new file mode 100644
index 0000000..3fc7eea
--- /dev/null
+++ b/R/template_DRAC.R
@@ -0,0 +1,324 @@
+#' Create a DRAC input data template (v1.1)
+#'
+#' This function returns a DRAC input template (v1.1) to be used in conjunction
+#' with the \code{\link{use_DRAC}} function.
+#' 
+#' @param nrow \code{\link{integer}} (with default): specifies the number of rows
+#' of the template (i.e., the number of data sets you want to submit)
+#' 
+#' @param notification \code{\link{logical}} (with default): show or hide the
+#' notification
+#'
+#' @return A \code{\link{list}} of class \code{DRAC.list}.
+#' 
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @references
+#'
+#' Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
+#' Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
+#'
+#' @seealso \code{\link{as.data.frame}} \code{\link{list}} 
+#'
+#' @examples
+#' 
+#' # create a new DRAC input template
+#' input <- template_DRAC()
+#' 
+#' # show content of the input
+#' print(input)
+#' print(input$`Project ID`)
+#' print(input[[4]])
+#' 
+#' 
+#' ## Example: DRAC Quartz example
+#' # note that you only have to assign new values where they
+#' # differ from the default values
+#' input$`Project ID` <- "DRAC-Example"
+#' input$`Sample ID` <- "Quartz"
+#' input$`Conversion factors` <- "AdamiecAitken1998"
+#' input$`ExternalU (ppm)` <- 3.4
+#' input$`errExternal U (ppm)` <- 0.51
+#' input$`External Th (ppm)` <- 14.47
+#' input$`errExternal Th (ppm)` <- 1.69
+#' input$`External K (%)` <- 1.2
+#' input$`errExternal K (%)` <- 0.14
+#' input$`Calculate external Rb from K conc?` <- "N"
+#' input$`Calculate internal Rb from K conc?` <- "N"
+#' input$`Scale gammadoserate at shallow depths?` <- "N"
+#' input$`Grain size min (microns)` <- 90
+#' input$`Grain size max (microns)` <- 125
+#' input$`Water content ((wet weight - dry weight)/dry weight) %` <- 5
+#' input$`errWater content %` <- 2
+#' input$`Depth (m)` <- 2.2
+#' input$`errDepth (m)` <- 0.22
+#' input$`Overburden density (g cm-3)` <- 1.8
+#' input$`errOverburden density (g cm-3)` <- 0.1
+#' input$`Latitude (decimal degrees)` <- 30.0000
+#' input$`Longitude (decimal degrees)` <- 70.0000
+#' input$`Altitude (m)` <- 150
+#' input$`De (Gy)` <- 20
+#' input$`errDe (Gy)` <- 0.2
+#' 
+#' # use DRAC
+#' \dontrun{
+#' output <- use_DRAC(input)
+#' }
+#' 
+#' @export
+template_DRAC <- function(nrow = 1, notification = TRUE) {
+  
+  ## TODO:
+  # 1 - allow mineral specific presets; new argument 'mineral'
+  # 2 - add option to return the DRAC example data set
+  
+  if (nrow < 0 | nrow > 33) 
+    stop("'nrow' must be a number between 0 and 33.", call. = FALSE)
+  
+  ## LEGAL NOTICE ----
+  messages <- list("\n",
+                   "\t-------------------- IMPORTANT NOTE ------------------------\n",
+                   "\t This function returns a DRAC input template to be used in ",
+                   "\t conjunction with the use_DRAC() function.  \n",
+                   "\t The template was reproduced with great care, but we do not",
+                   "\t take any responsibility and we are not liable for any ",
+                   "\t mistakes or unforeseen misbehaviour.",
+                   "\t Note that this template is only compatible with DRAC",
+                   "\t version 1.1. Before using this template make sure that",
+                   "\t this is the correct version, otherwise expect unspecified",
+                   "\t errors.\n",
+                   "\t Please ensure you cite the use of DRAC in your work,",
+                   "\t published or otherwise. Please cite the website name and",
+                   "\t version (e.g. DRAC v1.1) and the accompanying journal",
+                   "\t article:",
+                   "\t Durcan, J.A., King, G.E., Duller, G.A.T., 2015.",
+                   "\t DRAC: Dose rate and age calculation for trapped charge",
+                   "\t dating. Quaternary Geochronology 28, 54-61. \n",
+                   "\t Set 'notification = FALSE' to hide this message. \n",
+                   "\t-------------------- IMPORTANT NOTE ------------------------",
+                   "\n")
+  
+  if (notification) lapply(messages, message)
+  
+  # CREATE TEMPLATE ----
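+  # each field is a vector of default values that carries its DRAC metadata
+  # as attributes: 'required', 'allowsX' (whether 'X' is a valid input),
+  # 'key' (the DRAC column ID) and 'description'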
+  template <- list(
+    
+    `Project ID` = 
+      structure(rep("RLum", nrow), required = TRUE, allowsX = FALSE, key = "TI:1",
+                description = "Inputs can be alphabetic, numeric or selected symbols (/ - () [] _). Spaces are not permitted."), # 
+    
+    `Sample ID` = 
+      structure(rep("999", nrow), required = TRUE, allowsX = FALSE, key = "TI:2",
+                description = "Inputs can be alphabetic, numeric or selected symbols (/ - () [] _). Spaces are not permitted."), #
+    
+    `Mineral` = 
+      structure(factor(rep("Q", nrow), c("Q", "F", "PM")), required = TRUE, allowsX = FALSE, key = "TI:3",
+                description = "The mineral used for dating: quartz, feldspar or polymineral. Input must be 'Q', 'F' or 'PM'."), #
+    
+    `Conversion factors` = 
+      structure(factor(rep("Liritzisetal2013", nrow), c("AdamiecAitken1998", "Guerinetal2011", "Liritzisetal2013", "X")), required = FALSE, allowsX = TRUE, key = "TI:4",
+                description = "The conversion factors required to calculate dose rates from radionuclide concentrations. Users have the option of datasets from Adamiec and Aitken (1998), Guerin et al. (2011) or Liritzis et al. (2013). Input must be 'AdamiecAitken1998', 'Guerinetal2011', 'Liritzisetal2013' or 'X' if conversion factors are not required."), #
+    
+    `ExternalU (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:5",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), # 
+    
+    `errExternal U (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:6",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `External Th (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:7",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errExternal Th (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:8",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `External K (%)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:9",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errExternal K (%)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:10",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `External Rb (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:11",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errExternal Rb (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:12",
+                description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `Calculate external Rb from K conc?` = 
+      structure(factor(rep("Y", nrow), c("Y", "N")), required = FALSE, allowsX = FALSE, key = "TI:13",
+                description = "Option to calculate a Rubidium concentration from Potassium, using the 270:1 ratio suggested by Mejdahl (1987). Input should be yes 'Y' or no 'N'."), #
+    
+    `Internal U (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:14",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errInternal U (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:15",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `Internal Th (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:16",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errInternal Th (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:17",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `Internal K (%)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:18",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errInternal K (%)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:19",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `Rb (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:20",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `errRb (ppm)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:21",
+                description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #
+    
+    `Calculate internal Rb from K conc?` = 
+      structure(factor(rep("Y", nrow), c("Y", "N", "X")), required = FALSE, allowsX = TRUE, key = "TI:22",
+                description = "Option to calculate an internal Rubidium concentration from Potassium, using the 270:1 ratio suggested by Mejdahl (1987). Input should be yes 'Y' or no 'N'."), #
+    
+    `User external alphadoserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:23",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `errUser external alphadoserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:24",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `User external betadoserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:25",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `errUser external betadoserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:26",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `User external gamma doserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:27",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `errUser external gammadoserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:28",
+                description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #
+    
+    `User internal doserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:29",
+                description = "Users may input an internal dose rate (either alpha, beta or the sum of the two; in Gy.ka-1). DRAC will assume that this value has already been corrected for attenuation. Inputs in this field will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and not left blank."), #
+    
+    `errUser internal doserate (Gy.ka-1)` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:30",
+                description = "Users may input an internal dose rate (either alpha, beta or the sum of the two; in Gy.ka-1). DRAC will assume that this value has already been corrected for attenuation. Inputs in this field will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and not left blank."), #
+    
+    `Scale gammadoserate at shallow depths?` = 
+      structure(factor(rep("Y", nrow), c("Y", "N")), required = FALSE, allowsX = FALSE, key = "TI:31",
+                description = "Users may choose to scale gamma dose rates for samples taken within 0.3 m of the ground surface. The scaling factors of Aitken (1985) are used. Input should be yes 'Y' or no 'N'."), #
+    
+    `Grain size min (microns)` = 
+      structure(rep(100, nrow), required = TRUE, allowsX = FALSE, key = "TI:32",
+                description = "The grain size range analysed. DRAC can be used for the grain size ranges between 1 and 1000 microns. Inputs should range between 1 and 1000 and not be left blank."), #
+    
+    `Grain size max (microns)` = 
+      structure(rep(150, nrow), required = TRUE, allowsX = FALSE, key = "TI:33",
+                description = "The grain size range analysed. DRAC can be used for the grain size ranges between 1 and 1000 microns. Inputs should range between 1 and 1000 and not be left blank."), #
+    
+    `alpha-Grain size attenuation` = 
+      structure(factor(rep("Brennanetal1991", nrow), c("Bell1980", "Brennanetal1991")), required = TRUE, allowsX = FALSE, key = "TI:34",
+                description = "The grain size attenuation factors for the alpha dose rate. Users have the option of datasets from Bell (1980) and Brennan et al. (1991). Input must be 'Bell1980' or 'Brennanetal1991'."), #
+    
+    `beta-Grain size attenuation ` = 
+      structure(factor(rep("Guerinetal2012-Q", nrow), c("Mejdahl1979", "Brennan2003", "Guerinetal2012-Q", "Guerinetal2012-F")), required = TRUE, allowsX = FALSE, key = "TI:35",
+                description = "The grain size attenuation factors for the beta dose rate. Users have the option of datasets from Mejdahl (1979), Brennan (2003) and Guerin et al. (2012) for quartz or feldspar. Input must be 'Mejdahl1979', 'Brennan2003', 'Guerinetal2012-Q' or 'Guerinetal2012-F' ."), #
+    
+    `Etch depth min (microns)` = 
+      structure(rep(8, nrow), required = TRUE, allowsX = FALSE, key = "TI:36",
+                description = "The user defined etch depth range (microns). Inputs should range between 0 and 30 and not be left blank."), #
+    
+    `Etch depth max (microns)` = 
+      structure(rep(10, nrow), required = TRUE, allowsX = FALSE, key = "TI:37",
+                description = "The user defined etch depth range (microns). Inputs should range between 0 and 30 and not be left blank."), #
+    
+    `beta-Etch depth attenuation factor` = 
+      structure(factor(rep("Bell1979", nrow), c("Bell1979", "Brennan2003", "X")), required = FALSE, allowsX = TRUE, key = "TI:38",
+                description = "The etch depth attenuation factors for the beta dose rate. Users have the option of datasets from Bell (1979) and Brennan (2003). Input must be 'Bell1979' or 'Brennan2003'. Note: only the dataset of Bell (1980) is provided for attenuation of the alpha dose rate by etching."), #
+    
+    `a-value` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:39",
+                description = "Alpha track efficiency value and uncertainty defined by the user. Inputs should be 0 or positive and not left blank."), #
+    
+    `erra-value` = 
+      structure(rep(0, nrow), required = TRUE, allowsX = TRUE, key = "TI:40",
+                description = "Alpha track efficiency value and uncertainty defined by the user. Inputs should be 0 or positive and not left blank."), #
+    
+    `Water content ((wet weight - dry weight)/dry weight) %` = 
+      structure(rep(0, nrow), required = TRUE, allowsX = FALSE, key = "TI:41",
+                description = "Sediment water content (%) over the burial period. Inputs should be 0 or positive and not be left blank."), #
+    
+    `errWater content %` = 
+      structure(rep(0, nrow), required = FALSE, allowsX = FALSE, key = "TI:42",
+                description = "Sediment water content (%) over the burial period. Inputs should be 0 or positive and not be left blank."), #
+    
+    `Depth (m)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:43",
+                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+    
+    `errDepth (m)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:44",
+                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+    
+    `Overburden density (g cm-3)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:45",
+                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. If user defined Dc will be used then an 'X' must be input."), #
+    
+    `errOverburden density (g cm-3)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:46",
+                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. If user defined Dc will be used then an 'X' must be input."), #
+    
+    `Latitude (decimal degrees)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:47",
+                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude. If user defined Dc will be used then an 'X' must be input."), # 
+    
+    `Longitude (decimal degrees)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:48",
+                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude. If user defined Dc will be used then an 'X' must be input."), # 
+    
+    `Altitude (m)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:49",
+                description = "Altitude of sample location in metres above sea level. Input should be less than 5000 and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+    
+    `User cosmicdoserate (Gy.ka-1)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:50",
+                description = "Users may input a cosmic dose rate (in Gy.ka-1). Inputs in these fields will override the DRAC calculated cosmic dose rate. Inputs should be positive or 'X' if not required, and not left blank."), #
+    
+    `errUser cosmicdoserate (Gy.ka-1)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:51",
+                description = "Users may input a cosmic dose rate (in Gy.ka-1). Inputs in these fields will override the DRAC calculated cosmic dose rate. Inputs should be positive or 'X' if not required, and not left blank."), #
+    
+    `De (Gy)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:52",
+                description = "Sample De and uncertainty (in Gy). Inputs should be positive or 'X' if not required, and not left blank."), #
+    
+    `errDe (Gy)` = 
+      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:53",
+                description = "Sample De and uncertainty (in Gy). Inputs should be positive or 'X' if not required, and not left blank.") #
+  )   
+  
+  
+  ## RETURN VALUE ---
+  # add an additional DRAC class so we can define our own S3 method for as.data.frame
+  class(template) <- c("DRAC.list", "list")
+  invisible(template)
+}
\ No newline at end of file
diff --git a/R/tune_Data.R b/R/tune_Data.R
new file mode 100644
index 0000000..1dc9a71
--- /dev/null
+++ b/R/tune_Data.R
@@ -0,0 +1,106 @@
+#' Tune data for experimental purposes
+#'
+#' The error can be reduced and the sample size increased for experimental purposes.
+#'
+#' @param data \code{\link{data.frame}} (\bold{required}): input values,
+#' structure: data (\code{values[,1]}) and data error (\code{values [,2]}) are
+#' required
+#'
+#' @param decrease.error \code{\link{numeric}}: factor by which the error
+#' is decreased, ranges between 0 and 1.
+#'
+#' @param increase.data \code{\link{numeric}}: factor by which the data set
+#' size is increased, ranges between 0 and Inf.
+#'
+#' @return Returns a \code{\link{data.frame}} with tuned values.
+#'
+#' @note You should not use this function to improve your poor data set!
+#'
+#' @section Function version: 0.5.0
+#'
+#' @author Michael Dietze, GFZ Potsdam (Germany)
+#'
+#' @seealso #
+#'
+#' @references #
+#'
+#' @keywords manip
+#'
+#' @examples
+#' ## load example data set
+#' data(ExampleData.DeValues, envir = environment())
+#' x <- ExampleData.DeValues$CA1
+#'
+#' ## plot original data
+#' plot_AbanicoPlot(data = x,
+#'                  summary = c("n", "mean"))
+#'
+#' ## decrease error by 10 %
+#' plot_AbanicoPlot(data = tune_Data(x, decrease.error = 0.1),
+#'                  summary = c("n", "mean"))
+#'
+#' ## increase sample size by 200 %
+#' #plot_AbanicoPlot(data = tune_Data(x, increase.data = 2) ,
+#' #                summary = c("n", "mean"))
+#'
+#'
+#' @export
+tune_Data <- function(
+  data,
+  decrease.error = 0,
+  increase.data = 0
+){
+
+  if(missing(decrease.error) == FALSE) {
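+
+    ## shrink each error by the factor (1 - decrease.error), leaving the
+    ## measured values untouched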
+
+    error.rel <- data[,2] / data[,1]
+
+    data[,2] <- error.rel * (1 - decrease.error) * data[,1]
+  }
+
+  if(missing(increase.data) == FALSE) {
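+
+    ## enlarge the data set: draw n = increase.data * 100 new observations by
+    ## resampling existing rows with replacement and adding normal noise
+    ## around each row's value (sd = error) and error (sd = 5 % of the error)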
+
+    n <- round(x = increase.data * 100,
+               digits = 0)
+
+    i.new <- sample(x = 1:nrow(data),
+                    size = n,
+                    replace = TRUE)
+
+    x.new <- rnorm(n = n,
+                   mean = data[i.new, 1],
+                   sd = data[i.new, 2])
+
+    e.new <- rnorm(n = n,
+                   mean = data[i.new, 2],
+                   sd = data[i.new, 2] * 0.05)
+
+    x.merge <- c(data[,1], x.new)
+    e.merge <- c(data[,2], e.new)
+
+    e.merge <- e.merge[order(x.merge)]
+    x.merge <- x.merge[order(x.merge)]
+
+    data.out <- data.frame(x.merge, e.merge)
+
+    names(data.out) <- names(data)
+
+    data <- data.out
+  }
+
+  info <- Sys.info()
+  user <- info[length(info)]
+  os <- info[1]
+
+  warning(paste("Dear ",
+                user,
+                ", these activities on your ",
+                os,
+                " machine have been tracked and will be submitted to ",
+                "the R.Lum data base. Cheating does not pay off! [",
+                Sys.time(),
+                "]",
+                sep = ""))
+
+  return(data)
+}
diff --git a/R/use_DRAC.R b/R/use_DRAC.R
new file mode 100644
index 0000000..dcedf04
--- /dev/null
+++ b/R/use_DRAC.R
@@ -0,0 +1,342 @@
+#' Use DRAC to calculate dose rate data
+#'
+#' The function provides an interface from R to DRAC. An R-object or a
+#' pre-formatted XLS/XLSX file is passed to the DRAC website and the
+#' results are re-imported into R.
+#'
+#'
+#' @param file \code{\link{character}}: spreadsheet to be passed
+#' to the DRAC website for calculation. Can also be a DRAC template object
+#' obtained from \code{template_DRAC()}.
+#'
+#' @param name \code{\link{character}}: Optional user name submitted to DRAC. If
+#' omitted, a random name will be generated
+#'
+#' @param ... Further arguments.
+#'
+#' @return Returns an \code{\linkS4class{RLum.Results}} object containing the following elements:
+#'
+#' \item{DRAC}{\link{list}: a named list containing the following elements in slot \code{@@data}:
+#'
+#' \tabular{lll}{
+#'    \code{$highlights} \tab \code{\link{data.frame}} \tab summary of 25 most important input/output fields \cr
+#'    \code{$header} \tab \code{\link{character}} \tab HTTP header from the DRAC server response \cr
+#'    \code{$labels} \tab \code{\link{data.frame}} \tab descriptive headers of all input/output fields \cr
+#'    \code{$content} \tab \code{\link{data.frame}} \tab complete DRAC input/output table \cr
+#'    \code{$input} \tab \code{\link{data.frame}} \tab DRAC input table \cr
+#'    \code{$output} \tab \code{\link{data.frame}} \tab DRAC output table \cr
+#' }
+#'
+#' }
+#' \item{data}{\link{character} or \link{list} path to the input spreadsheet or a DRAC template}
+#' \item{call}{\link{call} the function call}
+#' \item{args}{\link{list} used arguments}
+#'
+#' The output should be accessed using the function \code{\link{get_RLum}}.
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Michael Dietze,
+#' GFZ Potsdam (Germany), Christoph Burow, University of Cologne (Germany)\cr
+#'
+#' @references
+#'
+#' Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
+#' Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
+#'
+#' @examples
+#'
+#' ## (1) Method using the DRAC spreadsheet
+#'
+#' file <-  "/PATH/TO/DRAC_Input_and_Output_Template.xlsx"
+#'
+#' # send the actual IO template spreadsheet to DRAC
+#' \dontrun{
+#' use_DRAC(file = file)
+#' }
+#'
+#'
+#'
+#' ## (2) Method using an R template object
+#'
+#' # Create a template
+#' input <- template_DRAC()
+#'
+#' # Fill the template with values
+#' input$`Project ID` <- "DRAC-Example"
+#' input$`Sample ID` <- "Quartz"
+#' input$`Conversion factors` <- "AdamiecAitken1998"
+#' input$`ExternalU (ppm)` <- 3.4
+#' input$`errExternal U (ppm)` <- 0.51
+#' input$`External Th (ppm)` <- 14.47
+#' input$`errExternal Th (ppm)` <- 1.69
+#' input$`External K (%)` <- 1.2
+#' input$`errExternal K (%)` <- 0.14
+#' input$`Calculate external Rb from K conc?` <- "N"
+#' input$`Calculate internal Rb from K conc?` <- "N"
+#' input$`Scale gammadoserate at shallow depths?` <- "N"
+#' input$`Grain size min (microns)` <- 90
+#' input$`Grain size max (microns)` <- 125
+#' input$`Water content ((wet weight - dry weight)/dry weight) %` <- 5
+#' input$`errWater content %` <- 2
+#' input$`Depth (m)` <- 2.2
+#' input$`errDepth (m)` <- 0.22
+#' input$`Overburden density (g cm-3)` <- 1.8
+#' input$`errOverburden density (g cm-3)` <- 0.1
+#' input$`Latitude (decimal degrees)` <- 30.0000
+#' input$`Longitude (decimal degrees)` <- 70.0000
+#' input$`Altitude (m)` <- 150
+#' input$`De (Gy)` <- 20
+#' input$`errDe (Gy)` <- 0.2
+#'
+#' # use DRAC
+#' \dontrun{
+#' output <- use_DRAC(input)
+#' }
+#'
+#' @export
+use_DRAC <- function(
+  file,
+  name,
+  ...
+){
+  ## TODO:
+  ## (1) Keep the data set as unmodified as possible. Check structure and order of parameters
+  ## for a meaningful combination.
+  ##
+  ## (2)
+  ## Leave it to the user to decide where the calculations made in our package should be used
+
+  # Integrity tests -----------------------------------------------------------------------------
+  if (inherits(file, "character")) {
+    if(!file.exists(file)){
+      stop("[use_DRAC()] It seems that the file doesn't exist!")
+
+    }
+
+    # Import data ---------------------------------------------------------------------------------
+
+    ## Import the data while skipping the first rows; remove NA lines and the 2nd
+    ## row, as this row contains only meta data
+
+    ##check if this is the original DRAC table
+    if (readxl::excel_sheets(file)[1] != "DRAC_1.1_input") {
+      stop("[use_DRAC()] It looks like that you are not using the original DRAC XLSX template. This is currently
+         not supported!")
+    }
+    input.raw <- na.omit(as.data.frame(readxl::read_excel(path = file, sheet = 1, skip = 5)))[-1, ]
+
+  } else if (inherits(file, "DRAC.list")) {
+    input.raw <- as.data.frame(file)
+
+  } else if (inherits(file, "DRAC.data.frame")) {
+    input.raw <- file
+
+  } else {
+    stop("The provided data object is not a valid DRAC template.", call. = FALSE)
+  }
+  
+  if (nrow(input.raw) > 50)
+    stop("DRAC can only handle 50 data sets at once. Please reduce the number of rows and re-run this function again.", call. = FALSE)
+
+  # Settings ------------------------------------------------------------------------------------
+  settings <- list(name = ifelse(missing(name),
+                                 paste(sample(if(runif(1,-10,10)>0){LETTERS}else{letters},
+                                              runif(1, 2, 4)), collapse = ""),
+                                 name),
+                   verbose = TRUE,
+                   url = "https://www.aber.ac.uk/en/iges/research-groups/quaternary/luminescence-research-laboratory/dose-rate-calculator/?show=calculator")
+  
+  # override defaults with args in ...
+  settings <- modifyList(settings, list(...))
+
+  # Set helper function -------------------------------------------------------------------------
+  ## The real data are transferred without any encryption, so we have to mask the original values
+
+  ##(0) set masking function
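+  ##    for each requested replicate it draws 30 normal deviates around the
+  ##    real value and returns formatted mean/sd pairs acting as decoys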
+  .masking <- function(mean, sd, n) {
+    temp <- rnorm(n = 30 * n, mean = mean,sd = sd)
+    temp.result <-
+      sapply(seq(1, length(temp), by = 30), function(x) {
+        c(format(mean(temp[x:(x + 29)]), digits = 2),
+          format(sd(temp[x:(x + 29)]), digits = 2))
+      })
+    return(t(temp.result))
+  }
+
+
+  # Process data --------------------------------------------------------------------------------
+  if (settings$verbose) message("\n\t Preparing data...")
+
+  ##(1) expand the rows in the data.frame a little bit
+  mask.df <-  input.raw[rep(1:nrow(input.raw), each = 3), ]
+
+  ##(2) generate some meaningful random variables
+  mask.df <- lapply(seq(1, nrow(input.raw), by = 3), function(x) {
+
+    if (mask.df[x,"TI:52"] != "X") {
+      ##replace some values - the De value
+      mask.df[x:(x + 2), c("TI:52","TI:53")] <- .masking(
+        mean = as.numeric(mask.df[x,"TI:52"]),
+        sd = as.numeric(mask.df[x,"TI:53"]),
+        n = 3)
+      return(mask.df)
+    }
+
+  })
+
+  ##(3) bind original and masking values
+  DRAC_submission.df <- rbind(input.raw,mask.df[[1]])
+
+
+  ##(4) replace ID values
+  DRAC_submission.df$`TI:1` <-   paste0(paste0(paste0(sample(if(runif(1,-10,10)>0){LETTERS}else{letters},
+                                                             runif(1, 2, 4)), collapse = ""),
+                                               ifelse(runif(1,-10,10)>0, "-", "")),
+                                        gsub(" ", "0", prettyNum(seq(sample(1:50, 1, prob = 50:1/50, replace = FALSE),
+                                                                     by = 1, length.out = nrow(DRAC_submission.df)), width = 2)))
+
+
+
+  ##(5) store the real IDs in a separate object
+  DRAC_results.id <-  DRAC_submission.df[1:nrow(input.raw), "TI:1"]
+
+  ##(6) create the DRAC submission string; randomise the row order so real
+  ##    and decoy records cannot be told apart
+  DRAC_submission.df <- DRAC_submission.df[sample(x = 1:nrow(DRAC_submission.df), nrow(DRAC_submission.df),
+                                                  replace = FALSE), ]
+
+  ##convert all columns of the data.frame to class 'character'
+  for (i in 1:ncol(DRAC_submission.df))
+    DRAC_submission.df[ ,i] <- as.character(DRAC_submission.df[, i])
+
+  if (settings$verbose) message("\t Creating submission string...")
+  ##get line by line and remove unwanted characters
+  DRAC_submission.string <- sapply(1:nrow(DRAC_submission.df), function(x) {
+    paste0(gsub(",", "", toString(DRAC_submission.df[x, ])), "\n")
+  })
+
+  ##paste everything together to get the format we want
+  DRAC_input <- paste(DRAC_submission.string, collapse = "")
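+
+  ## for two rows the resulting string would look like, e.g. (values purely
+  ## illustrative): "AB01 Quartz Q ... 20 0.2\nAB02 Quartz Q ... X X\n"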
+
+  # Send data to DRAC ---------------------------------------------------------------------------
+  if (settings$verbose) message(paste("\t Establishing connection to", settings$url))
+
+  ## send the data set to the DRAC website and receive the response
+  DRAC.response <- httr::POST(settings$url,
+                              body = list("drac_data[name]"  = settings$name,
+                                          "drac_data[table]" = DRAC_input))
+
+  ## check for correct response
+  if (DRAC.response$status_code != 200) {
+    stop(paste0("[use_DRAC()] transmission failed with HTTP status code: ",
+                DRAC.response$status_code))
+  } else {
+    if (settings$verbose) message("\t The request was successful, processing the reply...")
+  }
+
+  ## assign DRAC response data to variables
+  http.header <- DRAC.response$header
+  DRAC.content <- httr::content(x = DRAC.response, as = "text")
+
+  ## if the input was valid from a technical standpoint, but not with regard
+  ## to its contents, we indeed get a valid response, but no DRAC output
+  if (!grepl("DRAC Outputs", DRAC.content)) {
+    stop(paste("\n\t We got a response from the server, but it\n",
+                       "\t did not contain DRAC output. Please check\n",
+                       "\t your data and verify its validity.\n"),
+         call. = FALSE)
+  } else {
+    if (settings$verbose) message("\t Finalising the results...")
+  }
+
+  ## split header and content
+  DRAC.content.split <- strsplit(x = DRAC.content,
+                                 split = "DRAC Outputs\n\n")
+
+  ## assign DRAC header part
+  DRAC.header <- as.character(DRAC.content.split[[1]][1])
+
+  ## assign DRAC content part
+  DRAC.raw <- read.table(text = as.character(DRAC.content.split[[1]][2]),
+                         sep = ",",
+                         stringsAsFactors = FALSE)
+
+  ## remove first two lines
+  DRAC.content <- DRAC.raw[-c(1, 2), ]
+
+  ##Get rid of all the values we do not need anymore
+  DRAC.content <-  subset(DRAC.content, DRAC.content$V1 %in% DRAC_results.id)
+  DRAC.content <- DRAC.content[with(DRAC.content, order(V1)), ]
+
+  ##replace by original names
+  DRAC.content[ ,1] <- input.raw[ ,1]
+
+  ## assign column names
+  colnames(DRAC.content) <- DRAC.raw[1, ]
+
+  ## save column labels and use them as attributes for the I/O table columns
+  DRAC.labels <- DRAC.raw[2, ]
+  for (i in 1:length(DRAC.content)) {
+    attr(DRAC.content[ ,i], "description") <- DRAC.labels[1,i]
+  }
+
+  ## DRAC also returns the input, so we need to split input and output
+  DRAC.content.input <- DRAC.content[ ,grep("TI:", names(DRAC.content))]
+  DRAC.content.output <- DRAC.content[ ,grep("TO:", names(DRAC.content))]
+
+  ## The DRAC output also contains a highlight table, which results in
+  ## duplicate columns. When creating the data.frame duplicate columns
+  ## are automatically appended '.1' in their names, so we can identify
+  ## and remove them easily
+  DRAC.content.input <- DRAC.content.input[ ,-grep("\\.1", names(DRAC.content.input))]
+  DRAC.content.output <- DRAC.content.output[ ,-grep("\\.1", names(DRAC.content.output))]
+
+  ## The output table (v1.1) has 198 columns, making it unreasonably complex
+  ## for standard data evaluation. We reproduce the DRAC highlight table
+  ## and use the descriptions (saved as attributes) as column names.
+  highlight.keys <- c("TI:1","TI:2","TI:3","TO:FQ","TO:FR",
+                      "TO:FS", "TO:FT", "TO:FU", "TO:FV", "TO:FW",
+                      "TO:FX", "TO:FY", "TO:FZ", "TO:GG", "TO:GH",
+                      "TO:GI", "TO:GJ", "TO:GK", "TO:GL", "TO:GM",
+                      "TO:GN", "TI:52", "TI:53", "TO:GO", "TO:GP")
+  DRAC.highlights <- subset(DRAC.content, select = highlight.keys)
+  DRAC.highlights.labels <- as.character(DRAC.labels[1, which(unique(names(DRAC.content)) %in% highlight.keys)])
+  colnames(DRAC.highlights) <- DRAC.highlights.labels
+  for (i in 1:length(DRAC.highlights)) {
+    attr(DRAC.highlights[ ,i], "key") <- highlight.keys[i]
+  }
+
+  ## finally, we add the 'DRAC.highlights' class so that we can use a custom print method
+  class(DRAC.highlights) <- c("DRAC.highlights", "data.frame")
+
+  ## Final Disclaimer
+  messages <- list("\t Done! \n",
+                   "\t We, the authors of the R package 'Luminescence', do not take any responsibility and we are not liable for any ",
+                   "\t mistakes or unforeseen misbehaviour. All calculations are done by DRAC and it is outside our reference to",
+                   "\t verify the input and output. \n",
+                   "\t Note that this function is only compatible with DRAC version 1.1. Before using this function make sure that",
+                   "\t this is the correct version, otherwise expect unspecified errors.\n",
+                   "\t Please ensure you cite the use of DRAC in your work, published or otherwise. Please cite the website name and",
+                   "\t version (e.g. DRAC v1.1) and the accompanying journal article:",
+                   "\t Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose rate and age calculation for trapped charge",
+                   "\t dating. Quaternary Geochronology 28, 54-61. \n",
+                   "\t Use 'verbose = FALSE' to hide this message. \n")
+
+  if (settings$verbose) lapply(messages, message)
+
+
+  ## return output
+  DRAC.return <- set_RLum("RLum.Results",
+                          data = list(
+                            DRAC = list(highlights = DRAC.highlights,
+                                        header = DRAC.header,
+                                        labels = DRAC.labels,
+                                        content = DRAC.content,
+                                        input = DRAC.content.input,
+                                        output = DRAC.content.output),
+                            data = file,
+                            call = sys.call(),
+                            args = as.list(sys.call()[-1])))
+
+  invisible(DRAC.return)
+}
diff --git a/R/verify_SingleGrainData.R b/R/verify_SingleGrainData.R
new file mode 100644
index 0000000..7834e9c
--- /dev/null
+++ b/R/verify_SingleGrainData.R
@@ -0,0 +1,462 @@
+#' Verify single grain data sets and check for invalid grains, i.e. zero light level grains
+#'
+#' This function tries to automatically identify zero light level curves (grains) in single grain
+#' data measurements. \cr
+#'
+#' \bold{How does the method work?}\cr
+#'
+#' The function compares the expected value (\eqn{E(X)}) and the variance (\eqn{Var(X)})
+#' of the count values for each curve. Assuming that the background roughly follows a Poisson
+#' distribution, both values should be (nearly) equal, as
+#'
+#' \deqn{E(x) = Var(x) = \lambda}
+#'
+#' Thus the function checks for:
+#'
+#' \deqn{Var(x)/E(x) >= \Theta}
+#'
+#' with \eqn{\Theta} an arbitrary, user-defined threshold. Values above the threshold
+#' indicate curves comprising a signal.\cr
+#'
+#' Note: for a pure background curve this ratio is close to 1, whereas curves carrying
+#' a signal show a count variance far larger than their mean; the default threshold is
+#' therefore well above 1.
+#'
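+#' A minimal sketch of the check applied to a single count vector (the count
+#' values are hypothetical; 10 mirrors the default threshold):
+#'
+#' \preformatted{
+#' x <- c(11, 9, 10, 12, 8)   # background-like counts, Var/Mean close to 1
+#' var(x) / mean(x) > 10      # FALSE: curve flagged as zero light level
+#' }
+#'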
+#' @param object \code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Analysis}}
+#' (\bold{required}): input object. The function also accepts a list with objects of allowed type.
+#'
+#' @param threshold \code{\link{numeric}} (with default): numeric threshold value for the ratio of
+#' the \code{var} and the \code{mean} of the count values (see details)
+#'
+#' @param cleanup \code{\link{logical}} (with default): if set to \code{TRUE}, curves identified as
+#' zero light level curves are automatically removed. Output is an object of the same type as the input, i.e.
+#' either \code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Analysis}}
+#'
+#' @param cleanup_level \code{\link{character}} (with default): selects the level for the cleanup
+#' of the input data sets. Two options are allowed: \code{"curve"} or \code{"aliquot"}. If  \code{"curve"}
+#' is selected every single curve marked as \code{invalid} is removed. If \code{"aliquot"} is selected,
+#' curves of one aliquot (grain or disc) can be marked as invalid, but will not be removed. An aliquot
+#' will only be removed if all curves of this aliquot are marked as invalid.
+#'
+#' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback
+#'
+#' @param plot \code{\link{logical}} (with default): enables or disables graphical feedback
+#'
+#' @return The function returns
+#'
+#' -----------------------------------\cr
+#' [ NUMERICAL OUTPUT ]\cr
+#' -----------------------------------\cr
+#' \bold{\code{RLum.Results}}-object\cr
+#'
+#' \bold{slot:} \bold{\code{@data}}\cr
+#' \tabular{lll}{
+#' \bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+#'  \code{$unique_pairs} \tab \code{data.frame} \tab the unique position and grain pairs \cr
+#'  \code{$selection_id} \tab \code{numeric} \tab the selection as record ID \cr
+#'  \code{$selection_full} \tab \code{data.frame} \tab the full selection table with mean, variance, ratio and validity flag \cr
+#' }
+#'
+#'\bold{slot:} \bold{\code{@info}}\cr
+#'
+#' The original function call\cr
+#'
+#' \bold{Output variation}\cr
+#'
+#' For \code{cleanup = TRUE} the same object as the input is returned, but cleaned up
+#' (invalid curves removed). This means: either a \code{\linkS4class{Risoe.BINfileData}} or an
+#' \code{\linkS4class{RLum.Analysis}} object is returned in such cases. A
+#' \code{\linkS4class{Risoe.BINfileData}} object can be exported
+#' to a BIN-file by using the function \code{\link{write_R2BIN}}.
+#'
+#' @note This function can work with \code{\linkS4class{Risoe.BINfileData}} objects or
+#' \code{\linkS4class{RLum.Analysis}} objects (or a list of them). However, the function is highly optimised
+#' for \code{\linkS4class{Risoe.BINfileData}} objects, as it makes sense to identify and remove invalid
+#' grains before the conversion to an \code{\linkS4class{RLum.Analysis}} object.\cr
+#'
+#' The check for invalid curves works rather robustly, and it is likely that Reg0 curves
+#' within a SAR cycle are removed as well. Therefore it is strongly recommended to use the argument
+#' \code{cleanup = TRUE} carefully.
+#'
+#' @section Function version: 0.2.0
+#'
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#'
+#' @seealso \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}},
+#' \code{\link{write_R2BIN}}, \code{\link{read_BIN2R}}
+#'
+#' @references -
+#'
+#' @keywords manip datagen
+#'
+#' @examples
+#'
+#' ##01 - basic example I
+#' ##just show how to apply the function
+#' data(ExampleData.XSYG, envir = environment())
+#'
+#' ##verify and get data.frame out of it
+#' verify_SingleGrainData(OSL.SARMeasurement$Sequence.Object)$selection_full
+#'
+#' ##02 - basic example II
+#' data(ExampleData.BINfileData, envir = environment())
+#' id <- verify_SingleGrainData(object = CWOSL.SAR.Data,
+#'                              cleanup_level = "aliquot")$selection_id
+#'
+#' \dontrun{
+#' ##03 - advanced example I
+#' ##importing and exporting a BIN-file
+#'
+#' ##select and import file
+#' file <- file.choose()
+#' object <- read_BIN2R(file)
+#'
+#' ##remove invalid aliquots(!)
+#' object <- verify_SingleGrainData(object, cleanup = TRUE)
+#'
+#' ##export to new BIN-file
+#' write_R2BIN(object, paste0(dirname(file),"/", basename(file), "_CLEANED.BIN"))
+#' }
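+#'
+#' ##04 - the core test, computed manually for a single curve:
+#' ##a curve is flagged valid if var(counts)/mean(counts) exceeds the threshold
+#' data(ExampleData.BINfileData, envir = environment())
+#' counts <- CWOSL.SAR.Data@DATA[[1]]
+#' var(counts) / mean(counts) > 10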
+#'
+#' @export
+verify_SingleGrainData <- function(
+  object,
+  threshold = 10,
+  cleanup = FALSE,
+  cleanup_level = 'aliquot',
+  verbose = TRUE,
+  plot = FALSE
+){
+
+
+  ##three types of input are allowed:
+  ##(1) RisoeBINfileData
+  ##(2) RLum.Analysis
+  ##(3) List of RLum.Analysis
+
+  # Self Call -----------------------------------------------------------------------------------
+  if(is(object, "list")){
+
+    results <- lapply(seq_along(object), function(x) {
+      verify_SingleGrainData(
+        object = object[[x]],
+        threshold = threshold,
+        cleanup = cleanup,
+        cleanup_level = cleanup_level,
+        verbose = verbose
+      )
+    })
+
+      ##account for cleanup
+      if(cleanup){
+        return(results)
+
+      }else{
+        return(merge_RLum(results))
+
+      }
+
+  }
+
+  ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  ##RisoeBINfileData
+  if(is(object, "Risoe.BINfileData")){
+
+      ##run test on DATA slot
+        ##MEAN + SD
+        temp.results_matrix <- lapply(X = object@DATA, FUN = function(x){
+            c(mean(x), var(x))
+
+        })
+
+        temp.results_matrix <- do.call(rbind,  temp.results_matrix)
+
+        ##DIFF
+        temp.results_matrix_RATIO <- temp.results_matrix[,2]/temp.results_matrix[,1]
+
+        ##SEL
+        temp.results_matrix_VALID <- temp.results_matrix_RATIO > threshold
+
+      ##combine everything in a data.frame
+        selection <- data.frame(
+          POSITION = object@METADATA$POSITION,
+          GRAIN = object@METADATA$GRAIN,
+          MEAN = temp.results_matrix[, 1],
+          VAR = temp.results_matrix[, 2],
+          RATIO = temp.results_matrix_RATIO,
+          THRESHOLD = rep_len(threshold, length(object@DATA)),
+          VALID = temp.results_matrix_VALID
+        )
+
+        ##get unique pairs for POSITION and GRAIN for VALID == TRUE
+        unique_pairs <- unique(
+          selection[selection[["VALID"]], c("POSITION", "GRAIN")])
+
+
+        if(cleanup_level == "aliquot"){
+
+          selection_id <- sort(unlist(lapply(1:nrow(unique_pairs), function(x) {
+            which(
+              .subset2(selection, 1) == .subset2(unique_pairs, 1)[x] &
+                .subset2(selection, 2) == .subset2(unique_pairs, 2)[x]
+            )
+
+
+          })))
+
+
+        }else{
+
+         ##reduce data to TRUE selection
+         selection_id <- which(selection[["VALID"]])
+
+        }
+
+
+      ##select output on the chosen input
+      if(cleanup){
+
+        ##selected wanted elements
+        object@DATA <- object@DATA[selection_id]
+        object@METADATA <- object@METADATA[selection_id,]
+        object@METADATA$ID <- 1:length(object@DATA)
+
+
+        ##print message
+        selection_id <- paste(selection_id, collapse = ", ")
+        if(verbose){
+          cat(paste0("\n[verify_SingleGrainData()] Risoe.BINfileData object reduced to records: \n", selection_id))
+          cat("\n\n[verify_SingleGrainData()] Risoe.BINfileData object record index reset.")
+
+        }
+
+         ##return
+        return_object <- object
+
+      }else{
+        return_object <- set_RLum(
+          class = "RLum.Results",
+          data = list(
+            unique_pairs =  unique_pairs,
+            selection_id = selection_id,
+            selection_full = selection),
+          info = list(call = sys.call())
+        )
+
+      }
+
+
+  ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  ##RLum.Analysis and list with RLum.Analysis objects
+  ## ... and yes, it makes sense not to mix that up with the code above
+  }else if(is(object,"RLum.Analysis")){
+
+    ##first extract all count values from all curves
+    object_list <- lapply(get_RLum(object), function(x){
+        ##direct slot access; this is faster than a get_RLum() call per curve
+        x@data[,2]
+
+    })
+
+    ##MEAN + SD
+    temp.results_matrix <- lapply(X = object_list, FUN = function(x){
+      c(mean(x), var(x))
+
+     })
+
+    temp.results_matrix <- do.call(rbind,  temp.results_matrix)
+
+    ##DIFF
+    temp.results_matrix_RATIO <- temp.results_matrix[,2]/temp.results_matrix[,1]
+
+    ##SEL
+    temp.results_matrix_VALID <- temp.results_matrix_RATIO > threshold
+
+    ##get structure for the RLum.Analysis object
+    temp_structure <- structure_RLum(object, fullExtent = TRUE)
+
+      ##now we have two cases, depending on where the measurement is coming from
+      if (object@originator == "Risoe.BINfileData2RLum.Analysis") {
+
+        ##combine everything in a data.frame
+        selection <- data.frame(
+          POSITION = temp_structure$info.POSITION,
+          GRAIN = temp_structure$info.GRAIN,
+          MEAN = temp.results_matrix[, 1],
+          VAR = temp.results_matrix[, 2],
+          RATIO = temp.results_matrix_RATIO,
+          THRESHOLD = rep_len(threshold, length(object_list)),
+          VALID = temp.results_matrix_VALID
+        )
+
+        ##get unique pairs for POSITION and GRAIN for VALID == TRUE
+        unique_pairs <- unique(
+          selection[selection[["VALID"]], c("POSITION", "GRAIN")])
+
+
+      } else if (object@originator == "read_XSYG2R") {
+
+        ##combine everything in a data.frame
+        selection <- data.frame(
+          POSITION = if(any(grepl(pattern = "position", names(temp_structure)))){
+            temp_structure$info.position}else{
+              NA
+            },
+          GRAIN = NA,
+          MEAN = temp.results_matrix[, 1],
+          VAR = temp.results_matrix[, 2],
+          RATIO = temp.results_matrix_RATIO,
+          THRESHOLD = rep_len(threshold, length(object_list)),
+          VALID = temp.results_matrix_VALID
+        )
+
+        ##get unique pairs for POSITION for VALID == TRUE
+        unique_pairs <- unique(
+          selection[["POSITION"]][selection[["VALID"]]])
+
+      } else{
+
+        stop("[verify_SingleGrainData()] I don't know what to do, object 'originator' not supported!")
+      }
+
+
+      ##set up cleanup
+      if(cleanup_level == "aliquot") {
+        if (object@originator == "read_XSYG2R") {
+
+          if(!all(is.na(unique_pairs))){
+
+          ##unique_pairs is an atomic vector in this branch, hence seq_along()
+          selection_id <-
+            sort(unlist(lapply(seq_along(unique_pairs), function(x) {
+              which(.subset2(selection, 1) == unique_pairs[x])
+
+            })))
+
+          }else{
+           selection_id <- NA
+
+          }
+
+
+        } else if (object@originator == "Risoe.BINfileData2RLum.Analysis") {
+          selection_id <-
+            sort(unlist(lapply(1:nrow(unique_pairs), function(x) {
+              which(
+                .subset2(selection, 1) == .subset2(unique_pairs, 1)[x] &
+                  .subset2(selection, 2) == .subset2(unique_pairs, 2)[x]
+              )
+
+
+            })))
+
+        }
+
+      } else{
+        ##reduce data to TRUE selection
+        selection_id <- which(selection[["VALID"]])
+
+      }
+
+    ##return value
+    ##select output on the chosen input
+    if(cleanup && !any(is.na(selection_id))){
+
+      ##print message (note: do not overwrite selection_id, it is used again below)
+      if(verbose){
+        cat(paste0("[verify_SingleGrainData()] RLum.Analysis object reduced to records: ",
+                   paste(selection_id, collapse = ", ")))
+
+      }
+
+      ##selected wanted elements
+      if (length(selection_id) == 0) {
+        object <- set_RLum(
+          class = "RLum.Analysis",
+          originator = object@originator,
+          protocol = object@protocol,
+          records = list(),
+          info = list(
+            unique_pairs = unique_pairs,
+            selection_id = selection_id,
+            selection_full = selection)
+        )
+
+      } else{
+
+        object <- set_RLum(
+          class = "RLum.Analysis",
+          records = get_RLum(object, record.id = selection_id, drop = FALSE),
+          info = list(
+            unique_pairs = unique_pairs,
+            selection_id = selection_id,
+            selection_full = selection)
+        )
+
+     }
+
+      ##return
+      return_object <- object
+
+    }else{
+      if(any(is.na(selection_id))){
+        warning("[verify_SingleGrainData()] selection_id is NA, nothing removed, everything selected!")
+
+      }
+
+      return_object <- set_RLum(
+        class = "RLum.Results",
+        data = list(
+          unique_pairs = unique_pairs,
+          selection_id = selection_id,
+          selection_full = selection),
+        info = list(call = sys.call())
+      )
+
+    }
+
+
+  }else{
+    stop(paste0("[verify_SingleGrainData()] Input type '", is(object)[1], "' is not allowed for this function!"), call. = FALSE)
+
+  }
+
+  # Plot ----------------------------------------------------------------------------------------
+  if(plot){
+
+    ##plot area
+    plot(
+      NA,
+      NA,
+      xlim = c(1,nrow(selection)),
+      ylim = range(selection[["RATIO"]]),
+      log = "y",
+      xlab = "Record index",
+      ylab = "Calculated ratio [a.u.]",
+      main = "Record selection"
+    )
+
+    ##plot points above the threshold
+    points(x = which(selection[["VALID"]]),
+           y = selection[["RATIO"]][selection[["VALID"]]], pch = 20, col = "darkgreen")
+    points(x = which(!selection[["VALID"]]),
+           y = selection[["RATIO"]][!selection[["VALID"]]], pch = 20, col = rgb(0,0,0,0.5))
+
+    abline(h = threshold, col = "red", lty = 1, lwd = 2)
+
+    mtext(
+      side = 3,
+      text = paste0(
+        "(total: ", nrow(selection),
+        " | valid: ", length(which(selection[["VALID"]])),
+        " | invalid: ", length(which(!selection[["VALID"]])), ")"),
+      cex = 0.9 * par()$cex)
+
+  }
+
+  # Return --------------------------------------------------------------------------------------
+  return(return_object)
+
+
+}
+
diff --git a/R/write_R2BIN.R b/R/write_R2BIN.R
new file mode 100644
index 0000000..82c19fa
--- /dev/null
+++ b/R/write_R2BIN.R
@@ -0,0 +1,1320 @@
+#' Export Risoe.BINfileData into Risoe BIN-file
+#'
+#' Exports a Risoe.BINfileData object to a *.bin or *.binx file that can be
+#' opened by the Analyst software or other Risoe software.
+#'
+#' The structure of the exported binary data follows the data structure
+#' published in the Appendices of the Analyst manual p. 42.\cr\cr If
+#' \code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} are not of type
+#' \code{\link{character}}, no transformation into numeric values is done.
+#'
+#' @param object \code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
+#' input object to be stored in a bin file.
+#'
+#' @param file \code{\link{character}} (\bold{required}): file name and path of
+#' the output file\cr [WIN]: \code{write_R2BIN(object, "C:/Desktop/test.bin")},
+#' \cr [MAC/LINUX]: \code{write_R2BIN("/User/test/Desktop/test.bin")}
+#'
+#' @param version \code{\link{character}} (optional): version number for the
+#' output file. If no value is provided the highest version number from the
+#' \code{\linkS4class{Risoe.BINfileData}} is taken automatically.\cr\cr Note:
+#' This argument can be used to convert BIN-file versions.
+#'
+#' @param compatibility.mode \code{\link{logical}} (with default): this option
+#' recalculates the position values if necessary and sets the max. value to 48.
+#' The old position number is appended as a comment (e.g., 'OP:70'). This option
+#' accounts for potential compatibility problems with the Analyst software.
+#'
+#' @param txtProgressBar \link{logical} (with default): enables or disables
+#' \code{\link{txtProgressBar}}.
+#' @return A binary file is written.
+#' @note The function only roughly checks the data structures. The validity of
+#' the output data depends on the user.\cr\cr The validity of the file path is
+#' not further checked. \cr BIN-file conversions using the argument
+#' \code{version} may be lossy, depending on the chosen input and
+#' output versions (e.g., conversion from version 08 to 07 to 06 to 04 or 03).\cr
+#'
+#' \bold{Warning}\cr
+#'
+#' Although the coding was done carefully, it seems that the BIN/BINX-files
+#' produced by Risoe DA 15/20 TL/OSL readers slightly differ on the byte level.
+#' No obvious differences are observed in the METADATA; however, the exported
+#' BIN/BINX-file may not be fully compatible, at least not identical to the
+#' ones directly produced by the Risoe readers!\cr
+#'
+#' @section Function version: 0.4.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @note ROI definitions (introduced in BIN-file version 8) are not supported! They are furthermore
+#' ignored by the function \code{\link{read_BIN2R}}.
+#'
+#' @seealso \code{\link{read_BIN2R}}, \code{\linkS4class{Risoe.BINfileData}},
+#' \code{\link{writeBin}}
+#'
+#' @references
+#'
+#' DTU Nutech, 2016. The Sequence Editor, Users Manual, February 2016.
+#' \url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' ##uncomment for usage
+#'
+#' ##data(ExampleData.BINfileData, envir = environment())
+#' ##write_R2BIN(CWOSL.SAR.Data, file="[your path]/output.bin")
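+#'
+#' ##sketch: convert the BIN-file version on export and remap positions > 48
+#' ##for the Analyst; note that such conversions may be lossy (see Note)
+#' ##write_R2BIN(CWOSL.SAR.Data, file="[your path]/output.bin",
+#' ##            version = "04", compatibility.mode = TRUE)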
+#'
+#' @export
+write_R2BIN <- function(
+  object,
+  file,
+  version,
+  compatibility.mode = FALSE,
+  txtProgressBar = TRUE
+){
+
+  # Config ------------------------------------------------------------------
+
+  ##set supported BIN format version
+  VERSION.supported <- as.raw(c(3, 4, 6, 7, 8))
+
+  # Check integrity ---------------------------------------------------------
+
+  ##check if input object is of type 'Risoe.BINfileData'
+  if(is(object, "Risoe.BINfileData") == FALSE){
+
+    stop("[write_R2BIN()] Input object is not of type Risoe.BINfileData!")
+
+  }
+
+  ##check if it fulfils the latest definition
+  if(ncol(object@METADATA)!=80){
+
+    stop("[write_R2BIN()] The number of columns in your slot 'METADATA' does not fit the latest definition. You are probably trying to export a Risoe.BINfileData object that you generated yourself or imported with an old package version some time ago. Please re-import the BIN-file using the function read_BIN2R().")
+
+  }
+
+  ##check if input file is of type 'character'
+  if(is(file, "character") == FALSE){
+
+    stop("[write_R2BIN()] argument 'file' has to be of type character!")
+
+  }
+
+
+  # Check Risoe.BINfileData Structure ----------------------------------------
+
+  ##VERSION
+
+  ##If missing version argument set to the highest value
+  if(missing(version)){
+
+    version <- as.raw(max(as.numeric(object@METADATA[,"VERSION"])))
+    version.original <- version
+
+
+  }else{
+
+    version.original <- as.raw(max(as.numeric(object@METADATA[,"VERSION"])))
+    version <- as.raw(version)
+    object@METADATA[["VERSION"]] <- version
+
+    ##Furthermore, entries length needed to be recalculated
+    if(version.original != version){
+
+      ##stepping decision
+      header.stepping <- switch(as.character(version),
+                                "08" = 507,
+                                "07" = 447,
+                                "06" = 447,
+                                "04" = 272,
+                                "03" = 272)
+
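+      ##note: LENGTH is the version-dependent header size plus 4 bytes per
+      ##data point, as the count values are written as 32-bit integers below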
+      object at METADATA[,"LENGTH"] <- sapply(1:nrow(object at METADATA), function(x){
+
+        header.stepping + 4 * object at METADATA[x,"NPOINTS"]
+
+      })
+
+      object at METADATA[,"PREVIOUS"] <- sapply(1:nrow(object at METADATA), function(x){
+
+        if(x == 1){
+          0
+        }else{
+          header.stepping + 4 * object at METADATA[x-1,"NPOINTS"]
+        }
+
+      })
+
+    }
+
+  }
+
+  ##check whether this file can be exported without problems due to the latest specifications
+  if(ncol(object@METADATA) != 80){
+
+    stop("[write_R2BIN()] Your Risoe.BINfileData object seems not to be compatible with the latest specification of this S4-class object. You are probably trying to export a Risoe.BINfileData object from your workspace that you produced manually or with an old package version.")
+
+  }
+
+  ##Check if the BIN-file object contains unsupported versions
+  if((as.raw(object@METADATA[1,"VERSION"]) %in% VERSION.supported) == FALSE ||
+       version %in% VERSION.supported == FALSE){
+
+    ##show error message
+    error.text <- paste("[write_R2BIN()] Writing BIN-files in format version (",
+                        object@METADATA[1,"VERSION"],") is currently not supported!
+                        Supported version numbers are: ",
+                        paste(VERSION.supported,collapse=", "),".",sep="")
+    stop(error.text)
+  }
+
+  ##CHECK file name: for versions >= 06 it has to be *.binx, correct it if needed
+  if(version == 06 | version == 07 | version == 08){
+
+    ##grep file ending
+    temp.file.name <- unlist(strsplit(file, "[:.:]"))
+
+    ##*.bin? >> correct to binx
+    if(temp.file.name[length(temp.file.name)]=="bin"){
+
+      temp.file.name[length(temp.file.name)] <- "binx"
+      file <- paste(temp.file.name, collapse=".")
+
+    }
+  }
+
+
+  ##SEQUENCE
+  if (suppressWarnings(max(nchar(as.character(object at METADATA[,"SEQUENCE"]), type =
+                                 "bytes"), na.rm = TRUE)) > 8) {
+    stop("[write_R2BIN()] Value in 'SEQUENCE' exceed storage limit!")
+
+  }
+
+  ##USER
+  if (suppressWarnings(max(nchar(as.character(object at METADATA[,"USER"]), type =
+                                 "bytes"), na.rm = TRUE)) > 8) {
+    stop("[write_R2BIN()] 'USER' exceed storage limit!")
+
+  }
+
+  ##SAMPLE
+  if (suppressWarnings(max(nchar(as.character(object at METADATA[,"SAMPLE"]), type =
+                                 "bytes"), na.rm = TRUE)) > 20) {
+    stop("[write_R2BIN()] 'SAMPLE' exceed storage limit!")
+
+  }
+
+  ##enables compatibility with the Analyst, as the max. value for POSITION becomes 48
+  if(compatibility.mode){
+
+    ##just do if position values > 48
+    if(max(object at METADATA[,"POSITION"])>48){
+
+      ##grep relevant IDs
+      temp.POSITION48.id <- which(object at METADATA[,"POSITION"]>48)
+
+      ##find unique values
+      temp.POSITION48.unique <- unique(object at METADATA[temp.POSITION48.id,"POSITION"])
+
+      ##set translation vector starting from 1 and ending at 48
+      temp.POSITION48.new <- rep_len(1:48, length.out = length(temp.POSITION48.unique))
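+      ##note: rep_len() recycles 1:48, so with more than 48 affected positions
+      ##several original positions will share the same new position number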
+
+      ##recalculate POSITION and update comment
+      for(i in 1:length(temp.POSITION48.unique)){
+
+        object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"COMMENT"] <-
+          paste0(object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"COMMENT"],
+                 "OP:",object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"POSITION"])
+
+        object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"POSITION"] <-
+          temp.POSITION48.new[i]
+
+      }
+
+    }
+
+  }
+
+
+  ##COMMENT
+  if(max(nchar(as.character(object at METADATA[,"COMMENT"]), type="bytes"))>80){
+
+    stop("[write_R2BIN()] 'COMMENT' exceed storage limit!")
+
+  }
+
+
+
+  # Translation Matrices -----------------------------------------------------
+
+  ##LTYPE
+  LTYPE.TranslationMatrix <- matrix(NA, nrow=14, ncol=2)
+  LTYPE.TranslationMatrix[,1] <- 0:13
+  LTYPE.TranslationMatrix[,2] <- c("TL",
+                                   "OSL",
+                                   "IRSL",
+                                   "M-IR",
+                                   "M-VIS",
+                                   "TOL",
+                                   "TRPOSL",
+                                   "RIR",
+                                   "RBR",
+                                   "USER",
+                                   "POSL",
+                                   "SGOSL",
+                                   "RL",
+                                   "XRF")
+
+
+
+  ##DTYPE
+  DTYPE.TranslationMatrix <- matrix(NA, nrow=8, ncol=2)
+  DTYPE.TranslationMatrix[,1] <- 0:7
+  DTYPE.TranslationMatrix[,2] <- c("Natural","N+dose","Bleach",
+                                   "Bleach+dose","Natural (Bleach)",
+                                   "N+dose (Bleach)","Dose","Background")
+
+
+  ##LIGHTSOURCE
+  LIGHTSOURCE.TranslationMatrix <- matrix(NA, nrow=8, ncol=2)
+  LIGHTSOURCE.TranslationMatrix[,1] <- 0:7
+  LIGHTSOURCE.TranslationMatrix[,2] <- c("None",
+                                         "Lamp",
+                                         "IR diodes/IR Laser",
+                                         "Calibration LED",
+                                         "Blue Diodes",
+                                         "White light",
+                                         "Green laser (single grain)",
+                                         "IR laser (single grain)"
+  )
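+
+  ##lookup as applied below, e.g., "OSL" is stored as integer code 1:
+  ##as.integer(LTYPE.TranslationMatrix[LTYPE.TranslationMatrix[,2] == "OSL", 1])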
+
+
+  ##TRANSLATE VALUES IN METADATA
+
+  ##LTYPE
+  if(is(object at METADATA[1,"LTYPE"], "character") == TRUE |
+       is(object at METADATA[1,"LTYPE"], "factor") == TRUE){
+
+    object at METADATA[,"LTYPE"]<- sapply(1:length(object at METADATA[,"LTYPE"]),function(x){
+
+      as.integer(LTYPE.TranslationMatrix[object at METADATA[x,"LTYPE"]==LTYPE.TranslationMatrix[,2],1])
+
+    })
+  }
+
+  ##DTYPE
+  if(is(object at METADATA[1,"DTYPE"], "character") == TRUE |
+       is(object at METADATA[1,"DTYPE"], "factor") == TRUE){
+    object at METADATA[,"DTYPE"]<- sapply(1:length(object at METADATA[,"DTYPE"]),function(x){
+
+      as.integer(DTYPE.TranslationMatrix[object at METADATA[x,"DTYPE"]==DTYPE.TranslationMatrix[,2],1])
+
+    })
+  }
+
+  ##LIGHTSOURCE
+  if(is(object at METADATA[1,"LIGHTSOURCE"], "character") == TRUE |
+       is(object at METADATA[1,"LIGHTSOURCE"], "factor") == TRUE){
+
+    object at METADATA[,"LIGHTSOURCE"]<- sapply(1:length(object at METADATA[,"LIGHTSOURCE"]),function(x){
+
+      as.integer(LIGHTSOURCE.TranslationMatrix[
+        object at METADATA[x,"LIGHTSOURCE"]==LIGHTSOURCE.TranslationMatrix[,2],1])
+
+    })}
+
+  ##TIME
+  object at METADATA[,"TIME"] <- sapply(1:length(object at METADATA[,"TIME"]),function(x){
+
+    as.character(gsub(":","",object at METADATA[x,"TIME"]))
+
+  })
+
+  ##TAG and SEL
+  ##the TAG field stores the SEL information; the values are copied to TAG
+  ##before export
+  object@METADATA[,"TAG"] <- ifelse(object@METADATA[,"SEL"] == TRUE, 1, 0)
+
+
+
+  # SET FILE AND VALUES -----------------------------------------------------
+
+  con<-file(file, "wb")
+
+  ##get records
+  n.records <- length(object@METADATA[,"ID"])
+
+  ##output
+  cat(paste("\n[write_R2BIN()]\n\t >> ",file,sep=""), fill=TRUE)
+
+  ##set progressbar
+  if(txtProgressBar==TRUE){
+    pb<-txtProgressBar(min=0,max=n.records, char="=", style=3)
+  }
+
+
+
+  # LOOP -------------------------------------------------------------------
+
+  ID <- 1
+
+  if(version == 03 || version == 04){
+    ## version 03 and 04
+
+    ##start loop for export BIN data
+    while(ID<=n.records) {
+
+      ##VERSION
+      writeBin(as.raw(object at METADATA[ID,"VERSION"]),
+               con,
+               size = 1,
+               endian="little")
+
+      ##stepping
+      writeBin(raw(length=1),
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##LENGTH, PREVIOUS, NPOINTS
+      writeBin(c(as.integer(object at METADATA[ID,"LENGTH"]),
+                 as.integer(object at METADATA[ID,"PREVIOUS"]),
+                 as.integer(object at METADATA[ID,"NPOINTS"])),
+               con,
+               size = 2,
+               endian="little")
+
+
+      ##LTYPE
+      writeBin(object at METADATA[ID,"LTYPE"],
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##LOW, HIGH, RATE
+      writeBin(c(as.double(object at METADATA[ID,"LOW"]),
+                 as.double(object at METADATA[ID,"HIGH"]),
+                 as.double(object at METADATA[ID,"RATE"])),
+               con,
+               size = 4,
+               endian="little")
+
+
+      ##TEMPERATURE, XCOORD, YCOORD, TOLDELAY, TOLON, TOLOFF
+      writeBin(c(as.integer(object@METADATA[ID,"TEMPERATURE"]),
+                 as.integer(object@METADATA[ID,"XCOORD"]),
+                 as.integer(object@METADATA[ID,"YCOORD"]),
+                 as.integer(object@METADATA[ID,"TOLDELAY"]),
+                 as.integer(object@METADATA[ID,"TOLON"]),
+                 as.integer(object@METADATA[ID,"TOLOFF"])),
+               con,
+               size = 2,
+               endian="little")
+
+      ##POSITION, RUN
+      writeBin(c(as.integer(object at METADATA[ID,"POSITION"]),
+                 as.integer(object at METADATA[ID,"RUN"])),
+               con,
+               size = 1,
+               endian="little")
+
+
+
+      ##TIME
+      TIME_SIZE <- nchar(object@METADATA[ID,"TIME"])
+      writeBin(as.integer(TIME_SIZE),
+               con,
+               size = 1,
+               endian="little")
+
+
+      writeChar(object at METADATA[ID,"TIME"],
+                con,
+                nchars =TIME_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
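+      ##pad the TIME field with zero bytes up to its fixed 6-byte width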
+      if(6-TIME_SIZE>0){
+        writeBin(raw(length = c(6-TIME_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+
+      ##DATE
+      writeBin(as.integer(6),
+               con,
+               size = 1 ,
+               endian="little")
+
+
+      suppressWarnings(writeChar(as.character(object at METADATA[ID,"DATE"]),
+                                 con,
+                                 nchars = 6,
+                                 useBytes=TRUE,
+                                 eos = NULL))
+
+
+
+      ##SEQUENCE
+
+      ##count number of characters
+      SEQUENCE_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"SEQUENCE"]), type = "bytes"))
+
+      writeBin(SEQUENCE_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      writeChar(as.character(object at METADATA[ID,"SEQUENCE"]),
+                con,
+                nchars = SEQUENCE_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+      ##stepping
+      if(8-SEQUENCE_SIZE>0){
+        writeBin(raw(length = (8-SEQUENCE_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+      }
+
+      ##USER
+      USER_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"USER"]), type="bytes"))
+
+      writeBin(USER_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      writeChar(as.character(object at METADATA[ID,"USER"]),
+                con,
+                nchars = USER_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+      ##stepping
+      if(8-USER_SIZE>0){
+        writeBin(raw(length = (8-USER_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+      }
+
+      ##DTYPE
+      writeBin(object at METADATA[ID,"DTYPE"],
+               con,
+               size = 1,
+               endian="little")
+
+      ##IRR_TIME
+      writeBin(as.double(object at METADATA[ID,"IRR_TIME"]),
+               con,
+               size = 4,
+               endian="little")
+
+
+      ##IRR_TYPE, IRR_UNIT
+      writeBin(c(object at METADATA[ID,"IRR_TYPE"],
+                 object at METADATA[ID,"IRR_UNIT"]),
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##BL_TIME
+      writeBin(as.double(object at METADATA[ID,"BL_TIME"]),
+               con,
+               size = 4,
+               endian="little")
+
+      ##BL_UNIT
+      writeBin(as.integer(object at METADATA[ID,"DTYPE"]),
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##AN_TEMP, AN_TIME, NORM1, NORM2, NORM3, BG
+      writeBin(c(as.double(object@METADATA[ID,"AN_TEMP"]),
+                 as.double(object@METADATA[ID,"AN_TIME"]),
+                 as.double(object@METADATA[ID,"NORM1"]),
+                 as.double(object@METADATA[ID,"NORM2"]),
+                 as.double(object@METADATA[ID,"NORM3"]),
+                 as.double(object@METADATA[ID,"BG"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##SHIFT
+      writeBin(as.integer(object at METADATA[ID,"SHIFT"]),
+               con,
+               size = 2,
+               endian="little")
+
+
+
+      ##SAMPLE
+      SAMPLE_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"SAMPLE"]), type="bytes"))
+
+      ##avoid problems with empty sample names
+      if(SAMPLE_SIZE == 0){
+
+        SAMPLE_SIZE <- as.integer(2)
+        object@METADATA[ID,"SAMPLE"] <- "  "
+
+      }
+
+      writeBin(SAMPLE_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+
+      writeChar(as.character(object at METADATA[ID,"SAMPLE"]),
+                con,
+                nchars = SAMPLE_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+
+      if((20-SAMPLE_SIZE)>0){
+        writeBin(raw(length = (20-SAMPLE_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+      }
+
+      ##COMMENT
+      COMMENT_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"COMMENT"]), type="bytes"))
+
+      ##avoid problems with empty comments
+      if(COMMENT_SIZE == 0){
+
+        COMMENT_SIZE <- as.integer(2)
+        object@METADATA[ID,"COMMENT"] <- "  "
+
+      }
+
+      writeBin(COMMENT_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      suppressWarnings(writeChar(as.character(object at METADATA[ID,"COMMENT"]),
+                                 con,
+                                 nchars = COMMENT_SIZE,
+                                 useBytes=TRUE,
+                                 eos = NULL))
+
+
+      if((80-COMMENT_SIZE)>0){
+        writeBin(raw(length = c(80-COMMENT_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+      ##LIGHTSOURCE, SET, TAG
+      writeBin(c(as.integer(object at METADATA[ID,"LIGHTSOURCE"]),
+                 as.integer(object at METADATA[ID,"SET"]),
+                 as.integer(object at METADATA[ID,"TAG"])),
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##GRAIN
+      writeBin(as.integer(object at METADATA[ID,"GRAIN"]),
+               con,
+               size = 2,
+               endian="little")
+
+
+      ##LPOWER
+      writeBin(as.double(object at METADATA[ID,"LPOWER"]),
+               con,
+               size = 4,
+               endian="little")
+
+      ##SYSTEMID
+      writeBin(as.integer(object at METADATA[ID,"SYSTEMID"]),
+               con,
+               size = 2,
+               endian="little")
+
+      ##Further distinction needed to fully support format versions 03 and 04 separately
+      if(version == 03){
+
+
+        ##RESERVED 1
+        if(length(object@.RESERVED) == 0 || version.original != version){
+          writeBin(raw(length=36),
+                   con,
+                   size = 1,
+                   endian="little")
+        }else{
+
+          writeBin(object@.RESERVED[[ID]][[1]],
+                   con,
+                   size = 1,
+                   endian="little")
+
+        }
+
+        ##ONTIME, OFFTIME
+        writeBin(c(as.integer(object at METADATA[ID,"ONTIME"]),
+                   as.integer(object at METADATA[ID,"OFFTIME"])),
+                 con,
+                 size = 4,
+                 endian="little")
+
+        ##GATE_ENABLED
+        writeBin(as.integer(object at METADATA[ID,"GATE_ENABLED"]),
+                 con,
+                 size = 1,
+                 endian="little")
+
+
+        ##GATE_START, GATE_STOP
+        writeBin(c(as.integer(object at METADATA[ID,"GATE_START"]),
+                   as.integer(object at METADATA[ID,"GATE_STOP"])),
+                 con,
+                 size = 4,
+                 endian="little")
+
+
+        ##RESERVED 2
+        if(length(object@.RESERVED) == 0 || version.original != version){
+          writeBin(raw(length=1),
+                   con,
+                   size = 1,
+                   endian="little")
+        }else{
+
+          writeBin(object@.RESERVED[[ID]][[2]],
+                   con,
+                   size = 1,
+                   endian="little")
+
+        }
+
+      } else {
+        ##version 04
+
+
+        ##RESERVED 1
+        if(length(object@.RESERVED) == 0 || version.original != version){
+          writeBin(raw(length=20),
+                   con,
+                   size = 1,
+                   endian="little")
+        }else{
+
+          writeBin(object@.RESERVED[[ID]][[1]],
+                   con,
+                   size = 1,
+                   endian="little")
+
+        }
+
+        ##CURVENO
+        writeBin(as.integer(object at METADATA[ID,"CURVENO"]),
+                 con,
+                 size = 2,
+                 endian="little")
+
+        ##TIMETICK
+        writeBin(c(as.double(object at METADATA[ID,"TIMETICK"])),
+                 con,
+                 size = 4,
+                 endian="little")
+
+        ##ONTIME, STIMPERIOD
+        writeBin(c(as.integer(object at METADATA[ID,"ONTIME"]),
+                   as.integer(object at METADATA[ID,"STIMPERIOD"])),
+                 con,
+                 size = 4,
+                 endian="little")
+
+        ##GATE_ENABLED
+        writeBin(as.integer(object at METADATA[ID,"GATE_ENABLED"]),
+                 con,
+                 size = 1,
+                 endian="little")
+
+
+        ##GATE_START, GATE_STOP
+        writeBin(c(as.integer(object at METADATA[ID,"GATE_START"]),
+                   as.integer(object at METADATA[ID,"GATE_STOP"])),
+                 con,
+                 size = 4,
+                 endian="little")
+
+
+        ##PTENABLED
+        writeBin(as.integer(object at METADATA[ID,"PTENABLED"]),
+                 con,
+                 size = 1,
+                 endian="little")
+
+
+        ##RESERVED 2
+        if(length(object@.RESERVED) == 0 || version.original != version){
+          writeBin(raw(length=10),
+                   con,
+                   size = 1,
+                   endian="little")
+
+        }else{
+
+          writeBin(object@.RESERVED[[ID]][[2]],
+                   con,
+                   size = 1,
+                   endian="little")
+
+        }
+
+
+
+      }
+      ##DPOINTS
+      writeBin(as.integer(unlist(object@DATA[ID])),
+               con,
+               size = 4,
+               endian="little")
+
+
+      #SET UNIQUE ID
+      ID<-ID+1
+
+      ##update progress bar
+      if(txtProgressBar==TRUE){
+        setTxtProgressBar(pb, ID)
+      }
+    }
+  }
+  ## ====================================================
+  ## version 06
+
+  if(version == 06 | version == 07 | version == 08){
+
+    ##start loop for export BIN data
+    while(ID<=n.records) {
+
+      ##VERSION
+      writeBin(as.raw(object at METADATA[ID,"VERSION"]),
+               con,
+               size = 1,
+               endian="little")
+
+      ##stepping
+      writeBin(raw(length=1),
+               con,
+               size = 1,
+               endian="little")
+
+      ##LENGTH, PREVIOUS, NPOINTS
+      writeBin(c(as.integer(object at METADATA[ID,"LENGTH"]),
+                 as.integer(object at METADATA[ID,"PREVIOUS"]),
+                 as.integer(object at METADATA[ID,"NPOINTS"])),
+               con,
+               size = 4,
+               endian="little")
+
+      if(version == 08){
+        writeBin(object at METADATA[ID,"RECTYPE"],
+                 con,
+                 size = 1,
+                 endian="little")
+      }
+
+      ##RUN, SET, POSITION, GRAINNUMBER, CURVENO, XCOORD, YCOORD
+      writeBin(c(as.integer(object at METADATA[ID,"RUN"]),
+                 as.integer(object at METADATA[ID,"SET"]),
+                 as.integer(object at METADATA[ID,"POSITION"]),
+                 as.integer(object at METADATA[ID,"GRAINNUMBER"]),
+                 as.integer(object at METADATA[ID,"CURVENO"]),
+                 as.integer(object at METADATA[ID,"XCOORD"]),
+                 as.integer(object at METADATA[ID,"YCOORD"])),
+               con,
+               size = 2,
+               endian="little")
+
+      ##SAMPLE
+      SAMPLE_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"SAMPLE"]), type="bytes"))
+
+      ##avoid problems with empty sample names
+      if(SAMPLE_SIZE == 0){
+
+        SAMPLE_SIZE <- as.integer(2)
+        object@METADATA[ID,"SAMPLE"] <- "  "
+
+      }
+
+      writeBin(SAMPLE_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+
+      writeChar(as.character(object at METADATA[ID,"SAMPLE"]),
+                con,
+                nchars = SAMPLE_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+
+      if((20-SAMPLE_SIZE)>0){
+        writeBin(raw(length = (20-SAMPLE_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+      }
+
+      ##COMMENT
+      COMMENT_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"COMMENT"]), type="bytes"))
+
+      ##avoid problems with empty comments
+      if(COMMENT_SIZE == 0){
+
+        COMMENT_SIZE <- as.integer(2)
+        object@METADATA[ID,"COMMENT"] <- "  "
+
+      }
+
+      writeBin(COMMENT_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      writeChar(as.character(object at METADATA[ID,"COMMENT"]),
+                con,
+                nchars = COMMENT_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+
+      if((80-COMMENT_SIZE)>0){
+        writeBin(raw(length = c(80-COMMENT_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+      ##Instrument and sequence characteristics
+      ##SYSTEMID
+      writeBin(as.integer(object at METADATA[ID,"SYSTEMID"]),
+               con,
+               size = 2,
+               endian="little")
+
+      ##FNAME
+      FNAME_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"FNAME"]), type="bytes"))
+
+        ##correct for case that this is of 0 length
+        if(length(FNAME_SIZE) == 0){FNAME_SIZE <- as.integer(0)}
+
+      writeBin(FNAME_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      if(FNAME_SIZE>0) {
+        writeChar(
+          as.character(object at METADATA[ID,"FNAME"]),
+          con,
+          nchars = FNAME_SIZE,
+          useBytes = TRUE,
+          eos = NULL
+        )
+      }
+
+      if((100-FNAME_SIZE)>0){
+        writeBin(raw(length = c(100-FNAME_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+      ##USER
+      USER_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"USER"]), type="bytes"))
+
+      writeBin(USER_SIZE,
+               con,
+               size = 1,
+               endian="little")
+
+      writeChar(as.character(object at METADATA[ID,"USER"]),
+                con,
+                nchars = USER_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
+
+      if((30-USER_SIZE)>0){
+        writeBin(raw(length = c(30-USER_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+      ##TIME
+      TIME_SIZE <- nchar(object@METADATA[ID,"TIME"])
+
+      writeBin(as.integer(TIME_SIZE),
+               con,
+               size = 1,
+               endian="little")
+
+      writeChar(object at METADATA[ID,"TIME"],
+                con,
+                nchars =TIME_SIZE,
+                useBytes=TRUE,
+                eos = NULL)
+
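+      ##pad the TIME field with zero bytes up to its fixed 6-byte width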
+      if(6-TIME_SIZE>0){
+        writeBin(raw(length = c(6-TIME_SIZE)),
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+
+      ##DATE
+      writeBin(as.integer(6),
+               con,
+               size = 1 ,
+               endian="little")
+
+
+      suppressWarnings(writeChar(as.character(object at METADATA[ID,"DATE"]),
+                                 con,
+                                 nchars = 6,
+                                 useBytes=TRUE,
+                                 eos = NULL))
+
+      ##Analysis
+      ##DTYPE
+      writeBin(object at METADATA[ID,"DTYPE"],
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##BL_TIME
+      writeBin(as.double(object at METADATA[ID,"BL_TIME"]),
+               con,
+               size = 4,
+               endian="little")
+
+      ##BL_UNIT
+      writeBin(as.integer(object at METADATA[ID,"DTYPE"]),
+               con,
+               size = 1,
+               endian="little")
+
+      ##NORM1, NORM2, NORM3, BG
+      writeBin(c(as.double(object at METADATA[ID,"NORM1"]),
+                 as.double(object at METADATA[ID,"NORM2"]),
+                 as.double(object at METADATA[ID,"NORM3"]),
+                 as.double(object at METADATA[ID,"BG"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##SHIFT
+      writeBin(as.integer(object at METADATA[ID,"SHIFT"]),
+               con,
+               size = 2,
+               endian="little")
+
+      ##TAG
+      writeBin(c(as.integer(object at METADATA[ID,"TAG"])),
+               con,
+               size = 1,
+               endian="little")
+
+      ##RESERVED 1
+      if(length(object@.RESERVED) == 0 || version.original != version){
+        writeBin(raw(length=20),
+                 con,
+                 size = 1,
+                 endian="little")
+      }else{
+
+        writeBin(object@.RESERVED[[ID]][[1]],
+                 con,
+                 size = 1,
+                 endian="little")
+
+      }
+
+      ##Measurement characteristics
+      ##LTYPE
+      writeBin(object at METADATA[ID,"LTYPE"],
+               con,
+               size = 1,
+               endian="little")
+
+
+      ##LIGHTSOURCE
+      writeBin(c(as.integer(object at METADATA[ID,"LIGHTSOURCE"])),
+               con,
+               size = 1,
+               endian="little")
+
+      ##LIGHTPOWER, LOW, HIGH, RATE
+      writeBin(c(as.double(object at METADATA[ID,"LIGHTPOWER"]),
+                 as.double(object at METADATA[ID,"LOW"]),
+                 as.double(object at METADATA[ID,"HIGH"]),
+                 as.double(object at METADATA[ID,"RATE"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##TEMPERATURE, MEASTEMP
+      writeBin(c(as.integer(object at METADATA[ID,"TEMPERATURE"]),
+                 as.integer(object at METADATA[ID,"MEASTEMP"])),
+               con,
+               size = 2,
+               endian="little")
+
+      ##AN_TEMP, AN_TIME
+      writeBin(c(as.double(object at METADATA[ID,"AN_TEMP"]),
+                 as.double(object at METADATA[ID,"AN_TIME"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##TOLDELAY, TOLON, TOLOFF
+      writeBin(c(as.integer(object@METADATA[ID,"TOLDELAY"]),
+                 as.integer(object@METADATA[ID,"TOLON"]),
+                 as.integer(object@METADATA[ID,"TOLOFF"])),
+               con,
+               size = 2,
+               endian="little")
+
+      ##IRR_TIME
+      writeBin(as.double(object at METADATA[ID,"IRR_TIME"]),
+               con,
+               size = 4,
+               endian="little")
+
+
+      ##IRR_TYPE
+      writeBin(c(object at METADATA[ID,"IRR_TYPE"]),
+               con,
+               size = 1,
+               endian="little")
+
+      ##IRR_DOSERATE, IRR_DOSERATEERR
+      writeBin(c(as.double(object at METADATA[ID,"IRR_DOSERATE"]),
+                 as.double(object at METADATA[ID,"IRR_DOSERATEERR"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##TIMESINCEIRR
+      writeBin(c(as.integer(object at METADATA[ID,"TIMESINCEIRR"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##TIMETICK
+      writeBin(c(as.double(object at METADATA[ID,"TIMETICK"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##ONTIME, STIMPERIOD
+      writeBin(c(suppressWarnings(as.integer(object at METADATA[ID,"ONTIME"])),
+                 as.integer(object at METADATA[ID,"STIMPERIOD"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##GATE_ENABLED
+      writeBin(as.integer(object at METADATA[ID,"GATE_ENABLED"]),
+               con,
+               size = 1,
+               endian="little")
+
+      ##GATE_START, GATE_STOP
+      writeBin(c(as.integer(object at METADATA[ID,"GATE_START"]),
+                 as.integer(object at METADATA[ID,"GATE_STOP"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##PTENABLED, DTENABLED
+      writeBin(c(as.integer(object at METADATA[ID,"PTENABLED"]),
+                 as.integer(object at METADATA[ID,"DTENABLED"])),
+               con,
+               size = 1,
+               endian="little")
+
+      ##DEADTIME, MAXLPOWER, XRF_ACQTIME, XRF_HV
+      writeBin(c(as.double(object at METADATA[ID,"DEADTIME"]),
+                 as.double(object at METADATA[ID,"MAXLPOWER"]),
+                 as.double(object at METADATA[ID,"XRF_ACQTIME"]),
+                 as.double(object at METADATA[ID,"XRF_HV"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##XRF_CURR
+      writeBin(c(as.integer(object at METADATA[ID,"XRF_CURR"])),
+               con,
+               size = 4,
+               endian="little")
+
+      ##XRF_DEADTIMEF
+      writeBin(c(as.double(object at METADATA[ID,"XRF_DEADTIMEF"])),
+               con,
+               size = 4,
+               endian="little")
+
+
+      ##add version support for V7
+      if(version == 06){
+
+        ##RESERVED 2
+        if(length(object@.RESERVED) == 0 || version.original != version){
+          writeBin(raw(length=24),
+                   con,
+                   size = 1,
+                   endian="little")
+        }else{
+
+          writeBin(object@.RESERVED[[ID]][[2]],
+                   con,
+                   size = 1,
+                   endian="little")
+        }
+
+      }else{
+
+        ##DETECTOR_ID
+        writeBin(as.integer(object at METADATA[ID,"DETECTOR_ID"]),
+                 con,
+                 size = 1,
+                 endian="little")
+
+        ##LOWERFILTER_ID, UPPERFILTER_ID
+        writeBin(c(as.integer(object at METADATA[ID,"LOWERFILTER_ID"]),
+                   as.integer(object at METADATA[ID,"UPPERFILTER_ID"])),
+                 con,
+                 size = 2,
+                 endian="little")
+
+
+        ##ENOISEFACTOR
+        writeBin(as.double(object at METADATA[ID,"ENOISEFACTOR"]),
+                 con,
+                 size = 4,
+                 endian="little")
+
+
+        ##VERSION 07 (else branch: VERSION 08)
+        if(version == 07){
+
+          ##RESERVED 2
+          if(length(object@.RESERVED) == 0 || version.original != version){
+            writeBin(raw(length=15),
+                     con,
+                     size = 1,
+                     endian="little")
+          }else{
+
+            writeBin(object@.RESERVED[[ID]][[2]],
+                     con,
+                     size = 1,
+                     endian="little")
+          }
+
+
+        }else{
+
+          ##MARKPOS POSITION and extraction
+          writeBin(
+            c(
+              as.double(object at METADATA[ID, "MARKPOS_X1"]),
+              as.double(object at METADATA[ID, "MARKPOS_Y1"]),
+              as.double(object at METADATA[ID, "MARKPOS_X2"]),
+              as.double(object at METADATA[ID, "MARKPOS_Y2"]),
+              as.double(object at METADATA[ID, "MARKPOS_X3"]),
+              as.double(object at METADATA[ID, "MARKPOS_Y3"]),
+              as.double(object at METADATA[ID, "EXTR_START"]),
+              as.double(object at METADATA[ID, "EXTR_END"])
+            ),
+            con,
+            size = 4,
+            endian = "little"
+          )
+
+
+          ##RESERVED 2
+          if(length(object@.RESERVED) == 0 || version.original != version){
+            writeBin(raw(length=42),
+                     con,
+                     size = 1,
+                     endian="little")
+          }else{
+
+            writeBin(object@.RESERVED[[ID]][[2]],
+                     con,
+                     size = 1,
+                     endian="little")
+          }
+
+
+        }
+
+
+      }#end if version decision
+
+      ##DPOINTS
+      writeBin(as.integer(unlist(object@DATA[ID])),
+               con,
+               size = 4,
+               endian="little")
+
+
+      #SET UNIQUE ID
+      ID <- ID + 1
+
+      ##update progress bar
+      if(txtProgressBar==TRUE){
+        setTxtProgressBar(pb, ID)
+      }
+
+    }
+  }
+
+  ##close connection
+  close(con)
+
+  ##close progress bar
+  if(txtProgressBar==TRUE){close(pb)}
+
+  ##output
+  cat(paste("\t >> ", ID - 1, " records have been written successfully!\n\n", sep=""))
+
+}
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100644
index 0000000..b9403de
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,280 @@
+##//////////////////////////////////////////////////////////////////////////////
+##//zzz.R
+##//////////////////////////////////////////////////////////////////////////////
+##
+##==============================================================================
+##author: R Luminescence Package Team
+##organisation:
+##version.: 0.2.1
+##date: 2013-11-10
+##==============================================================================
+# Set namespace .LuminescenceEnv ------------------------------------------
+.LuminescenceEnv <- new.env(parent = emptyenv())
+
+
+# Assign variables to Namespace -------------------------------------------
+##variable col to define colours in the functions for output
+assign("col",
+       unlist(colors())[c(261,552,51,62,76,151,451,474,654,657,100,513,23,612,129,27,551,393,80,652,555)],
+       pos = ".LuminescenceEnv",
+       envir = .LuminescenceEnv)
+
+
+
+##==============================================================================
+##on Attach
+.onAttach <- function(libname,pkgname){
+
+  ##set startup message
+  try(packageStartupMessage(paste("Welcome to the R package Luminescence version ",
+                              packageDescription(pkg="Luminescence")$Version,
+                              " [Built: ",
+                              strsplit(packageDescription(pkg="Luminescence")$Packaged, ";")[[1]][1],
+                             "]", sep=""),
+                            "\n",
+                            get_Quote()), silent=TRUE)
+}
+
+##==============================================================================
+# DO NOT TOUCH! -----------------------------------------------------------
+
+
+
+#' sTeve - sophisticated tool for efficient data validation and evaluation
+#'
+#' This function provides a sophisticated routine for comprehensive
+#' luminescence dating data analysis.
+#'
+#' This amazing sophisticated function validates your data seriously.
+#'
+#' @param n_frames \code{\link{integer}} (with default): n frames
+#' @param t_animation \code{\link{integer}} (with default): t animation
+#' @param n.tree \code{\link{integer}} (with default): How many trees do you
+#' want to cut?
+#' @param type \code{\link{integer}} (optional): Make a decision: 1, 2 or 3
+#' @return Validates your data.
+#' @note This function should not be taken too seriously.
+#' @author R Luminescence Team, 2012-2013
+#' @seealso \link{plot_KDE}
+#' @references #
+#' @keywords manip
+#' @examples
+#'
+#' ##no example available
+#'
+#' @export
+sTeve<- function(n_frames = 10, t_animation = 2, n.tree = 7, type) {
+
+  ## allow new overlay plot
+  par(new = TRUE)
+
+  ## infer month of year
+  month <- as.numeric(strsplit(x = as.character(Sys.Date()), split = "-")[[1]][2])
+
+  ## select showtime item based on month or user-defined type
+  if(missing(type)) {
+    if(month >= 1 & month <= 3) {
+      type <- 1
+    } else if(month >3 & month <= 11) {
+      type <- 2
+    } else if(month > 11 & month <= 12) {
+      type <- 3
+    }
+  }
+
+
+
+  if(type == 1) {
+    ## SHOWTIME OPTION 1
+    Sys.sleep(5)
+    shape::emptyplot()
+    shape::filledrectangle(wx = 0.9, wy = 0.4,
+                    mid = c(0.5, 0.5),
+                    lcol ="red",
+                    lwd=1,
+                    col=0,
+                    angle = 45)
+
+    text(x=0.5, y=0.5,
+         labels="NOT FUNNY",
+         cex=2,
+         col="red",
+         font=2,
+         srt=45)
+  } else if(type == 2) {
+
+    ## SHOWTIME OPTION 2
+    plot(NA, xlim = c(0, 10),
+         ylim = c(0, 10),
+         main = "",
+         xlab = "",
+         ylab = "",
+         axes = FALSE,
+         frame.plot = FALSE)
+
+    dt <- t_animation / n_frames
+    x1 <- seq(0, 10, length.out = n_frames)
+    y1 <- rep(1.5, n_frames)
+    r1 <- 0.5
+
+    x2 <- seq(0, 16, length.out = n_frames)
+    y2 <- rep(8.5, n_frames)
+    r2 <- 0.5
+
+    x4 <- seq(11, 0, length.out = n_frames)
+    y4 <- rep(5, n_frames)
+    r4 <- 0.5
+
+    # set angles for each step of mouth opening
+    angles_mouth <- rep(c(0.01, 0.25, 0.5, 0.25),
+                        length.out = n_frames)
+
+    for(i in 1:n_frames){
+      # define pacman circles
+      shape::filledcircle(r1 = r1,
+                   r2 = 0.00001,
+                   mid = c(x1[i], y1[i]),
+                   from = angles_mouth[i],
+                   to = 2 * pi - angles_mouth[i],
+                   col = "yellow")
+      shape::filledcircle(r1 = r2,
+                   r2 = 0.00001,
+                   mid = c(x2[i], y2[i]),
+                   from = angles_mouth[i],
+                   to = 2 * pi - angles_mouth[i],
+                   col = "yellow")
+      shape::filledcircle(r1 = r4,
+                   r2 = 0.00001,
+                   mid = c(x4[i], y4[i]),
+                   from = angles_mouth[i] + 3,
+                   to = 2 * pi - angles_mouth[i] + 3,
+                   col = "yellow")
+
+      # define eyes for pacman
+      points(x1[i] + 0.2, y1[i] + 0.75, pch = 21, bg = 1, cex = 0.7)
+      points(x2[i] + 0.2, y2[i] + 0.75, pch = 21, bg = 1, cex = 0.7)
+      points(x4[i] - 0.05, y4[i] + 0.75, pch = 21, bg = 1, cex = 0.7)
+
+      Sys.sleep(dt)
+
+      shape::plotcircle(r = 1.1 * r1,
+                 mid = c(x1[i], y1[i]),
+                 col = "white",
+                 lcol = "white")
+      shape::plotcircle(r = 1.1 * r2,
+                 mid = c(x2[i], y2[i]),
+                 col = "white",
+                 lcol = "white")
+      shape::plotcircle(r = 1.1 * r4,
+                 mid = c(x4[i], y4[i]),
+                 col = "white",
+                 lcol = "white")
+    }
+  } else if(type == 3) {
+    ## calculate display ratio
+    f <- par()$pin[2] / par()$pin[1]
+
+    ## create new overlay plot
+    plot(NA,
+         xlim = c(0, 100),
+         ylim = c(0, 100),
+         axes = F,
+         frame.plot = FALSE,
+         xlab = "",
+         ylab = "")
+
+    ## create semi-transparent layer
+    polygon(x = c(-100, -100, 200, 200),
+            y = c(-100, 200, 200, -100),
+            col = rgb(1,1,1, 0.8),
+            lty = 0)
+
+    ## draw christmas trees
+    n <- n.tree
+    tree.x <- runif(n, 10, 90)
+    tree.y <- runif(n, 10, 90)
+    tree.size <- runif(n, 0.3, 1.5)
+
+    for(i in 1:n) {
+      ## stem
+      polygon(x = c(tree.x[i] - 1.5 * tree.size[i],
+                    tree.x[i] - 1.5 * tree.size[i],
+                    tree.x[i] + 1.5 * tree.size[i],
+                    tree.x[i] + 1.5 * tree.size[i]),
+              y = c(tree.y[i] - 12 * tree.size[i],
+                    tree.y[i] - 1 * tree.size[i],
+                    tree.y[i] - 1 * tree.size[i],
+                    tree.y[i] - 12 * tree.size[i]),
+              col = "rosybrown4",
+              lty = 0)
+
+      ## branch one
+      shape::filledellipse(rx1 = 10 * tree.size[i],
+                    rx2 = 0.00001,
+                    mid = c(tree.x[i], tree.y[i] + 3 * tree.size[i]),
+                    col = "darkgreen",
+                    from = 4.0143,
+                    to = 5.41052)
+
+      ## branch two
+      shape::filledellipse(rx1 = 8 * tree.size[i],
+                    rx2 = 0.00001,
+                    mid = c(tree.x[i], tree.y[i] + 7 * tree.size[i]),
+                    col = "darkgreen",
+                    from = 4.0143,
+                    to = 5.41052)
+
+      ## branch three
+      shape::filledellipse(rx1 = 6 * tree.size[i],
+                    rx2 = 0.00001,
+                    mid = c(tree.x[i], tree.y[i] + 9 * tree.size[i]),
+                    col = "darkgreen",
+                    from = 4.0143,
+                    to = 5.41052)
+
+      ## branch four
+      shape::filledellipse(rx1 = 4 * tree.size[i],
+                    rx2 = 0.00001,
+                    mid = c(tree.x[i], tree.y[i] + 11 * tree.size[i]),
+                    col = "darkgreen",
+                    from = 4.0143,
+                    to = 5.41052)
+
+      ## sphere one
+      shape::filledellipse(rx1 = 1 * f * tree.size[i],
+                    ry1 = 1 * tree.size[i],
+                    mid = c(tree.x[i] + 2 * tree.size[i],
+                            tree.y[i] + 5 * tree.size[i]),
+                    col = shape::shadepalette(n = 20, endcol = "darkred"))
+
+      ## sphere two
+      shape::filledellipse(rx1 = 0.8 * f * tree.size[i],
+                    ry1 = 0.8 * tree.size[i],
+                    mid = c(tree.x[i] - 1 * tree.size[i],
+                            tree.y[i] - 3 * tree.size[i]),
+                    col = shape::shadepalette(n = 20, endcol = "orange"))
+
+      ## sphere three
+      shape::filledellipse(rx1 = 1.2 * f * tree.size[i],
+                    ry1 = 1.2 * tree.size[i],
+                    mid = c(tree.x[i] - 1.7 * tree.size[i],
+                            tree.y[i] + 2 * tree.size[i]),
+                    col = shape::shadepalette(n = 20, endcol = "yellow3"))
+
+      ## sphere four
+      shape::filledellipse(rx1 = 1 * f * tree.size[i],
+                    ry1 = 1 * tree.size[i],
+                    mid = c(tree.x[i] + 3 * tree.size[i],
+                            tree.y[i] - 4 * tree.size[i]),
+                    col = shape::shadepalette(n = 20, endcol = "darkblue"))
+
+      Sys.sleep(0.1)
+    }
+
+    ## add snow
+    points(runif(300, 0, 100), runif(300, 0, 100), pch = 8, col = "lightgrey")
+  }
+}#end function
diff --git a/data/BaseDataSet.CosmicDoseRate.RData b/data/BaseDataSet.CosmicDoseRate.RData
new file mode 100644
index 0000000..00e75d8
Binary files /dev/null and b/data/BaseDataSet.CosmicDoseRate.RData differ
diff --git a/data/ExampleData.BINfileData.RData b/data/ExampleData.BINfileData.RData
new file mode 100644
index 0000000..cf0cff4
Binary files /dev/null and b/data/ExampleData.BINfileData.RData differ
diff --git a/data/ExampleData.CW_OSL_Curve.RData b/data/ExampleData.CW_OSL_Curve.RData
new file mode 100644
index 0000000..cb7a1dc
Binary files /dev/null and b/data/ExampleData.CW_OSL_Curve.RData differ
diff --git a/data/ExampleData.DeValues.RData b/data/ExampleData.DeValues.RData
new file mode 100644
index 0000000..3e85206
Binary files /dev/null and b/data/ExampleData.DeValues.RData differ
diff --git a/data/ExampleData.FittingLM.RData b/data/ExampleData.FittingLM.RData
new file mode 100644
index 0000000..300e7bd
Binary files /dev/null and b/data/ExampleData.FittingLM.RData differ
diff --git a/data/ExampleData.LxTxData.RData b/data/ExampleData.LxTxData.RData
new file mode 100644
index 0000000..bbfab7a
Binary files /dev/null and b/data/ExampleData.LxTxData.RData differ
diff --git a/data/ExampleData.LxTxOSLData.RData b/data/ExampleData.LxTxOSLData.RData
new file mode 100644
index 0000000..42a8847
Binary files /dev/null and b/data/ExampleData.LxTxOSLData.RData differ
diff --git a/data/ExampleData.RLum.Analysis.RData b/data/ExampleData.RLum.Analysis.RData
new file mode 100644
index 0000000..907d366
Binary files /dev/null and b/data/ExampleData.RLum.Analysis.RData differ
diff --git a/data/ExampleData.RLum.Data.Image.RData b/data/ExampleData.RLum.Data.Image.RData
new file mode 100644
index 0000000..c052c1b
Binary files /dev/null and b/data/ExampleData.RLum.Data.Image.RData differ
diff --git a/data/ExampleData.XSYG.RData b/data/ExampleData.XSYG.RData
new file mode 100644
index 0000000..afc2616
Binary files /dev/null and b/data/ExampleData.XSYG.RData differ
diff --git a/data/datalist b/data/datalist
new file mode 100644
index 0000000..8ad23c4
--- /dev/null
+++ b/data/datalist
@@ -0,0 +1,10 @@
+BaseDataSet.CosmicDoseRate
+ExampleData.DeValues
+ExampleData.FittingLM
+ExampleData.LxTxData
+ExampleData.LxTxOSLData
+ExampleData.BINfileData
+ExampleData.CW_OSL_Curve
+ExampleData.RLum.Analysis
+ExampleData.RLum.Data.Image
+ExampleData.XSYG
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 4c364a8..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,21 +0,0 @@
-r-cran-luminescence (0.6.4-1) unstable; urgency=medium
-
-  * New upstream version
-  * Convert to dh-r
-  * Canonical homepage for CRAN
-  * d/watch: version=4
-
- -- Andreas Tille <tille at debian.org>  Fri, 11 Nov 2016 09:05:30 +0100
-
-r-cran-luminescence (0.6.1-1) unstable; urgency=medium
-
-  * New upstream version
-  * Versioned (Build-)Depends from r-cran-rcpp (>= 0.12.5)
-
- -- Andreas Tille <tille at debian.org>  Wed, 13 Jul 2016 17:03:27 +0200
-
-r-cran-luminescence (0.6.0-1) unstable; urgency=low
-
-  * Initial release (closes: #829665)
-
- -- Andreas Tille <tille at debian.org>  Tue, 05 Jul 2016 09:16:39 +0200
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index ec63514..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 9e6c91b..0000000
--- a/debian/control
+++ /dev/null
@@ -1,37 +0,0 @@
-Source: r-cran-luminescence
-Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 9),
-               dh-r,
-               r-base-dev,
-               r-cran-bbmle,
-               r-cran-data.table,
-               r-cran-httr,
-               r-cran-matrixstats,
-               r-cran-minpack.lm,
-               r-cran-raster,
-               r-cran-rcpp (>= 0.12.5),
-               r-cran-rcpparmadillo,
-               r-cran-readxl,
-               r-cran-shape,
-               r-cran-xml,
-               r-cran-zoo
-Standards-Version: 3.9.8
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-med/trunk/packages/R/r-cran-luminescence/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/R/r-cran-luminescence/trunk/
-Homepage: https://cran.r-project.org/package=Luminescence
-
-Package: r-cran-luminescence
-Architecture: any
-Depends: ${shlibs:Depends},
-         ${misc:Depends},
-         ${R:Depends}
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: GNU R comprehensive luminescence dating data analysis
- A collection of various R functions for the purpose of Luminescence
- dating data analysis. This includes, amongst others, data import,
- export, application of age models, curve deconvolution, sequence
- analysis and plotting of equivalent dose distributions.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index 9137e66..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,28 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: Luminescence
-Upstream-Contact: Sebastian Kreutzer <sebastian.kreutzer at u-bordeaux-montaigne.fr>
-Source: https://cran.r-project.org/package=Luminescence
-
-Files: *
-Copyright: 2013-2016 Sebastian Kreutzer, Michael Dietze, Christoph Burow,
-                     Margret C. Fuchs, Christoph Schmidt, Manfred Fischer,
-                     Johannes Friedrich, Norbert Mercier, Rachel K. Smedley,
-                     Julie Durcan, Georgina King, Markus Fuchs 
-License: GPL-3
-
-Files: debian/*
-Copyright: 2016 Andreas Tille <tille at debian.org>
-License: GPL-3
-
-License: GPL-3
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 3 of the License.
- .
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
- .
- On Debian systems, the complete text of the GNU General Public
- License can be found in `/usr/share/common-licenses/GPL-3'.
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 68d9a36..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/make -f
-
-%:
-	dh $@ --buildsystem R
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 4a59337..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=4
-http://cran.r-project.org/src/contrib/Luminescence_([-\d.]*)\.tar\.gz
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..20e7a45
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,49 @@
+citHeader("To cite the package 'Luminescence' in publications use:")
+
+	 citation(auto = meta)
+
+	 bibentry(bibtype = "Article",
+			    title = "A practical guide to the R package Luminescence",
+				author = "Michael Dietze, Sebastian Kreutzer, Margret C. Fuchs, Christoph Burow, Manfred Fischer, Christoph Schmidt",
+				year = "2013",
+				journal = "Ancient TL",
+				volume = "31",
+				pages = "11-18")
+
+	  bibentry(bibtype = "Article",
+			    title = "Introducing an R package for luminescence dating analysis",
+				author = "Sebastian Kreutzer, Christoph Schmidt, Margret C. Fuchs, Michael Dietze, Manfred Fischer, Markus Fuchs",
+				year = "2012",
+				journal = "Ancient TL",
+				volume = "30",
+				pages = "1-8")
+
+	  bibentry(bibtype = "Article",
+			  	title = "Data processing in luminescence dating analysis: An exemplary workflow using the R package 'Luminescence'",
+				author = "Margret C. Fuchs, Sebastian Kreutzer, Christoph Burow, Michael Dietze, Manfred Fischer, Christoph Schmidt, Markus Fuchs",
+				year = "2015",
+				journal = "Quaternary International",
+				volume = "362",
+				pages = "8-13",
+				doi = "10.1016/j.quaint.2014.06.034")
+
+		bibentry(bibtype = "Article",
+		      title = "A new R function for the Internal External Uncertainty (IEU) model",
+		    author = "Smedley, Rachel K",
+        journal = "Ancient TL",
+        year = "2015",
+        volume = "33",
+        number = "1",
+        pages = "16-21")
+
+		bibentry(bibtype = "Article",
+			  	title = "The abanico plot: visualising chronometric data with individual standard errors",
+				author = "Michael Dietze, Sebastian Kreutzer, Christoph Burow, Margret C. Fuchs, Manfred Fischer, Christoph Schmidt",
+				year = "2016",
+				journal = "Quaternary Geochronology",
+				volume = "31",
+				pages = "12-18",
+				doi = "10.1016/j.quageo.2015.09.003")
+
+
+
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
new file mode 100644
index 0000000..8693207
--- /dev/null
+++ b/inst/NEWS.Rd
@@ -0,0 +1,38 @@
+\name{NEWS}
+\title{NEWS for the R Package Luminescence}
+\section{Changes in version 0.6.4 (9th September 2016)}{
+
+
+
+  \subsection{Bugfixes and changes}{
+   \itemize{
+
+
+      \item \code{analyse_baSAR()}
+
+       \itemize{
+          \item Fix problem that caused a function crash if an XLS-file was provided as input
+          for the grain selection.
+       }
+
+      \item \code{analyse_pIRIRSequence()}
+
+       \itemize{
+          \item Account for a minor layout problem while plotting the combined growth curve (y-axis
+          scaling was not sufficient)
+       }
+
+
+      \item \code{plot_AbanicoPlot()}
+
+       \itemize{
+          \item The relative and absolute standard deviation were mixed up in the summary; fixed.
+       }
+
+
+      }
+
+    }
+
+
+}
diff --git a/inst/doc/S4classObjects.pdf b/inst/doc/S4classObjects.pdf
new file mode 100644
index 0000000..bc9393a
Binary files /dev/null and b/inst/doc/S4classObjects.pdf differ
diff --git a/inst/doc/index.html b/inst/doc/index.html
new file mode 100644
index 0000000..f0070bb
--- /dev/null
+++ b/inst/doc/index.html
@@ -0,0 +1,75 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><title>R package Luminescence - supplementary data</title>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+<style type="text/css">
+<!--
+BODY{		background: white;
+		color: black }
+
+A:link{         background: white;
+                color: blue }
+A:visited{	background: white;
+		color: rgb(50%, 0%, 50%) }
+
+H1{		background: white;
+		color: rgb(55%, 55%, 55%);
+		font-family: monospace;
+		font-size: x-large;
+		text-align: center }
+
+H2{		background: white;
+		color: rgb(40%, 40%, 40%);
+		font-family: monospace;
+		font-size: large;
+		text-align: center }
+
+H3{		background: white;
+		color: rgb(40%, 40%, 40%);
+		font-family: monospace;
+		font-size: large }
+
+H4{		background: white;
+		color: rgb(40%, 40%, 40%);
+		font-family: monospace;
+		font-style: italic;
+		font-size: large }
+
+H5{		background: white;
+		color: rgb(40%, 40%, 40%);
+		font-family: monospace }
+
+H6{		background: white;
+		color: rgb(40%, 40%, 40%);
+		font-family: monospace;
+		font-style: italic }
+		
+IMG.toplogo{	vertical-align: middle }
+
+IMG.arrow{	width: 30px;
+		height: 30px;
+		border: 0 }
+
+span.acronym{font-size: small}
+span.env{font-family: monospace}
+span.file{font-family: monospace}
+span.option{font-family: monospace}
+span.pkg{font-weight: bold}
+span.samp{font-family: monospace}
+
+div.vignettes a:hover {
+  background: rgb(85%, 85%, 85%);
+}
+-->
+</style>
+</head>
+<body>
+
+<h3>R package Luminescence - supplementary data</h3>
+
+<ul>
+<li> <a href="S4classObjects.pdf">S4-class objects in the R package [PDF]</a>
+</ul>
+
+</body>
+
+</html>
\ No newline at end of file
diff --git a/man/Analyse_SAR.OSLdata.Rd b/man/Analyse_SAR.OSLdata.Rd
new file mode 100644
index 0000000..0112dc3
--- /dev/null
+++ b/man/Analyse_SAR.OSLdata.Rd
@@ -0,0 +1,151 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Analyse_SAR.OSLdata.R
+\name{Analyse_SAR.OSLdata}
+\alias{Analyse_SAR.OSLdata}
+\title{Analyse SAR CW-OSL measurements.}
+\usage{
+Analyse_SAR.OSLdata(input.data, signal.integral, background.integral, position,
+  run, set, dtype, keep.SEL = FALSE,
+  info.measurement = "unkown measurement", output.plot = FALSE,
+  output.plot.single = FALSE, cex.global = 1, ...)
+}
+\arguments{
+\item{input.data}{\link{Risoe.BINfileData-class} (\bold{required}): input
+data from a Risoe BIN file, produced by the function \link{read_BIN2R}.}
+
+\item{signal.integral}{\link{vector} (\bold{required}): channels used for
+the signal integral, e.g. \code{signal.integral=c(1:2)}}
+
+\item{background.integral}{\link{vector} (\bold{required}): channels used
+for the background integral, e.g. \code{background.integral=c(85:100)}}
+
+\item{position}{\link{vector} (optional): reader positions to be
+analysed (e.g. \code{position=c(1:48)}). Empty positions are automatically
+omitted. If no value is given, all positions are analysed by default.}
+
+\item{run}{\link{vector} (optional): range of runs used for the analysis. If
+no value is given the range of the runs in the sequence is deduced from the
+Risoe.BINfileData object.}
+
+\item{set}{\link{vector} (optional): range of sets used for the analysis. If
+no value is given the range of the sets in the sequence is deduced from the
+\code{Risoe.BINfileData} object.}
+
+\item{dtype}{\code{\link{character}} (optional): allows further limiting of the
+curves by their data type (\code{DTYPE}), e.g., \code{dtype = c("Natural",
+"Dose")} limits the curves to these two data types. By default all values are
+allowed. See \link{Risoe.BINfileData-class} for allowed data types.}
+
+\item{keep.SEL}{\code{\link{logical}} (with default): option allowing the manual
+use of the \code{SEL} element of the \link{Risoe.BINfileData-class}. NOTE: In
+this case any limitations provided by \code{run}, \code{set} and \code{dtype}
+are ignored!}
+
+\item{info.measurement}{\link{character} (with default): option to provide
+information about the measurement on the plot output (e.g. name of the BIN
+or BINX file).}
+
+\item{output.plot}{\link{logical} (with default): plot output
+(\code{TRUE/FALSE})}
+
+\item{output.plot.single}{\link{logical} (with default): single plot output
+(\code{TRUE/FALSE}) to allow for plotting the results in single plot
+windows. Requires \code{output.plot = TRUE}.}
+
+\item{cex.global}{\link{numeric} (with default): global scaling factor.}
+
+\item{\dots}{further arguments that will be passed to the function
+\code{\link{calc_OSLLxTxRatio}} (supported: \code{background.count.distribution}, \code{sigmab},
+\code{sig0}; e.g., for instrumental error)
+and can be used to adjust the plot. Supported" \code{mtext}, \code{log}}
+}
+\value{
+A plot (optional) and a \link{list} are returned containing the
+following elements: \item{LnLxTnTx}{\link{data.frame} of all calculated
+Lx/Tx values including signal, background counts and the dose points.}
+\item{RejectionCriteria}{\link{data.frame} with values that might be used as
+rejection criteria. NA is produced if no R0 dose point exists.}
+\item{SARParameters}{\link{data.frame} of additional measurement parameters
+obtained from the BIN file, e.g. preheat or read temperature (not valid for
+all types of measurements).}
+}
+\description{
+The function analyses SAR CW-OSL curve data and provides a summary of the
+measured data for every position. The output of the function is optimised
+for SAR OSL measurements on quartz.
+}
+\details{
+The function works only for standard SAR protocol measurements introduced by
+Murray and Wintle (2000) with CW-OSL curves. For the calculation of the
+Lx/Tx value the function \link{calc_OSLLxTxRatio} is used. \cr\cr
+
+\bold{Provided rejection criteria}\cr\cr \sQuote{recycling ratio}: calculated
+for every repeated regeneration dose point.\cr \sQuote{recuperation}:
+recuperation rate calculated by comparing the Lx/Tx values of the zero
+regeneration point with the Ln/Tn value (the Lx/Tx ratio of the natural
+signal). For methodological background see Aitken and Smith (1988)\cr
+
+\sQuote{IRSL/BOSL}: the integrated counts (\code{signal.integral}) of an
+IRSL curve are compared to the integrated counts of the first regenerated
+dose point. It is assumed that the IRSL curves received the same dose as the first
+regenerated dose point. \strong{Note:} This is not the IR depletion ratio
+described by Duller (2003).
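+
+In code terms, the first two criteria reduce to simple ratios over the
+returned \code{LnLxTnTx} table; a minimal sketch (the row indices used
+here are hypothetical and depend on the measured SAR sequence):
+
+\preformatted{
+## sketch: rejection criteria as simple ratios over the function output;
+## row indices are hypothetical and depend on the SAR sequence used
+LnLxTnTx <- output$LnLxTnTx[[1]]
+recycling.ratio <- LnLxTnTx$LxTx[6] / LnLxTnTx$LxTx[2]  # repeated vs. first dose point
+recuperation    <- LnLxTnTx$LxTx[5] / LnLxTnTx$LxTx[1]  # zero dose point vs. natural
+}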
+}
+\note{
+Rejection criteria are calculated but not considered during the
+analysis to discard values.\cr\cr
+
+\bold{The analysis of IRSL data is not directly supported}. You may want to
+consider using the functions \code{\link{analyse_SAR.CWOSL}} or
+\code{\link{analyse_pIRIRSequence}} instead.\cr
+
+\bold{The development of this function will not be continued. We recommend
+using the function \link{analyse_SAR.CWOSL} or \link{analyse_pIRIRSequence} instead.}
+}
+\section{Function version}{
+ 0.2.17 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##load data
+data(ExampleData.BINfileData, envir = environment())
+
+##analyse data
+output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data,
+                              signal.integral = c(1:5),
+                              background.integral = c(900:1000),
+                              position = c(1:1),
+                              output.plot = TRUE)
+
+##combine results relevant for further analysis
+output.SAR <- data.frame(Dose = output$LnLxTnTx[[1]]$Dose,
+                         LxTx = output$LnLxTnTx[[1]]$LxTx,
+                         LxTx.Error = output$LnLxTnTx[[1]]$LxTx.Error)
+output.SAR
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Margret C. Fuchs, HZDR, Freiberg (Germany)
+\cr R Luminescence Package Team}
+\references{
+Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+after bleaching. Quaternary Science Reviews 7, 387-393.
+
+Duller, G., 2003. Distinguishing quartz and feldspar in single grain
+luminescence measurements. Radiation Measurements, 37 (2), 161-165.
+
+Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+improved single-aliquot regenerative-dose protocol. Radiation Measurements
+32, 57-73.
+}
+\seealso{
+\link{calc_OSLLxTxRatio}, \link{Risoe.BINfileData-class},
+\link{read_BIN2R}
+
+and for further analysis \link{plot_GrowthCurve}
+}
+\keyword{datagen}
+\keyword{dplot}
+
diff --git a/man/BaseDataSet.CosmicDoseRate.Rd b/man/BaseDataSet.CosmicDoseRate.Rd
new file mode 100644
index 0000000..ee83cda
--- /dev/null
+++ b/man/BaseDataSet.CosmicDoseRate.Rd
@@ -0,0 +1,111 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{BaseDataSet.CosmicDoseRate}
+\alias{BaseDataSet.CosmicDoseRate}
+\title{Base data set for cosmic dose rate calculation}
+\format{\tabular{ll}{
+
+\code{values.cosmic.Softcomp}: \tab data frame containing cosmic dose rates
+for shallow depths (< 167 g cm^-2) obtained using the "AGE" program by
+Rainer Gruen (cf. Gruen 2009). These data essentially reproduce the graph
+shown in Fig. 1 of Prescott & Hutton (1988). \cr
+
+\code{values.factor.Altitude}: \tab data frame containing altitude factors
+for adjusting geomagnetic field-change factors. Values were read from Fig. 1
+in Prescott & Hutton (1994). \cr
+
+\code{values.par.FJH}: \tab data frame containing values for parameters F, J
+and H (read from Fig. 2 in Prescott & Hutton 1994) used in the expression }
+
+\deqn{Dc = D0*(F+J*exp((altitude/1000)/H))}}
+\source{
+The following data were carefully read from figures in mentioned
+sources and used for fitting procedures. The derived expressions are used in
+the function \code{calc_CosmicDoseRate}.
+
+\bold{values.cosmic.Softcomp}
+
+\tabular{ll}{
+
+Program: \tab "AGE"\cr Reference: \tab Gruen (2009) \cr Fit: \tab
+Polynomials in the form of
+
+}
+
+For depths between 40 and 167 g cm^-2:
+
+\deqn{y = 2*10^-6*x^2-0.0008*x+0.2535}
+
+For depths < 40 g cm^-2:
+
+\deqn{y = -6*10^-8*x^3+2*10^-5*x^2-0.0025*x+0.2969}
+
+\bold{values.factor.Altitude}
+
+\tabular{ll}{
+
+Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 499 \cr Figure: \tab
+1 \cr Fit: \tab 2-degree polynomial in the form of
+
+}
+
+\deqn{y = -0.026*x^2 + 0.6628*x + 1.0435}
+
+\bold{values.par.FJH}
+
+\tabular{ll}{
+
+Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 500 \cr Figure: \tab
+2 \cr Fits: \tab 3-degree polynomials and linear fits
+
+}
+
+F (non-linear part, \eqn{\lambda} < 36.5 deg.):
+
+\deqn{y = -7*10^-7*x^3-8*10^-5*x^2-0.0009*x+0.3988}
+
+F (linear part, \eqn{\lambda} > 36.5 deg.):
+
+\deqn{y = -0.0001*x + 0.2347}
+
+J (non-linear part, \eqn{\lambda} < 34 deg.):
+
+\deqn{y = 5*10^-6*x^3-5*10^-5*x^2+0.0026*x+0.5177}
+
+J (linear part, \eqn{\lambda} > 34 deg.):
+
+\deqn{y = 0.0005*x + 0.7388}
+
+H (non-linear part, \eqn{\lambda} < 36 deg.):
+
+\deqn{y = -3*10^-6*x^3-5*10^-5*x^2-0.0031*x+4.398}
+
+H (linear part, \eqn{\lambda} > 36 deg.):
+
+\deqn{y = 0.0002*x + 4.0914}
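+
+To illustrate how these fits combine, a minimal sketch of the altitude
+scaling using only the linear (high-latitude) branches of F, J and H;
+the helper name is hypothetical and the polynomial branches are analogous:
+
+\preformatted{
+## sketch: scale a sea-level cosmic dose rate D0 to a given altitude,
+## using only the linear branches of the F, J and H fits (valid for
+## geomagnetic latitudes above ca. 36 deg.); helper name is hypothetical
+cosmic_altitude_scaling <- function(lambda, altitude, D0) {
+  F.par <- -0.0001 * lambda + 0.2347
+  J.par <-  0.0005 * lambda + 0.7388
+  H.par <-  0.0002 * lambda + 4.0914
+  D0 * (F.par + J.par * exp((altitude / 1000) / H.par))
+}
+}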
+}
+\description{
+Collection of data from various sources needed for cosmic dose rate
+calculation
+}
+\section{Version}{
+ 0.1
+}
+\examples{
+
+##load data
+data(BaseDataSet.CosmicDoseRate)
+}
+\references{
+Gruen, R., 2009. The "AGE" program for the calculation of luminescence age estimates.
+Ancient TL, 27, pp. 45-46.
+
+Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for
+TL and ESR. Nuclear Tracks and Radiation Measurements, 14, pp. 223-227.
+
+Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates
+for luminescence and ESR dating: large depths and long-term time variations.
+Radiation Measurements, 23, pp. 497-500.
+}
+\keyword{datasets}
+
diff --git a/man/CW2pHMi.Rd b/man/CW2pHMi.Rd
new file mode 100644
index 0000000..0eaef0e
--- /dev/null
+++ b/man/CW2pHMi.Rd
@@ -0,0 +1,177 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/CW2pHMi.R
+\name{CW2pHMi}
+\alias{CW2pHMi}
+\title{Transform a CW-OSL curve into a pHM-OSL curve via interpolation under
+hyperbolic modulation conditions}
+\usage{
+CW2pHMi(values, delta)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}):
+\code{\linkS4class{RLum.Data.Curve}} or \code{\link{data.frame}} with
+measured curve data of type stimulation time (t) (\code{values[,1]}) and
+measured counts (cts) (\code{values[,2]}).}
+
+\item{delta}{\code{\link{vector}} (optional): stimulation rate parameter, if
+no value is given, the optimal value is estimated automatically (see
+details). Smaller values of delta produce more points in the rising tail of
+the curve.}
+}
+\value{
+The function returns the same data type as the input data type with
+the transformed curve values. \item{list(list("RLum.Data.Curve"))}{package
+\code{\linkS4class{RLum} object} with two additional info elements:
+\tabular{rl}{ $CW2pHMi.x.t \tab: transformed time values \cr $CW2pHMi.method
+\tab: used method for the production of the new data points }}
+\item{list(list("data.frame"))}{with four columns: \tabular{rl}{ $x \tab:
+time\cr $y.t \tab: transformed count values\cr $x.t \tab: transformed time
+values \cr $method \tab: used method for the production of the new data
+points }}
+}
+\description{
+This function transforms a conventionally measured continuous-wave (CW)
+OSL-curve to a pseudo hyperbolic modulated (pHM) curve under hyperbolic
+modulation conditions using the interpolation procedure described by Bos &
+Wallinga (2012).
+}
+\details{
+The complete procedure of the transformation is described in Bos & Wallinga
+(2012). The input \code{data.frame} consists of two columns: time (t) and
+count values (CW(t))\cr\cr
+
+\bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr\cr (2)
+Calculate t' which is the transformed time:\cr \deqn{t' =
+t-(1/\delta)*log(1+\delta*t)} (3) Interpolate CW(t'), i.e. use the
+log(CW(t)) to obtain the count values for the transformed time (t'). Values
+beyond \code{min(t)} and \code{max(t)} produce \code{NA} values.\cr\cr (4)
+Select all values for t' < \code{min(t)}, i.e. values beyond the time
+resolution of t. Select the first two values of the transformed data set
+which contain no \code{NA} values and use these values for a linear fit
+using \code{\link{lm}}.\cr\cr (5) Extrapolate values for t' < \code{min(t)}
+based on the previously obtained fit parameters.\cr\cr (6) Transform values
+using\cr \deqn{pHM(t) = (\delta*t/(1+\delta*t))*c*CW(t')} \deqn{c =
+(1+\delta*P)/(\delta*P)} \deqn{P = length(stimulation~period)} (7) Combine all
+values and truncate all values for t' > \code{max(t)} \cr\cr \emph{The
+number of values for t' < \code{min(t)} depends on the stimulation rate
+parameter \code{delta}. To avoid the production of too many artificial data points
+at the rising tail of the determined pHM curve, it is recommended to use
+the automatic estimation routine for \code{delta}, i.e. provide no value for
+\code{delta}.}
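+
+For orientation, a minimal sketch of the core transformation (steps 2, 3
+and 6; the extrapolation for t' < \code{min(t)} described in steps 4-5 is
+omitted here, and the helper name is hypothetical):
+
+\preformatted{
+## minimal sketch of steps (2), (3) and (6); extrapolation omitted
+pHM_sketch <- function(values, delta) {
+  t   <- values[, 1]
+  cts <- values[, 2]
+  P <- max(t)                                  # stimulation period
+  t.transformed <- t - (1 / delta) * log(1 + delta * t)
+  ## interpolate log(CW(t)) at the transformed times
+  cts.t <- exp(approx(t, log(cts), xout = t.transformed)$y)
+  c.const <- (1 + delta * P) / (delta * P)
+  data.frame(x = t,
+             y.t = (delta * t / (1 + delta * t)) * c.const * cts.t)
+}
+}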
+}
+\note{
+According to Bos & Wallinga (2012), the number of extrapolated points
+should be limited to avoid artificial intensity data. If \code{delta} is
+provided manually and more than two points are extrapolated, a warning
+message is returned. \cr\cr The function \code{\link{approx}} may produce
+some \code{Inf} and \code{NaN} data. The function tries to manually
+interpolate these values by calculating the \code{mean} using the adjacent
+channels. If two successive values are invalid, these values are removed and
+no further interpolation is attempted.\cr In every case a warning message is
+shown.
+}
+\section{Function version}{
+ 0.2.2 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##(1) - simple transformation
+
+##load CW-OSL curve data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##transform values
+values.transformed<-CW2pHMi(ExampleData.CW_OSL_Curve)
+
+##plot
+plot(values.transformed$x, values.transformed$y.t, log = "x")
+
+##(2) - load CW-OSL curve from BIN-file and plot transformed values
+
+##load BINfile
+#BINfileData <- read_BIN2R("[path to BIN-file]")
+data(ExampleData.BINfileData, envir = environment())
+
+##grep first CW-OSL curve from ALQ 1
+curve.ID <- CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"LTYPE"] == "OSL" &
+                                      CWOSL.SAR.Data@METADATA[,"POSITION"] == 1,
+                                    "ID"]
+
+curve.HIGH <- CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"ID"] == curve.ID[1],
+                                      "HIGH"]
+
+curve.NPOINTS <- CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"ID"] == curve.ID[1],
+                                         "NPOINTS"]
+
+##combine curve to data set
+
+curve <- data.frame(x = seq(curve.HIGH/curve.NPOINTS, curve.HIGH,
+                            by = curve.HIGH/curve.NPOINTS),
+                    y = unlist(CWOSL.SAR.Data@DATA[curve.ID[1]]))
+
+
+##transform values
+
+curve.transformed <- CW2pHMi(curve)
+
+##plot curve
+plot(curve.transformed$x, curve.transformed$y.t, log = "x")
+
+
+##(3) - produce Fig. 4 from Bos & Wallinga (2012)
+
+##load data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+values <- CW_Curve.BosWallinga2012
+
+##open plot area
+plot(NA, NA,
+     xlim=c(0.001,10),
+     ylim=c(0,8000),
+     ylab="pseudo OSL (cts/0.01 s)",
+     xlab="t [s]",
+     log="x",
+     main="Fig. 4 - Bos & Wallinga (2012)")
+
+values.t<-CW2pLMi(values, P=1/20)
+lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P=1/20)[,2],
+      col="red" ,lwd=1.3)
+text(0.03,4500,"LM", col="red" ,cex=.8)
+
+values.t<-CW2pHMi(values, delta=40)
+lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta=40)[,2],
+      col="black", lwd=1.3)
+text(0.005,3000,"HM", cex=.8)
+
+values.t<-CW2pPMi(values, P=1/10)
+lines(values[1:length(values.t[,1]),1],CW2pPMi(values, P=1/10)[,2],
+      col="blue", lwd=1.3)
+text(0.5,6500,"PM", col="blue" ,cex=.8)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France) \cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
+\references{
+Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+signal components. Radiation Measurements, 47, 752-758.\cr
+
+\bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+26, 701-709.
+
+Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+LM-OSL curves. Radiation Measurements, 32, 141-145.
+}
+\seealso{
+\code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pPMi}},
+\code{\link{fit_LMCurve}}, \code{\link{lm}},
+\code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{manip}
+
diff --git a/man/CW2pLM.Rd b/man/CW2pLM.Rd
new file mode 100644
index 0000000..493d1aa
--- /dev/null
+++ b/man/CW2pLM.Rd
@@ -0,0 +1,90 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/CW2pLM.R
+\name{CW2pLM}
+\alias{CW2pLM}
+\title{Transform a CW-OSL curve into a pLM-OSL curve}
+\usage{
+CW2pLM(values)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}): \code{RLum.Data.Curve} data
+object. Alternatively, a \code{data.frame} of the measured curve data of
+type stimulation time (t) (\code{values[,1]}) and measured counts (cts)
+(\code{values[,2]}) can be provided.}
+}
+\value{
+The function returns the same data type as the input data type with
+the transformed curve values.
+
+\item{list(list("data.frame"))}{generic R data structure}
+\item{list(list("RLum.Data.Curve"))}{package \code{\linkS4class{RLum}
+object}}
+}
+\description{
+Transforms a conventionally measured continuous-wave (CW) curve into a
+pseudo linearly modulated (pLM) curve using the equations given in Bulur
+(2000).
+}
+\details{
+According to Bulur (2000) the curve data are transformed by introducing two
+new parameters P (stimulation period) and u (transformed time):
+\deqn{P=2*max(t)} \deqn{u=\sqrt{(2*t*P)}} The new count values are then
+calculated by \deqn{ctsNEW = cts(u/P)} and the returned \code{data.frame} is
+produced by: \code{data.frame(u,ctsNEW)}
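+
+Expressed as code, the transformation reduces to a few lines; a minimal
+sketch, assuming a two-column \code{data.frame} as input (the helper name
+is hypothetical):
+
+\preformatted{
+## minimal sketch of the Bulur (2000) transformation
+pLM_sketch <- function(values) {
+  t   <- values[, 1]
+  cts <- values[, 2]
+  P <- 2 * max(t)          # stimulation period
+  u <- sqrt(2 * t * P)     # transformed time axis
+  data.frame(u = u, cts = cts * u / P)
+}
+}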
+}
+\note{
+The transformation is recommended for curves recorded with a channel
+resolution of at least 0.05 s/channel.
+}
+\section{Function version}{
+ 0.4.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##read curve from CWOSL.SAR.Data transform curve and plot values
+data(ExampleData.BINfileData, envir = environment())
+
+##read id for the 1st OSL curve
+id.OSL <- CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"LTYPE"] == "OSL","ID"]
+
+##produce x and y (time and count data for the data set)
+x <- seq(CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@METADATA[id.OSL[1],"NPOINTS"],
+         CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"],
+         by = CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@METADATA[id.OSL[1],"NPOINTS"])
+y <- unlist(CWOSL.SAR.Data@DATA[id.OSL[1]])
+values <- data.frame(x,y)
+
+##transform values
+values.transformed <- CW2pLM(values)
+
+##plot
+plot(values.transformed)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Bulur, E., 2000. A simple transformation for converting CW-OSL
+curves to LM-OSL curves. Radiation Measurements, 32, 141-145.
+
+\bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+26, 701-709.
+}
+\seealso{
+\code{\link{CW2pHMi}}, \code{\link{CW2pLMi}},
+\code{\link{CW2pPMi}}, \code{\link{fit_LMCurve}}, \code{\link{lm}},
+\code{\linkS4class{RLum.Data.Curve}}
+
+The output of the function can be further used for LM-OSL fitting:
+\code{\link{CW2pLMi}}, \code{\link{CW2pHMi}}, \code{\link{CW2pPMi}},
+\code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}},
+\code{\link{plot_RLum}}
+}
+\keyword{manip}
+
diff --git a/man/CW2pLMi.Rd b/man/CW2pLMi.Rd
new file mode 100644
index 0000000..96404f6
--- /dev/null
+++ b/man/CW2pLMi.Rd
@@ -0,0 +1,134 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/CW2pLMi.R
+\name{CW2pLMi}
+\alias{CW2pLMi}
+\title{Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear
+modulation conditions}
+\usage{
+CW2pLMi(values, P)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}):
+\code{\linkS4class{RLum.Data.Curve}} or \code{data.frame} with measured
+curve data of type stimulation time (t) (\code{values[,1]}) and measured
+counts (cts) (\code{values[,2]})}
+
+\item{P}{\code{\link{vector}} (optional): stimulation time in seconds. If no
+value is given the optimal value is estimated automatically (see details).
+Greater values of P produce more points in the rising tail of the curve.}
+}
+\value{
+The function returns the same data type as the input data type with
+the transformed curve values. \item{list(list("RLum.Data.Curve"))}{package
+\code{\linkS4class{RLum}} object with two additional info elements:}
+\tabular{rl}{ $CW2pLMi.x.t \tab: transformed time values \cr $CW2pLMi.method
+\tab: used method for the production of the new data points}
+}
+\description{
+Transforms a conventionally measured continuous-wave (CW) OSL-curve into a
+pseudo linearly modulated (pLM) curve under linear modulation conditions
+using the interpolation procedure described by Bos & Wallinga (2012).
+}
+\details{
+The complete procedure of the transformation is given in Bos & Wallinga
+(2012). The input \code{data.frame} consists of two columns: time (t) and
+count values (CW(t))\cr\cr
+
+\bold{Nomenclature}\cr\cr P = stimulation time (s)\cr 1/P = stimulation rate
+(1/s)\cr\cr
+
+\bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr (2)
+Calculate t' which is the transformed time: \deqn{t' = 1/2*1/P*t^2}
+
+(3) Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values
+for the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)}
+produce \code{NA} values.\cr\cr (4) Select all values for t' <
+\code{min(t)}, i.e. values beyond the time resolution of t. Select the first
+two values of the transformed data set which contain no \code{NA} values and
+use these values for a linear fit using \code{\link{lm}}.\cr\cr (5)
+Extrapolate values for t' < \code{min(t)} based on the previously obtained
+fit parameters.\cr\cr (6) Transform values using \deqn{pLM(t) = t/P*CW(t')}
+(7) Combine values and truncate all values for t' > \code{max(t)}\cr\cr
+\emph{The number of values for t' < \code{min(t)} depends on the stimulation
+period (P) and therefore on the stimulation rate 1/P. To avoid the
+production of too many artificial data points at the rising tail of the determined
+pLM curves, it is recommended to use the automatic estimation routine for
+\code{P}, i.e. provide no value for \code{P}.}
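+
+A minimal sketch of the core steps (2), (3) and (6), omitting the
+extrapolation of steps (4)-(5); the helper name is hypothetical:
+
+\preformatted{
+## minimal sketch of steps (2), (3) and (6); extrapolation omitted
+pLMi_sketch <- function(values, P) {
+  t   <- values[, 1]
+  cts <- values[, 2]
+  t.transformed <- (1 / 2) * (1 / P) * t^2
+  ## interpolate log(CW(t)) at the transformed times
+  cts.t <- exp(approx(t, log(cts), xout = t.transformed)$y)
+  data.frame(x = t, y.t = (t / P) * cts.t)
+}
+}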
+}
+\note{
+According to Bos & Wallinga (2012) the number of extrapolated points
+should be limited to avoid artificial intensity data. If \code{P} is
+provided manually and more than two points are extrapolated, a warning
+message is returned.
+}
+\section{Function version}{
+ 0.3.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##(1)
+##load CW-OSL curve data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##transform values
+values.transformed <- CW2pLMi(ExampleData.CW_OSL_Curve)
+
+##plot
+plot(values.transformed$x, values.transformed$y.t, log = "x")
+
+##(2) - produce Fig. 4 from Bos & Wallinga (2012)
+##load data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+values <- CW_Curve.BosWallinga2012
+
+##open plot area
+plot(NA, NA,
+     xlim = c(0.001,10),
+     ylim = c(0,8000),
+     ylab = "pseudo OSL (cts/0.01 s)",
+     xlab = "t [s]",
+     log = "x",
+     main = "Fig. 4 - Bos & Wallinga (2012)")
+
+
+values.t <- CW2pLMi(values, P = 1/20)
+lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2],
+      col = "red", lwd = 1.3)
+text(0.03,4500,"LM", col = "red", cex = .8)
+
+values.t <- CW2pHMi(values, delta = 40)
+lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta = 40)[,2],
+      col = "black", lwd = 1.3)
+text(0.005,3000,"HM", cex =.8)
+
+values.t <- CW2pPMi(values, P = 1/10)
+lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
+      col = "blue", lwd = 1.3)
+text(0.5,6500,"PM", col = "blue", cex = .8)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux
+Montaigne\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
+\references{
+Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+signal components. Radiation Measurements, 47, 752-758.\cr
+
+\bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+26, 701-709.
+
+Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+LM-OSL curves. Radiation Measurements, 32, 141-145.
+}
+\seealso{
+\code{\link{CW2pLM}}, \code{\link{CW2pHMi}}, \code{\link{CW2pPMi}},
+\code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{manip}
+
diff --git a/man/CW2pPMi.Rd b/man/CW2pPMi.Rd
new file mode 100644
index 0000000..fb8da9e
--- /dev/null
+++ b/man/CW2pPMi.Rd
@@ -0,0 +1,141 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/CW2pPMi.R
+\name{CW2pPMi}
+\alias{CW2pPMi}
+\title{Transform a CW-OSL curve into a pPM-OSL curve via interpolation under
+parabolic modulation conditions}
+\usage{
+CW2pPMi(values, P)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}):
+\code{\linkS4class{RLum.Data.Curve}} or \code{data.frame} with measured
+curve data of type stimulation time (t) (\code{values[,1]}) and measured
+counts (cts) (\code{values[,2]})}
+
+\item{P}{\code{\link{vector}} (optional): stimulation period in seconds. If
+no value is given, the optimal value is estimated automatically (see
+details). Greater values of P produce more points in the rising tail of the
+curve.}
+}
+\value{
+The function returns the same data type as the input data type with
+the transformed curve values. \item{list(list("RLum.Data.Curve"))}{package
+\code{\linkS4class{RLum} object} with two additional info elements:
+\tabular{rl}{ $CW2pPMi.x.t \tab: transformed time values \cr $CW2pPMi.method
+\tab: used method for the production of the new data points }}
+
+\item{list(list("data.frame"))}{with four columns: \tabular{rl}{ $x \tab:
+time\cr $y.t \tab: transformed count values\cr $x.t \tab: transformed time
+values \cr $method \tab: used method for the production of the new data
+points }}
+}
+\description{
+Transforms a conventionally measured continuous-wave (CW) OSL-curve into a
+pseudo parabolic modulated (pPM) curve under parabolic modulation conditions
+using the interpolation procedure described by Bos & Wallinga (2012).
+}
+\details{
+The complete procedure of the transformation is given in Bos & Wallinga
+(2012). The input \code{data.frame} consists of two columns: time (t) and
+count values (CW(t))\cr\cr
+
+\bold{Nomenclature}\cr\cr P = stimulation time (s)\cr 1/P = stimulation rate
+(1/s)\cr\cr
+
+\bold{Internal transformation steps}\cr\cr (1) log(CW-OSL) values\cr\cr (2)
+Calculate t' which is the transformed time: \deqn{t' = (1/3)*(1/P^2)t^3} (3)
+Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for
+the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)}
+produce \code{NA} values.\cr\cr (4) Select all values for t' <
+\code{min(t)}, i.e. values beyond the time resolution of t. Select the first
+two values of the transformed data set which contain no \code{NA} values and
+use these values for a linear fit using \code{\link{lm}}.\cr\cr (5)
+Extrapolate values for t' < \code{min(t)} based on the previously obtained
+fit parameters. The extrapolation is limited to two values. Other values at
+the beginning of the transformed curve are set to 0.\cr\cr (6) Transform
+values using \deqn{pPM(t) = t^2/P^2*CW(t')} (7) Combine all values and
+truncate all values for t' > \code{max(t)}\cr\cr
+
+\emph{The number of values for t' < \code{min(t)} depends on the stimulation
+period \code{P}. To avoid the production of too many artificial data points at the
+rising tail of the determined pPM curve, it is recommended to use the
+automatic estimation routine for \code{P}, i.e. provide no value for
+\code{P}.}
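+
+A minimal sketch of the core steps (2), (3) and (6), omitting the
+extrapolation of steps (4)-(5); the helper name is hypothetical:
+
+\preformatted{
+## minimal sketch of steps (2), (3) and (6); extrapolation omitted
+pPMi_sketch <- function(values, P) {
+  t   <- values[, 1]
+  cts <- values[, 2]
+  t.transformed <- (1 / 3) * (1 / P^2) * t^3
+  ## interpolate log(CW(t)) at the transformed times
+  cts.t <- exp(approx(t, log(cts), xout = t.transformed)$y)
+  data.frame(x = t, y.t = (t^2 / P^2) * cts.t)
+}
+}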
+}
+\note{
+According to Bos & Wallinga (2012), the number of extrapolated points
+should be limited to avoid artificial intensity data. If \code{P} is
+provided manually, not more than two points are extrapolated.
+}
+\section{Function version}{
+ 0.2.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##(1)
+##load CW-OSL curve data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##transform values
+values.transformed <- CW2pPMi(ExampleData.CW_OSL_Curve)
+
+##plot
+plot(values.transformed$x,values.transformed$y.t, log = "x")
+
+##(2) - produce Fig. 4 from Bos & Wallinga (2012)
+
+##load data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+values <- CW_Curve.BosWallinga2012
+
+##open plot area
+plot(NA, NA,
+     xlim = c(0.001,10),
+     ylim = c(0,8000),
+     ylab = "pseudo OSL (cts/0.01 s)",
+     xlab = "t [s]",
+     log = "x",
+     main = "Fig. 4 - Bos & Wallinga (2012)")
+
+values.t <- CW2pLMi(values, P = 1/20)
+lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2],
+      col = "red",lwd = 1.3)
+text(0.03,4500,"LM", col = "red", cex = .8)
+
+values.t <- CW2pHMi(values, delta = 40)
+lines(values[1:length(values.t[,1]),1], CW2pHMi(values, delta = 40)[,2],
+      col = "black", lwd = 1.3)
+text(0.005,3000,"HM", cex = .8)
+
+values.t <- CW2pPMi(values, P = 1/10)
+lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
+      col = "blue", lwd = 1.3)
+text(0.5,6500,"PM", col = "blue", cex = .8)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
+\references{
+Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
+signal components. Radiation Measurements, 47, 752-758.\cr
+
+\bold{Further Reading}\cr\cr Bulur, E., 1996. An Alternative Technique For
+Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements,
+26, 701-709.
+
+Bulur, E., 2000. A simple transformation for converting CW-OSL curves to
+LM-OSL curves. Radiation Measurements, 32, 141-145.
+}
+\seealso{
+\code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pHMi}},
+\code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{manip}
+
diff --git a/man/ExampleData.BINfileData.Rd b/man/ExampleData.BINfileData.Rd
new file mode 100644
index 0000000..9960d9f
--- /dev/null
+++ b/man/ExampleData.BINfileData.Rd
@@ -0,0 +1,62 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.BINfileData}
+\alias{ExampleData.BINfileData}
+\title{Example data from a SAR OSL and SAR TL measurement for the package
+Luminescence}
+\format{\code{CWOSL.SAR.Data}: SAR OSL measurement data
+
+\code{TL.SAR.Data}: SAR TL measurement data
+
+Each class object contains two slots: (a) \code{METADATA} is a
+\link{data.frame} with all metadata stored in the BIN file of the
+measurements and (b) \code{DATA} contains a list of vectors of the measured
+data (usually count values).}
+\source{
+\bold{CWOSL.SAR.Data}
+
+\tabular{ll}{
+
+Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT607\cr
+Location: \tab Saxony/Germany\cr Material: \tab Middle grain quartz measured
+\cr \tab on aluminum cups on a Risoe TL/OSL DA-15 reader\cr Reference: \tab
+unpublished }
+
+\bold{TL.SAR.Data}
+
+\tabular{ll}{
+
+Lab: \tab Luminescence Laboratory of Cologne\cr Lab-Code: \tab LP1_5\cr
+Location: \tab Spain\cr Material: \tab Flint \cr Setup: \tab Risoe TL/OSL
+DA-20 reader \cr \tab (Filter: Semrock Brightline, \cr \tab HC475/50, N2,
+unpolished steel discs) \cr Reference: \tab unpublished \cr Remarks: \tab
+dataset limited to one position\cr }
+}
+\description{
+Example data from a SAR OSL and TL measurement for package Luminescence
+directly extracted from a Risoe BIN-file and provided in an object of type
+\link{Risoe.BINfileData-class}
+}
+\note{
+Please note that this example data cannot be exported to a BIN-file using the function
+\code{write_R2BIN} as it was generated and implemented in the package a long time ago. In the
+meantime the BIN-file format has changed.
+}
+\section{Version}{
+ 0.1
+}
+\examples{
+
+##show first 5 elements of the METADATA and DATA elements in the terminal
+data(ExampleData.BINfileData, envir = environment())
+CWOSL.SAR.Data@METADATA[1:5,]
+CWOSL.SAR.Data@DATA[1:5]
+
+}
+\references{
+\bold{CWOSL.SAR.Data}: unpublished data \cr
+
+\bold{TL.SAR.Data}: unpublished data
+}
+\keyword{datasets}
+
diff --git a/man/ExampleData.CW_OSL_Curve.Rd b/man/ExampleData.CW_OSL_Curve.Rd
new file mode 100644
index 0000000..ea1e008
--- /dev/null
+++ b/man/ExampleData.CW_OSL_Curve.Rd
@@ -0,0 +1,44 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\docType{data}
+\name{ExampleData.CW_OSL_Curve}
+\alias{ExampleData.CW_OSL_Curve}
+\title{Example CW-OSL curve data for the package Luminescence}
+\format{Data frame with 1000 observations on the following 2 variables:
+\describe{ \item{list("x")}{a numeric vector, time} \item{list("y")}{a
+numeric vector, counts} }}
+\source{
+\bold{ExampleData.CW_OSL_Curve}
+
+\tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+BT607\cr Location: \tab Saxony/Germany\cr Material: \tab Middle grain quartz
+measured on aluminum cups on a Risoe TL/OSL DA-15 reader.\cr Reference: \tab
+unpublished data }
+
+\bold{CW_Curve.BosWallinga2012}
+
+\tabular{ll}{ Lab: \tab Netherlands Centre for Luminescence Dating (NCL)\cr
+Lab-Code: \tab NCL-2108077\cr Location: \tab Guadalentin Basin, Spain\cr
+Material: \tab Coarse grain quartz\cr Reference: \tab Bos & Wallinga (2012)
+and Baartman et al. (2011) }
+}
+\description{
+\code{data.frame} containing CW-OSL curve data (time, counts)
+}
+\examples{
+
+data(ExampleData.CW_OSL_Curve, envir = environment())
+plot(ExampleData.CW_OSL_Curve)
+
+}
+\references{
+Baartman, J.E.M., Veldkamp, A., Schoorl, J.M., Wallinga, J.,
+Cammeraat, L.H., 2011. Unravelling Late Pleistocene and Holocene landscape
+dynamics: The Upper Guadalentin Basin, SE Spain. Geomorphology, 125,
+172-185.
+
+Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal
+components. Radiation Measurements, 47, 752-758.
+}
+\keyword{datasets}
+
diff --git a/man/ExampleData.DeValues.Rd b/man/ExampleData.DeValues.Rd
new file mode 100644
index 0000000..1740fad
--- /dev/null
+++ b/man/ExampleData.DeValues.Rd
@@ -0,0 +1,57 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.DeValues}
+\alias{ExampleData.DeValues}
+\title{Example De data sets for the package Luminescence}
+\format{A \code{\link{list}} with two elements, each containing a two column
+\code{\link{data.frame}}:
+
+\describe{ \code{$BT998}: De and De error values for a fine grain quartz
+sample from a loess section in Rottewitz.\cr\cr \code{$CA1}: Single grain De
+and De error values for a coarse grain quartz sample from a fluvial deposit
+in the rock shelter of Cueva Anton }}
+\description{
+Equivalent dose (De) values measured for a fine grain quartz sample from a
+loess section in Rottewitz (Saxony/Germany) and for a coarse grain quartz
+sample from a fluvial deposit in the rock shelter of Cueva Anton
+(Murcia/Spain).
+}
+\examples{
+
+##(1) plot values as histogram
+data(ExampleData.DeValues, envir = environment())
+plot_Histogram(ExampleData.DeValues$BT998, xlab = "De [s]")
+
+##(2) plot values as histogram (with second to gray conversion)
+data(ExampleData.DeValues, envir = environment())
+
+De.values <- Second2Gray(ExampleData.DeValues$BT998,
+                         dose.rate = c(0.0438, 0.0019))
+
+
+plot_Histogram(De.values, xlab = "De [Gy]")
+
+}
+\references{
+\bold{BT998} \cr\cr Unpublished data \cr\cr
+\bold{CA1} \cr\cr
+Burow, C., Kehl, M., Hilgers, A., Weniger, G.-C., Angelucci, D., Villaverde,
+V., Zapata, J. and Zilhao, J.  (2015). Luminescence dating of fluvial
+deposits in the rock shelter of Cueva Anton, Spain. Geochronometria 52, 107-125.
+
+\bold{BT998} \cr
+\tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr
+Lab-Code: \tab BT998\cr Location: \tab Rottewitz (Saxony/Germany)\cr
+Material: \tab Fine grain quartz measured on aluminum discs on a Risoe
+TL/OSL DA-15 reader\cr Units: \tab Values are given in seconds \cr Dose
+Rate: \tab Dose rate of the beta-source at measurement ca. 0.0438 Gy/s +/-
+0.0019 Gy/s\cr Measurement Date: \tab 2012-01-27 }
+\bold{CA1} \cr
+\tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory (CLL)\cr Lab-Code:
+\tab C-L2941\cr Location: \tab Cueva Anton (Murcia/Spain)\cr Material: \tab
+Coarse grain quartz (200-250 microns) measured on single grain discs on a
+Risoe TL/OSL DA-20 reader\cr Units: \tab Values are given in Gray \cr
+Measurement Date: \tab 2012 }
+}
+\keyword{datasets}
+
diff --git a/man/ExampleData.FittingLM.Rd b/man/ExampleData.FittingLM.Rd
new file mode 100644
index 0000000..cf52a75
--- /dev/null
+++ b/man/ExampleData.FittingLM.Rd
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.FittingLM}
+\alias{ExampleData.FittingLM}
+\title{Example data for fit_LMCurve() in the package Luminescence}
+\format{Two objects (data.frames) with two columns (time and counts).}
+\source{
+\tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+BT900\cr Location: \tab Norway\cr Material: \tab Beach deposit, coarse grain
+quartz measured on aluminum discs on a Risoe TL/OSL DA-15 reader\cr }
+}
+\description{
+Linearly modulated (LM) measurement data from a quartz sample from Norway
+including background measurement. Measurements carried out in the
+luminescence laboratory at the University of Bayreuth.
+}
+\examples{
+
+##show LM data
+data(ExampleData.FittingLM, envir = environment())
+plot(values.curve,log="x")
+
+}
+\references{
+Fuchs, M., Kreutzer, S., Fischer, M., Sauer, D., Soerensen, R., 2012. OSL and IRSL
+dating of raised beach sand deposits along the southeastern coast of Norway.
+Quaternary Geochronology, 10, 195-200.
+}
+
diff --git a/man/ExampleData.LxTxData.Rd b/man/ExampleData.LxTxData.Rd
new file mode 100644
index 0000000..e1497fd
--- /dev/null
+++ b/man/ExampleData.LxTxData.Rd
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.LxTxData}
+\alias{ExampleData.LxTxData}
+\title{Example Lx/Tx data from CW-OSL SAR measurement}
+\format{A \code{data.frame} with 4 columns (Dose, LxTx, LxTx.Error, TnTx).}
+\source{
+\tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab
+BT607\cr Location: \tab Ostrau (Saxony-Anhalt/Germany)\cr Material: \tab
+Middle grain (38-63 \eqn{\mu}m) quartz measured on a Risoe TL/OSL DA-15
+reader.\cr }
+}
+\description{
+LxTx data from a SAR measurement for the package Luminescence.
+}
+\examples{
+
+##plot Lx/Tx data vs dose [s]
+data(ExampleData.LxTxData, envir = environment())
+plot(LxTxData$Dose,LxTxData$LxTx)
+
+}
+\references{
+unpublished data
+}
+
diff --git a/man/ExampleData.LxTxOSLData.Rd b/man/ExampleData.LxTxOSLData.Rd
new file mode 100644
index 0000000..ae100a1
--- /dev/null
+++ b/man/ExampleData.LxTxOSLData.Rd
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.LxTxOSLData}
+\alias{ExampleData.LxTxOSLData}
+\title{Example Lx and Tx curve data from an artificial OSL measurement}
+\format{Two \code{data.frames} containing time and count values.}
+\source{
+Arbitrary OSL measurement.
+}
+\description{
+Lx and Tx data of continuous wave (CW-) OSL signal curves.
+}
+\examples{
+
+##load data
+data(ExampleData.LxTxOSLData, envir = environment())
+
+##plot data
+plot(Lx.data)
+plot(Tx.data)
+
+}
+\references{
+unpublished data
+}
+
diff --git a/man/ExampleData.RLum.Analysis.Rd b/man/ExampleData.RLum.Analysis.Rd
new file mode 100644
index 0000000..aa0097f
--- /dev/null
+++ b/man/ExampleData.RLum.Analysis.Rd
@@ -0,0 +1,48 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.RLum.Analysis}
+\alias{ExampleData.RLum.Analysis}
+\title{Example data as \code{\linkS4class{RLum.Analysis}} objects}
+\format{\code{IRSAR.RF.Data}: IRSAR.RF.Data on coarse grain feldspar
+
+Each object contains data needed for the given protocol analysis.}
+\source{
+\bold{IRSAR.RF.Data}
+
+These data were kindly provided by Tobias Lauer and Matthias Krbetschek.
+
+\tabular{ll}{
+
+Lab: \tab Luminescence Laboratory TU Bergakademie Freiberg\cr Lab-Code: \tab
+ZEU/SA1\cr Location: \tab Zeuchfeld (Zeuchfeld Sandur;
+Saxony-Anhalt/Germany)\cr Material: \tab K-feldspar (130-200 \eqn{\mu}m)\cr
+Reference: \tab Kreutzer et al. (2014)\cr
+
+}
+}
+\description{
+Collection of different \code{\linkS4class{RLum.Analysis}} objects for
+protocol analysis.
+}
+\section{Version}{
+ 0.1
+}
+\examples{
+
+##load data
+data(ExampleData.RLum.Analysis, envir = environment())
+
+##plot data
+plot_RLum(IRSAR.RF.Data)
+
+}
+\references{
+\bold{IRSAR.RF.Data}
+
+Kreutzer, S., Lauer, T., Meszner, S., Krbetschek, M.R., Faust, D., Fuchs,
+M., 2014. Chronology of the Quaternary profile Zeuchfeld in Saxony-Anhalt /
+Germany - a preliminary luminescence dating study. Zeitschrift fuer
+Geomorphologie 58, 5-26. doi: 10.1127/0372-8854/2012/S-00112
+}
+\keyword{datasets}
+
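The object feeds directly into the IRSAR RF protocol analysis documented
further below in this commit. A sketch; method = "FIT" is one of the
documented options, see the analyse_IRSAR.RF() help:

    library(Luminescence)
    data(ExampleData.RLum.Analysis, envir = environment())

    ## run the IRSAR RF analysis on the example object
    results <- analyse_IRSAR.RF(object = IRSAR.RF.Data, method = "FIT")
    get_RLum(results)
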
diff --git a/man/ExampleData.RLum.Data.Image.Rd b/man/ExampleData.RLum.Data.Image.Rd
new file mode 100644
index 0000000..1416e47
--- /dev/null
+++ b/man/ExampleData.RLum.Data.Image.Rd
@@ -0,0 +1,41 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.RLum.Data.Image}
+\alias{ExampleData.RLum.Data.Image}
+\title{Example data as \code{\linkS4class{RLum.Data.Image}} objects}
+\format{Object of class \code{\linkS4class{RLum.Data.Image}}}
+\source{
+\bold{ExampleData.RLum.Data.Image}
+
+These data were kindly provided by Regina DeWitt.
+
+\tabular{ll}{
+
+Lab.: \tab Department of Physics, East Carolina University, NC, USA\cr
+Lab-Code: \tab -\cr Location: \tab - \cr Material: \tab - \cr Reference:
+\tab - \cr
+
+}
+
+Image data is a measurement of fluorescent ceiling lights with a cooled
+Princeton Instruments (TM) camera fitted on a Risoe DA-20 TL/OSL reader.
+}
+\description{
+A measurement made with a Princeton Instruments camera, imported into R
+with the function \code{\link{read_SPE2R}} to produce an
+\code{\linkS4class{RLum.Data.Image}} object.
+}
+\section{Version}{
+ 0.1
+}
+\examples{
+
+##load data
+data(ExampleData.RLum.Data.Image, envir = environment())
+
+##plot data
+plot_RLum(ExampleData.RLum.Data.Image)
+
+}
+\keyword{datasets}
+
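The underlying raster data can be pulled out of the object with the
accessor method. A small sketch; per the RLum.Data.Image class documented
later in this commit, the data slot holds a raster brick:

    library(Luminescence)
    data(ExampleData.RLum.Data.Image, envir = environment())

    ## extract the underlying RasterBrick and count its layers
    image_data <- get_RLum(ExampleData.RLum.Data.Image)
    raster::nlayers(image_data)
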
diff --git a/man/ExampleData.XSYG.Rd b/man/ExampleData.XSYG.Rd
new file mode 100644
index 0000000..675a617
--- /dev/null
+++ b/man/ExampleData.XSYG.Rd
@@ -0,0 +1,97 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.XSYG}
+\alias{ExampleData.XSYG}
+\title{Example data for a SAR OSL measurement and a TL spectrum using a lexsyg
+reader}
+\format{\code{OSL.SARMeasurement}: SAR OSL measurement data
+
+The data contain two elements: (a) \code{$Sequence.Header} is a
+\link{data.frame} with metadata from the measurement, (b)
+\code{$Sequence.Object} contains an \code{\linkS4class{RLum.Analysis}}
+object for further analysis.\cr
+
+\code{TL.Spectrum}: TL spectrum data
+
+\code{\linkS4class{RLum.Data.Spectrum}} object for further analysis. The
+spectrum was cleaned of cosmic rays using the function
+\code{apply_CosmicRayRemoval}. Note that no quantum efficiency calibration
+was performed.}
+\source{
+\bold{OSL.SARMeasurement}
+
+\tabular{ll}{
+
+Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab no code\cr
+Location: \tab not specified\cr Material: \tab Coarse grain quartz \cr \tab
+on steel cups on lexsyg research reader\cr Reference: \tab unpublished }
+
+\bold{TL.Spectrum}
+
+\tabular{ll}{
+
+Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab BT753\cr
+Location: \tab Dolni Vestonice/Czech Republic\cr Material: \tab Fine grain
+polymineral \cr \tab on steel cups on lexsyg research reader\cr Reference:
+\tab Fuchs et al., 2013 \cr Spectrum: \tab Integration time 19 s, channel
+time 20 s\cr Heating: \tab 1 K/s, up to 500 deg. C }
+}
+\description{
+Example data from a SAR OSL measurement and a TL spectrum for package
+Luminescence imported from a Freiberg Instruments XSYG file using the
+function \code{\link{read_XSYG2R}}.
+}
+\section{Version}{
+ 0.1
+}
+\examples{
+
+##show data
+data(ExampleData.XSYG, envir = environment())
+
+## =========================================
+##(1) OSL.SARMeasurement
+OSL.SARMeasurement
+
+##show $Sequence.Object
+OSL.SARMeasurement$Sequence.Object
+
+##grep OSL curves and plot the first curve
+OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object,
+recordType="OSL")[[1]]
+plot_RLum(OSLcurve)
+
+## =========================================
+##(2) TL.Spectrum
+TL.Spectrum
+
+##plot simple spectrum (2D)
+plot_RLum.Data.Spectrum(TL.Spectrum,
+                        plot.type="contour",
+                        xlim = c(310,750),
+                        ylim = c(0,300),
+                        bin.rows=10,
+                        bin.cols = 1)
+
+##plot 3d spectrum (uncomment for usage)
+# plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp",
+# xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+# bin.cols = 1)
+
+}
+\references{
+Unpublished data measured to serve as example data for this
+package. The location of origin of sample BT753 is given here:
+
+Fuchs, M., Kreutzer, S., Rousseau, D.D., Antoine, P., Hatte, C., Lagroix,
+F., Moine, O., Gauthier, C., Svoboda, J., Lisa, L., 2013. The loess sequence
+of Dolni Vestonice, Czech Republic: A new OSL-based chronology of the Last
+Climatic Cycle. Boreas, 42, 664--677.
+}
+\seealso{
+\code{\link{read_XSYG2R}}, \code{\linkS4class{RLum.Analysis}},\cr
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot_RLum}},\cr
+\code{\link{plot_RLum.Analysis}}, \code{\link{plot_RLum.Data.Spectrum}}
+}
+\keyword{datasets}
+
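The imported Sequence.Object can be analysed like any other SAR
measurement. A sketch with analyse_SAR.CWOSL(); all four integral limits
below are placeholders, not derived from the data, and must be matched to
the actual curve channels:

    library(Luminescence)
    data(ExampleData.XSYG, envir = environment())

    ## integral limits are placeholders, adapt to the measured curves
    results <- analyse_SAR.CWOSL(
      object = OSL.SARMeasurement$Sequence.Object,
      signal.integral.min = 1,
      signal.integral.max = 2,
      background.integral.min = 900,
      background.integral.max = 1000,
      fit.method = "EXP")
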
diff --git a/man/Luminescence-package.Rd b/man/Luminescence-package.Rd
new file mode 100644
index 0000000..5368a90
--- /dev/null
+++ b/man/Luminescence-package.Rd
@@ -0,0 +1,102 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\docType{package}
+\name{Luminescence-package}
+\alias{Luminescence}
+\alias{Luminescence-package}
+\title{Comprehensive Luminescence Dating Data Analysis}
+\description{
+A collection of various R functions for the purpose of Luminescence dating
+data analysis. This includes, amongst others, data import, export,
+application of age models, curve deconvolution, sequence analysis and
+plotting of equivalent dose distributions.
+}
+\details{
+\tabular{ll}{ Package: \tab Luminescence\cr Type: \tab Package\cr Version:
+\tab 0.6.4 \cr Date: \tab 2016-09-09 \cr License: \tab GPL-3\cr }
+}
+\author{
+\bold{Authors} (alphabetic order)
+
+\tabular{ll}{
+Christoph Burow \tab University of Cologne, Germany \cr
+Michael Dietze \tab GFZ Helmholtz Centre Potsdam, Germany \cr
+Julie Durcan \tab University of Oxford, United Kingdom \cr
+Manfred Fischer\tab University of Bayreuth, Germany \cr
+Margret C. Fuchs \tab Helmholtz-Zentrum Dresden-Rossendorf, Helmholtz-Institute Freiberg for Resource Technology,
+Freiberg, Germany \cr
+Johannes Friedrich \tab University of Bayreuth, Germany \cr
+Georgina King \tab University of Cologne, Germany \cr
+Sebastian Kreutzer \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+Norbert Mercier \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+Christoph Schmidt \tab University of Bayreuth, Germany \cr
+Rachel K. Smedley \tab Aberystwyth University, United Kingdom
+}
+
+\bold{Beta-Tester}
+
+Thomas Kolb, University of Bayreuth, Germany\cr
+
+\bold{Supervisor}
+
+Markus Fuchs, Justus-Liebig-University Giessen, Germany\cr
+
+\bold{Support contact}
+
+\email{developers at r-luminescence.de}\cr
+
+We further encourage the usage of our support forum. For this, please
+visit our project website (link below).
+
+\bold{Bug reporting}
+
+\email{bugtracker at r-luminescence.de} \cr
+
+\bold{Project website}
+
+\url{http://www.r-luminescence.de}\cr
+
+\bold{Project source code repository}\cr
+\url{https://github.com/R-Lum/Luminescence}\cr
+
+\bold{Related package projects}\cr
+\url{https://cran.r-project.org/package=RLumShiny}\cr
+\url{http://shiny.r-luminescence.de}\cr
+\url{https://cran.r-project.org/package=RLumModel}\cr
+\url{http://model.r-luminescence.de}\cr
+
+\bold{Package maintainer}
+
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne, Pessac,
+France, \cr \email{sebastian.kreutzer at u-bordeaux-montaigne.fr}
+
+\bold{Acknowledgement}
+
+Cooperation and personal exchange between the developers is gratefully
+funded by the DFG (SCHM 3051/3-1) in the framework of the program
+"Scientific Networks". Project title: "Lum.Network: Ein
+Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2017)
+}
+\references{
+Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M.,
+Schmidt, C., 2013. A practical guide to the R package Luminescence.
+Ancient TL, 31, 11-18.
+
+Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot:
+visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7.
+http://dx.doi.org/10.1016/j.quageo.2015.09.003
+
+Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C.,
+Fuchs, M., 2015. Data processing in luminescence dating analysis: An
+exemplary workflow using the R package 'Luminescence'. Quaternary
+International, 362,8-13. http://dx.doi.org/10.1016/j.quaint.2014.06.034
+
+Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M.,
+2012. Introducing an R package for luminescence dating analysis. Ancient TL,
+30, 1-8.
+
+Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
+Ancient TL 33, 16-21.
+}
+\keyword{package}
+
diff --git a/man/RLum-class.Rd b/man/RLum-class.Rd
new file mode 100644
index 0000000..5e9d679
--- /dev/null
+++ b/man/RLum-class.Rd
@@ -0,0 +1,62 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum-class.R
+\docType{class}
+\name{RLum-class}
+\alias{RLum-class}
+\alias{replicate_RLum,RLum-method}
+\title{Class \code{"RLum"}}
+\usage{
+\S4method{replicate_RLum}{RLum}(object, times = NULL)
+}
+\arguments{
+\item{object}{an object of class \code{\linkS4class{RLum}} (\bold{required})}
+
+\item{times}{\code{\link{integer}} (optional): number of times each element
+is repeated}
+}
+\description{
+Abstract class for data in the package Luminescence
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{replicate_RLum}: Replication method for RLum-objects
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{originator}}{Object of class \code{\link{character}} containing the name of the producing
+function for the object. Set automatically by using the function \code{\link{set_RLum}}.}
+
+\item{\code{info}}{Object of class \code{\link{list}} for additional information on the object itself}
+
+\item{\code{.uid}}{Object of class \code{\link{character}} for a unique object identifier. This id is
+usually calculated using the internal function \code{.create_UID()} if the function \code{\link{set_RLum}}
+is called.}
+
+\item{\code{.pid}}{Object of class \code{\link{character}} for a parent id. This allows nesting RLum-objects
+at will. The parent id can be the uid of another object.}
+}}
+\note{
+\code{RLum} is a virtual class.
+}
+\section{Objects from the Class}{
+ A virtual Class: No objects can be created
+from it.
+}
+
+\section{Class version}{
+ 0.4.0
+}
+\examples{
+
+showClass("RLum")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
+\seealso{
+\code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Analysis}}
+}
+\keyword{classes}
+
diff --git a/man/RLum.Analysis-class.Rd b/man/RLum.Analysis-class.Rd
new file mode 100644
index 0000000..99e8c5d
--- /dev/null
+++ b/man/RLum.Analysis-class.Rd
@@ -0,0 +1,184 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Analysis-class.R
+\docType{class}
+\name{RLum.Analysis-class}
+\alias{RLum.Analysis-class}
+\alias{get_RLum,RLum.Analysis-method}
+\alias{length_RLum,RLum.Analysis-method}
+\alias{names_RLum,RLum.Analysis-method}
+\alias{set_RLum,RLum.Analysis-method}
+\alias{show,RLum.Analysis-method}
+\alias{structure_RLum,RLum.Analysis-method}
+\title{Class \code{"RLum.Analysis"}}
+\usage{
+\S4method{show}{RLum.Analysis}(object)
+
+\S4method{set_RLum}{RLum.Analysis}(class, originator, .uid, .pid,
+  protocol = NA_character_, records = list(), info = list())
+
+\S4method{get_RLum}{RLum.Analysis}(object, record.id = NULL,
+  recordType = NULL, curveType = NULL, RLum.type = NULL,
+  protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE,
+  info.object = NULL)
+
+\S4method{structure_RLum}{RLum.Analysis}(object, fullExtent = FALSE)
+
+\S4method{length_RLum}{RLum.Analysis}(object)
+
+\S4method{names_RLum}{RLum.Analysis}(object)
+}
+\arguments{
+\item{object}{\code{[show_RLum]}\code{[get_RLum]}\code{[names_RLum]}\code{[length_RLum]}
+\code{[structure_RLum]} an object of class \code{\linkS4class{RLum.Analysis}}
+(\bold{required})}
+
+\item{class}{[\code{set_RLum}] \code{\link{character}} (\bold{required}): name of the \code{RLum} class to be created}
+
+\item{originator}{[\code{set_RLum}] \code{\link{character}} (automatic): contains the name
+of the calling function (the function that produces this object); can be set manually.}
+
+\item{.uid}{[\code{set_RLum}] \code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{[\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{protocol}{[\code{set_RLum}] \code{\link{character}} (optional): sets protocol type for
+analysis object. Value may be used by subsequent analysis functions.}
+
+\item{records}{[\code{set_RLum}] \code{\link{list}} (\bold{required}): list of \code{\linkS4class{RLum.Data}} objects}
+
+\item{info}{[\code{set_RLum}] \code{\link{list}} (optional): a list containing additional
+info data for the object
+
+\bold{\code{set_RLum}}:\cr
+
+Returns an \code{\linkS4class{RLum.Analysis}} object.}
+
+\item{record.id}{[\code{get_RLum}] \code{\link{numeric}} or \code{\link{logical}} (optional): IDs of specific records.
+If of type \code{logical}, the entire id range is assumed and \code{TRUE} and \code{FALSE} indicate the selection.}
+
+\item{recordType}{[\code{get_RLum}] \code{\link{character}} (optional): record type (e.g., "OSL").
+Can be also a vector, for multiple matching, e.g., \code{recordType = c("OSL", "IRSL")}}
+
+\item{curveType}{[\code{get_RLum}] \code{\link{character}} (optional): curve
+type (e.g. "predefined" or "measured")}
+
+\item{RLum.type}{[\code{get_RLum}] \code{\link{character}} (optional): RLum object type.
+Defaults to "RLum.Data.Curve" and "RLum.Data.Spectrum".}
+
+\item{get.index}{[\code{get_RLum}] \code{\link{logical}} (optional): return a numeric
+vector with the index of each element in the RLum.Analysis object.}
+
+\item{drop}{[\code{get_RLum}] \code{\link{logical}} (with default): coerce to the next possible layer
+(which are \code{RLum.Data}-objects), \code{drop = FALSE} keeps the original \code{RLum.Analysis}}
+
+\item{recursive}{[\code{get_RLum}] \code{\link{logical}} (with default): if \code{TRUE} (the default)
+and the result of the \code{get_RLum} request is a single object, this object will be unlisted, i.e.
+only the object itself and not a list containing exactly one object is returned. This mostly makes
+things easier; however, if this method is used within a loop this may be undesired.}
+
+\item{info.object}{[\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+element}
+
+\item{fullExtent}{[\code{structure_RLum}] \code{\link{logical}} (with default): extends the returned \code{data.frame}
+to its full extent, i.e. all info elements are part of the return as well. The default value
+is \code{FALSE} as the data frame might become rather big.}
+}
+\value{
+\bold{\code{get_RLum}}:\cr
+
+Returns: \cr
+(1) \code{\link{list}} of \code{\linkS4class{RLum.Data}} objects or \cr
+(2) Single \code{\linkS4class{RLum.Data}} object, if only one object is contained and
+\code{recursive = FALSE} or\cr
+(3) \code{\linkS4class{RLum.Analysis}} objects for \code{drop = FALSE} \cr
+
+\bold{\code{structure_RLum}}:\cr
+
+Returns \code{\linkS4class{data.frame}} showing the structure.
+
+\bold{\code{length_RLum}}\cr
+
+Returns the number of records in this object.
+
+\bold{\code{names_RLum}}\cr
+
+Returns the names of the record types (recordType) in this object.
+}
+\description{
+Object class to represent analysis data for protocol analysis, i.e. all curves, spectra etc.
+from one measurement. Objects of this class are produced by, e.g., \code{\link{read_XSYG2R}}
+and \code{\link{read_Daybreak2R}}.
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of \code{RLum.Analysis} object
+
+\item \code{set_RLum}: Construction method for \code{\linkS4class{RLum.Analysis}} objects.
+
+\item \code{get_RLum}: Accessor method for RLum.Analysis object.
+
+The arguments record.id, recordType, curveType and RLum.type are optional and allow
+limiting the returned records by their id (list index number), their record type
+(e.g. recordType = "OSL") or object type.
+
+Example: curve type (e.g. curveType = "predefined" or curveType = "measured")
+
+The selection of a specific RLum.type object overrides the default selection.
+Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum
+
+\item \code{structure_RLum}: Method to show the structure of an \code{\linkS4class{RLum.Analysis}} object.
+
+\item \code{length_RLum}: Returns the length of the object, i.e., number of stored records.
+
+\item \code{names_RLum}: Returns the names of the \code{\linkS4class{RLum.Data}} objects (same as shown with the show method)
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{protocol}}{Object of class \code{\link{character}} describing the applied measurement protocol}
+
+\item{\code{records}}{Object of class \code{\link{list}} containing objects of class \code{\linkS4class{RLum.Data}}}
+}}
+\note{
+The method \code{\link{structure_RLum}} is currently only available for objects
+containing \code{\linkS4class{RLum.Data.Curve}} records.
+}
+\section{Objects from the Class}{
+ Objects can be created by calls of the form
+\code{set_RLum("RLum.Analysis", ...)}.
+}
+
+\section{Class version}{
+ 0.4.6
+}
+\examples{
+
+showClass("RLum.Analysis")
+
+##set empty object
+set_RLum(class = "RLum.Analysis")
+
+###use example data
+##load data
+data(ExampleData.RLum.Analysis, envir = environment())
+
+##show curves in object
+get_RLum(IRSAR.RF.Data)
+
+##show only the first object, but by keeping the object
+get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+}
+\seealso{
+\code{\link{Risoe.BINfileData2RLum.Analysis}},
+\code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum}}
+}
+\keyword{classes}
+\keyword{methods}
+
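The selection arguments of get_RLum() compose naturally with the other
methods. A short sketch on the example object; the record type "RF" is an
assumption taken from the IRSAR example data above:

    library(Luminescence)
    data(ExampleData.RLum.Analysis, envir = environment())

    ## select records by type, keeping the RLum.Analysis wrapper
    rf_curves <- get_RLum(IRSAR.RF.Data, recordType = "RF", drop = FALSE)

    ## number of stored records and their record type names
    length_RLum(IRSAR.RF.Data)
    names_RLum(IRSAR.RF.Data)
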
diff --git a/man/RLum.Data-class.Rd b/man/RLum.Data-class.Rd
new file mode 100644
index 0000000..9380293
--- /dev/null
+++ b/man/RLum.Data-class.Rd
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Data-class.R
+\docType{class}
+\name{RLum.Data-class}
+\alias{RLum.Data-class}
+\title{Class \code{"RLum.Data"}}
+\description{
+Generalized virtual data class for luminescence data.
+}
+\note{
+Just a virtual class.
+}
+\section{Objects from the Class}{
+ A virtual Class: No objects can be created
+from it.
+}
+
+\section{Class version}{
+ 0.2.1
+}
+\examples{
+
+showClass("RLum.Data")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
+\seealso{
+\code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Spectrum}}
+}
+\keyword{classes}
+
diff --git a/man/RLum.Data.Curve-class.Rd b/man/RLum.Data.Curve-class.Rd
new file mode 100644
index 0000000..5f3b2fc
--- /dev/null
+++ b/man/RLum.Data.Curve-class.Rd
@@ -0,0 +1,147 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Data.Curve-class.R
+\docType{class}
+\name{RLum.Data.Curve-class}
+\alias{RLum.Data.Curve-class}
+\alias{bin_RLum.Data,RLum.Data.Curve-method}
+\alias{get_RLum,RLum.Data.Curve-method}
+\alias{length_RLum,RLum.Data.Curve-method}
+\alias{names_RLum,RLum.Data.Curve-method}
+\alias{set_RLum,RLum.Data.Curve-method}
+\alias{show,RLum.Data.Curve-method}
+\title{Class \code{"RLum.Data.Curve"}}
+\usage{
+\S4method{show}{RLum.Data.Curve}(object)
+
+\S4method{set_RLum}{RLum.Data.Curve}(class, originator, .uid, .pid,
+  recordType = NA_character_, curveType = NA_character_, data = matrix(0,
+  ncol = 2), info = list())
+
+\S4method{get_RLum}{RLum.Data.Curve}(object, info.object = NULL)
+
+\S4method{length_RLum}{RLum.Data.Curve}(object)
+
+\S4method{names_RLum}{RLum.Data.Curve}(object)
+
+\S4method{bin_RLum.Data}{RLum.Data.Curve}(object, bin_size = 2)
+}
+\arguments{
+\item{object}{[\code{show_RLum}][\code{get_RLum}][\code{length_RLum}][\code{names_RLum}] an object of
+class \code{\linkS4class{RLum.Data.Curve}} (\bold{required})}
+
+\item{class}{[\code{set_RLum}] \code{\link{character}} (\bold{required}): name of the \code{RLum} class to create}
+
+\item{originator}{[\code{set_RLum}] \code{\link{character}} (automatic): contains the name of the calling function
+(the function that produces this object); can be set manually.}
+
+\item{.uid}{[\code{set_RLum}] \code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{[\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{recordType}{[\code{set_RLum}] \code{\link{character}} (optional): record type (e.g., "OSL")}
+
+\item{curveType}{[\code{set_RLum}] \code{\link{character}} (optional): curve type (e.g., "predefined" or "measured")}
+
+\item{data}{[\code{set_RLum}] \code{\link{matrix}} (\bold{required}): raw curve data.
+If \code{data} itself is a \code{RLum.Data.Curve}-object this can be used to re-construct the object
+(s. Details)}
+
+\item{info}{[\code{set_RLum}] \code{\link{list}} (optional): info elements}
+
+\item{info.object}{[\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+element}
+
+\item{bin_size}{[\code{bin_RLum}] \code{\link{integer}} (with default): set number of channels
+used for each bin, e.g. \code{bin_size = 2} means that two channels are binned.}
+}
+\value{
+\bold{\code{set_RLum}}\cr
+
+Returns an \code{\linkS4class{RLum.Data.Curve}} object.
+
+\bold{\code{get_RLum}}\cr
+
+(1) A \code{\link{matrix}} with the curve values or \cr
+(2) only the info object if \code{info.object} was set.\cr
+
+\bold{\code{length_RLum}}\cr
+
+Number of channels in the curve (row number of the matrix)
+
+\bold{\code{names_RLum}}\cr
+
+Names of the info elements (slot \code{info})
+
+\bold{\code{bin_RLum.Data}}\cr
+
+Same object as input, after applying the binning.
+}
+\description{
+Class for representing luminescence curve data.
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of \code{RLum.Data.Curve} object
+
+\item \code{set_RLum}: Construction method for RLum.Data.Curve object. The slot info is optional
+and predefined as empty list by default.
+
+\item \code{get_RLum}: Accessor method for RLum.Data.Curve object. The argument info.object is
+optional to directly access the info elements. If no info element name is
+provided, the raw curve data (matrix) will be returned.
+
+\item \code{length_RLum}: Returns the length of the curve object, which is the maximum
+time/temperature value of the curve (corresponding to the stimulation length)
+
+\item \code{names_RLum}: Returns the names of the info elements coming along with this curve object
+
+\item \code{bin_RLum.Data}: Allows binning of specific objects
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{recordType}}{Object of class "character" containing the type of the curve (e.g. "TL" or "OSL")}
+
+\item{\code{curveType}}{Object of class "character" containing curve type, allowed values are measured or predefined}
+
+\item{\code{data}}{Object of class \code{\link{matrix}} containing curve x and y data.
+'data' can also be of type \code{RLum.Data.Curve} to change object values without deconstructing the object.
+For example: \code{set_RLum(class = 'RLum.Data.Curve',
+data = Your.RLum.Data.Curve, recordType = 'never seen before')}
+would just change the recordType. For missing arguments, the value is taken from the input object
+in 'data' (which is already an RLum.Data.Curve object in this example)}
+}}
+\note{
+The class should only contain data for a single curve. For additional
+elements the slot \code{info} can be used (e.g., to provide an additional heating
+ramp curve). Objects of the class \code{RLum.Data.Curve} are produced by other
+functions (partly within \code{\linkS4class{RLum.Analysis}} objects),
+namely: \code{\link{Risoe.BINfileData2RLum.Analysis}}, \code{\link{read_XSYG2R}}
+}
+\section{Create objects from this Class}{
+ Objects can be created by calls of the form
+\code{set_RLum(class = "RLum.Data.Curve", ...)}.
+}
+
+\section{Class version}{
+ 0.4.1
+}
+\examples{
+
+showClass("RLum.Data.Curve")
+
+##set empty curve object
+set_RLum(class = "RLum.Data.Curve")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
+\seealso{
+\code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+\code{\link{plot_RLum}}, \code{\link{merge_RLum}}
+}
+\keyword{classes}
+
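To make the construction and binning methods concrete: a small sketch that
builds a synthetic decay curve and bins it (all values are artificial):

    library(Luminescence)

    ## synthetic CW-OSL-like curve: time vs. counts (artificial values)
    time_s <- seq(0.1, 10, 0.1)
    curve <- set_RLum(class = "RLum.Data.Curve",
                      recordType = "OSL",
                      curveType = "measured",
                      data = matrix(c(time_s, 1000 * exp(-time_s) + 10),
                                    ncol = 2))

    ## combine every two channels into one
    curve_binned <- bin_RLum.Data(curve, bin_size = 2)
    curve_binned
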
diff --git a/man/RLum.Data.Image-class.Rd b/man/RLum.Data.Image-class.Rd
new file mode 100644
index 0000000..0e0c383
--- /dev/null
+++ b/man/RLum.Data.Image-class.Rd
@@ -0,0 +1,120 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Data.Image-class.R
+\docType{class}
+\name{RLum.Data.Image-class}
+\alias{RLum.Data.Image-class}
+\alias{get_RLum,RLum.Data.Image-method}
+\alias{names_RLum,RLum.Data.Image-method}
+\alias{set_RLum,RLum.Data.Image-method}
+\alias{show,RLum.Data.Image-method}
+\title{Class \code{"RLum.Data.Image"}}
+\usage{
+\S4method{show}{RLum.Data.Image}(object)
+
+\S4method{set_RLum}{RLum.Data.Image}(class, originator, .uid, .pid,
+  recordType = "Image", curveType = NA_character_,
+  data = raster::brick(raster::raster(matrix())), info = list())
+
+\S4method{get_RLum}{RLum.Data.Image}(object, info.object)
+
+\S4method{names_RLum}{RLum.Data.Image}(object)
+}
+\arguments{
+\item{object}{\code{[show_RLum]}\code{[get_RLum]}\code{[names_RLum]} an object
+of class \code{\linkS4class{RLum.Data.Image}}}
+
+\item{class}{\code{[set_RLum]}\code{\link{character}}: name of the \code{RLum} class to create}
+
+\item{originator}{\code{[set_RLum]} \code{\link{character}} (automatic):
+contains the name of the calling function (the function that produces this object); can be set manually.}
+
+\item{.uid}{[\code{set_RLum}] \code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{[\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{recordType}{\code{[set_RLum]} \code{\link{character}}: record type (e.g. "OSL")}
+
+\item{curveType}{\code{[set_RLum]} \code{\link{character}}: curve type (e.g. "predefined" or "measured")}
+
+\item{data}{\code{[set_RLum]} \code{\link{matrix}}: raw curve data. If data is of type \code{RLum.Data.Image}
+this can be used to re-construct the object.}
+
+\item{info}{\code{[set_RLum]} \code{\link{list}}: info elements}
+
+\item{info.object}{\code{[get_RLum]} \code{\link{character}}: name of the info object to be returned}
+}
+\value{
+\bold{\code{set_RLum}}\cr
+
+Returns an object from class \code{RLum.Data.Image}
+
+\bold{\code{get_RLum}}\cr
+
+(1) Returns the data object (\code{\link[raster]{brick}})\cr
+(2) only the info object if \code{info.object} was set.\cr
+
+\bold{\code{names_RLum}}\cr
+
+Returns the names of the info elements
+}
+\description{
+Class for representing luminescence image data (TL/OSL/RF). Such data are for example produced
+by the function \code{\link{read_SPE2R}}
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of \code{RLum.Data.Image} object
+
+\item \code{set_RLum}: Construction method for RLum.Data.Image object. The slot info is optional
+and predefined as empty list by default.
+
+\item \code{get_RLum}: Accessor method for RLum.Data.Image object. The argument info.object is
+optional to directly access the info elements. If no info element name is
+provided, the raw image data (RasterBrick) will be returned.
+
+\item \code{names_RLum}: Returns the names of the info elements coming along with this image object
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{recordType}}{Object of class \code{\link{character}}
+containing the type of the curve (e.g. "OSL image", "TL image")}
+
+\item{\code{curveType}}{Object of class \code{\link{character}} containing curve type, allowed values
+are measured or predefined}
+
+\item{\code{data}}{Object of class \code{\link[raster]{brick}} containing images (raster data).}
+
+\item{\code{info}}{Object of class \code{\link{list}} containing further meta information objects}
+}}
+\note{
+The class should only contain data for a set of images. For additional
+elements the slot \code{info} can be used.
+}
+\section{Objects from the Class}{
+ Objects can be created by calls of the form
+\code{set_RLum("RLum.Data.Image", ...)}.
+}
+
+\section{Class version}{
+ 0.4.0
+}
+\examples{
+
+showClass("RLum.Data.Image")
+
+##create empty RLum.Data.Image object
+set_RLum(class = "RLum.Data.Image")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
+\seealso{
+\code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+\code{\link{plot_RLum}}, \code{\link{read_SPE2R}}
+}
+\keyword{classes}
+
diff --git a/man/RLum.Data.Spectrum-class.Rd b/man/RLum.Data.Spectrum-class.Rd
new file mode 100644
index 0000000..3c930af
--- /dev/null
+++ b/man/RLum.Data.Spectrum-class.Rd
@@ -0,0 +1,129 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Data.Spectrum-class.R
+\docType{class}
+\name{RLum.Data.Spectrum-class}
+\alias{RLum.Data.Spectrum-class}
+\alias{get_RLum,RLum.Data.Spectrum-method}
+\alias{names_RLum,RLum.Data.Spectrum-method}
+\alias{set_RLum,RLum.Data.Spectrum-method}
+\alias{show,RLum.Data.Spectrum-method}
+\title{Class \code{"RLum.Data.Spectrum"}}
+\usage{
+\S4method{show}{RLum.Data.Spectrum}(object)
+
+\S4method{set_RLum}{RLum.Data.Spectrum}(class, originator, .uid, .pid,
+  recordType = "Spectrum", curveType = NA_character_, data = matrix(),
+  info = list())
+
+\S4method{get_RLum}{RLum.Data.Spectrum}(object, info.object)
+
+\S4method{names_RLum}{RLum.Data.Spectrum}(object)
+}
+\arguments{
+\item{object}{[\code{show_RLum}][\code{get_RLum}][\code{names_RLum}] an object of
+class \code{\linkS4class{RLum.Data.Spectrum}}}
+
+\item{class}{[\code{set_RLum}] \code{\link{character}} (\bold{required}): name of the \code{RLum} class to create.}
+
+\item{originator}{\code{\link{character}} (automatic): contains the name of the calling function
+(the function that produces this object); can be set manually.}
+
+\item{.uid}{[\code{set_RLum}] \code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{[\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{recordType}{[\code{set_RLum}] \code{\link{character}}: record type (e.g. "OSL")}
+
+\item{curveType}{[\code{set_RLum}] \code{\link{character}}: curve type (e.g. "predefined" or "measured")}
+
+\item{data}{[\code{set_RLum}] \code{\link{matrix}}: raw curve data. If data is of
+type \code{RLum.Data.Spectrum}, this can be used to re-construct the object.}
+
+\item{info}{[\code{set_RLum}] \code{\link{list}}: info elements}
+
+\item{info.object}{[\code{get_RLum}] \code{\link{character}} (optional): the name of the info
+object to be called}
+}
+\value{
+\bold{\code{set_RLum}}\cr
+
+An object from the class \code{RLum.Data.Spectrum}
+
+\bold{\code{get_RLum}}\cr
+
+(1) A \code{\link{matrix}} with the spectrum values or \cr
+(2) only the info object if \code{info.object} was set.\cr
+
+\bold{\code{names_RLum}}\cr
+
+The names of the info objects
+}
+\description{
+Class for representing luminescence spectra data (TL/OSL/RF).
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of \code{RLum.Data.Spectrum} object
+
+\item \code{set_RLum}: Construction method for RLum.Data.Spectrum object. The slot info is optional
+and predefined as empty list by default
+
+\item \code{get_RLum}: Accessor method for RLum.Data.Spectrum object. The argument info.object
+is optional to directly access the info elements. If no info element name
+is provided, the raw curve data (matrix) will be returned
+
+\item \code{names_RLum}: Returns the names of the info elements coming along with this spectrum object
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{recordType}}{Object of class \code{\link{character}} containing the type of the curve (e.g. "TL" or "OSL")}
+
+\item{\code{curveType}}{Object of class \code{\link{character}} containing curve type, allowed values
+are measured or predefined}
+
+\item{\code{data}}{Object of class \code{\link{matrix}} containing spectrum (count) values.
+Row labels indicate wavelength/pixel values, column labels are temperature or time values.}
+
+\item{\code{info}}{Object of class \code{\link{list}} containing further meta information objects}
+}}
+\note{
+The class should only contain data for a single spectrum data set. For
+additional elements the slot \code{info} can be used. Objects from this class are automatically
+created by, e.g., \code{\link{read_XSYG2R}}
+}
+\section{Objects from the Class}{
+ Objects can be created by calls of the form
+\code{set_RLum("RLum.Data.Spectrum", ...)}.
+}
+
+\section{Class version}{
+ 0.4.0
+}
+\examples{
+
+showClass("RLum.Data.Spectrum")
+
+##show example data
+data(ExampleData.XSYG, envir = environment())
+TL.Spectrum
+
+##show data matrix
+get_RLum(TL.Spectrum)
+
+##plot spectrum
+\dontrun{
+plot_RLum(TL.Spectrum)
+}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
+\seealso{
+\code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
+\code{\link{plot_RLum}}
+}
+\keyword{classes}
+
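A minimal constructor sketch for a spectrum object; following the data
slot description above, row names act as wavelength values and column
names as time/temperature values (counts below are artificial):

    library(Luminescence)

    ## artificial 5 x 4 count matrix: 5 wavelength rows, 4 time columns
    m <- matrix(sample(1:100, 20, replace = TRUE), nrow = 5, ncol = 4)
    rownames(m) <- seq(300, 700, length.out = 5)  ## wavelength [nm]
    colnames(m) <- seq(50, 200, length.out = 4)   ## time [s]

    spectrum <- set_RLum(class = "RLum.Data.Spectrum",
                         recordType = "TL",
                         data = m)
    spectrum
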
diff --git a/man/RLum.Results-class.Rd b/man/RLum.Results-class.Rd
new file mode 100644
index 0000000..3d9904c
--- /dev/null
+++ b/man/RLum.Results-class.Rd
@@ -0,0 +1,153 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Results-class.R
+\docType{class}
+\name{RLum.Results-class}
+\alias{RLum.Results-class}
+\alias{get_RLum,RLum.Results-method}
+\alias{length_RLum,RLum.Results-method}
+\alias{names_RLum,RLum.Results-method}
+\alias{set_RLum,RLum.Results-method}
+\alias{show,RLum.Results-method}
+\title{Class \code{"RLum.Results"}}
+\usage{
+\S4method{show}{RLum.Results}(object)
+
+\S4method{set_RLum}{RLum.Results}(class, originator, .uid, .pid,
+  data = list(), info = list())
+
+\S4method{get_RLum}{RLum.Results}(object, data.object, info.object = NULL,
+  drop = TRUE)
+
+\S4method{length_RLum}{RLum.Results}(object)
+
+\S4method{names_RLum}{RLum.Results}(object)
+}
+\arguments{
+\item{object}{[\code{get_RLum}] \code{\linkS4class{RLum.Results}} (required): an object of class
+\code{\linkS4class{RLum.Results}} to be evaluated}
+
+\item{class}{[\code{set_RLum}] \code{\link{character}} \bold{(required)}: name of the \code{RLum} class to create}
+
+\item{originator}{[\code{set_RLum}] \code{\link{character}} (automatic): contains the
+name of the calling function
+(the function that produces this object); can be set manually.}
+
+\item{.uid}{[\code{set_RLum}] \code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{[\code{set_RLum}] \code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{data}{[\code{set_RLum}] \code{\link{list}} (optional): a list containing the data to
+be stored in the object}
+
+\item{info}{[\code{set_RLum}] \code{\link{list}} (optional): a list containing additional
+info data for the object}
+
+\item{data.object}{[\code{get_RLum}] \code{\link{character}} or
+\code{\link{numeric}}: name or index of the data slot to be returned}
+
+\item{info.object}{[\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
+element}
+
+\item{drop}{[\code{get_RLum}] \code{\link{logical}} (with default): coerce to the next possible layer
+(which are data objects); \code{drop = FALSE} keeps the original \code{RLum.Results}}
+}
+\value{
+\bold{\code{set_RLum}}:\cr
+
+Returns an object from the class \code{\linkS4class{RLum.Results}}\cr
+
+\bold{\code{get_RLum}}:\cr
+
+Returns: \cr
+(1) Data object from the specified slot \cr
+(2) \code{\link{list}} of data objects from the slots if 'data.object' is a vector or \cr
+(3) an \code{\linkS4class{RLum.Results}} for \code{drop = FALSE}.\cr
+
+\bold{\code{length_RLum}}\cr
+
+Returns the number of data elements in the \code{RLum.Results} object.
+
+\bold{\code{names_RLum}}\cr
+
+Returns the names of the data elements in the object.
+}
+\description{
+Object class containing results data from functions (e.g., \code{\link{analyse_SAR.CWOSL}}).
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of \code{RLum.Results} object
+
+\item \code{set_RLum}: Construction method for an RLum.Results object.
+
+\item \code{get_RLum}: Accessor method for RLum.Results object. The argument data.object allows
+directly accessing objects delivered within the slot data. The default
+return object depends on the object originator (e.g., \code{fit_LMCurve}).
+If nothing is specified, the first \code{data.object} is always returned.
+
+Note: Detailed specification should be made in combination with the originator slot in the
+receiving function if results are piped.
+
+\item \code{length_RLum}: Returns the length of the object, i.e., number of stored data.objects
+
+\item \code{names_RLum}: Returns the names of the data.objects
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{data}}{Object of class "list" containing output data}
+}}
+\note{
+The class is intended to store results from functions to be used by
+other functions. The data in the object should always be accessed by the
+method \code{get_RLum}.
+}
+\section{Objects from the Class}{
+ Objects can be created by calls of the form
+\code{new("RLum.Results", ...)}.
+}
+
+\section{Class version}{
+ 0.5.1
+}
+\examples{
+
+showClass("RLum.Results")
+
+##create an empty object from this class
+set_RLum(class = "RLum.Results")
+
+##use another function to show how it works
+
+##Basic calculation of the dose rate for a specific date
+ dose.rate <-  calc_SourceDoseRate(
+   measurement.date = "2012-01-27",
+   calib.date = "2014-12-19",
+   calib.dose.rate = 0.0438,
+   calib.error = 0.0019)
+
+##show object
+dose.rate
+
+##get results
+get_RLum(dose.rate)
+
+##get parameters used for the calculation from the same object
+get_RLum(dose.rate, data.object = "parameters")
+
+##alternatively objects can be accessed using S3 generics, such as
+dose.rate$parameters
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+}
+\seealso{
+\code{\linkS4class{RLum}}, \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
+}
+\keyword{classes}
+\keyword{methods}
+
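One detail worth spelling out: with drop = FALSE the container class is
preserved, which matters when results are piped between functions. A short
sketch continuing the example above:

    library(Luminescence)
    dose.rate <- calc_SourceDoseRate(
      measurement.date = "2012-01-27",
      calib.date = "2014-12-19",
      calib.dose.rate = 0.0438,
      calib.error = 0.0019)

    ## drop = FALSE keeps the RLum.Results wrapper instead of
    ## returning the bare data object
    get_RLum(dose.rate, data.object = "parameters", drop = FALSE)
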
diff --git a/man/Risoe.BINfileData-class.Rd b/man/Risoe.BINfileData-class.Rd
new file mode 100644
index 0000000..4a8eadc
--- /dev/null
+++ b/man/Risoe.BINfileData-class.Rd
@@ -0,0 +1,204 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RisoeBINfileData-class.R
+\docType{class}
+\name{Risoe.BINfileData-class}
+\alias{Risoe.BINfileData-class}
+\alias{get_Risoe.BINfileData,Risoe.BINfileData-method}
+\alias{set_Risoe.BINfileData,data.frame,list-method}
+\alias{show,Risoe.BINfileData-method}
+\title{Class \code{"Risoe.BINfileData"}}
+\usage{
+\S4method{show}{Risoe.BINfileData}(object)
+
+\S4method{set_Risoe.BINfileData}{data.frame,list}(METADATA, DATA, .RESERVED)
+
+\S4method{get_Risoe.BINfileData}{Risoe.BINfileData}(object, ...)
+}
+\arguments{
+\item{object}{an object of class \code{\linkS4class{Risoe.BINfileData}}}
+
+\item{METADATA}{Object of class "data.frame" containing the meta information
+for each curve.}
+
+\item{DATA}{Object of class "list" containing numeric vector with count data.}
+
+\item{.RESERVED}{Object of class "list" containing list of undocumented raw
+values for internal use only.}
+
+\item{...}{other arguments that might be passed}
+}
+\description{
+S4 class object for luminescence data in R. The object is produced as output
+of the function \code{\link{read_BIN2R}}.
+}
+\section{Methods (by generic)}{
+\itemize{
+\item \code{show}: Show structure of RLum and Risoe.BINfile class objects
+
+\item \code{set_Risoe.BINfileData}: The Risoe.BINfileData is normally produced as output of the function read_BIN2R.
+This construction method is intended for internal usage only.
+
+\item \code{get_Risoe.BINfileData}: Formal get-method for Risoe.BINfileData object. It does not allow accessing
+the object directly, it is just showing a terminal message.
+}}
+\section{Slots}{
+
+\describe{
+\item{\code{METADATA}}{Object of class "data.frame" containing the meta information for each curve.}
+
+\item{\code{DATA}}{Object of class "list" containing numeric vector with count data.}
+
+\item{\code{.RESERVED}}{Object of class "list" containing list of undocumented raw values for internal use only.}
+}}
+\note{
+\bold{Internal METADATA - object structure}
+
+\tabular{rllll}{
+\bold{#} \tab \bold{Name} \tab \bold{Data Type} \tab \bold{V} \tab \bold{Description} \cr
+[,1]  \tab ID  \tab \code{numeric} \tab RLum \tab Unique record ID (same ID as in slot \code{DATA})\cr
+[,2]  \tab SEL \tab \code{logic} \tab RLum \tab Record selection, not part of the official BIN-format, triggered by TAG\cr
+[,3]  \tab VERSION \tab \code{raw} \tab 03-08 \tab BIN-file version number \cr
+[,4]  \tab LENGTH \tab \code{integer} \tab 03-08 \tab Length of this record\cr
+[,5]  \tab PREVIOUS \tab \code{integer} \tab 03-08 \tab Length of previous record\cr
+[,6]  \tab NPOINTS \tab \code{integer} \tab 03-08 \tab Number of data points in the record\cr
+[,7]  \tab RECTYPE \tab \code{integer} \tab 08 \tab Record type \cr
+[,8]  \tab RUN \tab \code{integer} \tab 03-08 \tab Run number\cr
+[,9]  \tab SET \tab \code{integer} \tab 03-08 \tab Set number\cr
+[,10]  \tab POSITION \tab  \code{integer} \tab 03-08 \tab Position number\cr
+[,11] \tab GRAIN \tab \code{integer} \tab 03-04 \tab Grain number\cr
+[,12] \tab GRAINNUMBER \tab \code{integer} \tab 06-08 \tab Grain number\cr
+[,13] \tab CURVENO \tab \code{integer} \tab 06-08 \tab Curve number\cr
+[,14] \tab XCOORD \tab \code{integer} \tab 03-08 \tab X position of a single grain\cr
+[,15] \tab YCOORD \tab \code{integer} \tab 03-08 \tab Y position of a single grain\cr
+[,16] \tab SAMPLE \tab \code{factor} \tab 03-08 \tab Sample name\cr
+[,17] \tab COMMENT \tab \code{factor} \tab 03-08 \tab Comment name\cr
+[,18] \tab SYSTEMID \tab \code{integer} \tab 03-08 \tab Risoe system id\cr
+[,19] \tab FNAME \tab \code{factor} \tab 06-08 \tab File name (*.bin/*.binx)\cr
+[,20] \tab USER \tab \code{factor} \tab 03-08 \tab User name\cr
+[,21] \tab TIME \tab \code{character} \tab 03-08 \tab Data collection time (hh-mm-ss)\cr
+[,22] \tab DATE \tab \code{factor} \tab 03-08 \tab Data collection date (ddmmyy)\cr
+[,23] \tab DTYPE \tab \code{character} \tab 03-08 \tab Data type\cr
+[,24] \tab BL_TIME \tab \code{numeric} \tab 03-08 \tab Bleaching time\cr
+[,25] \tab BL_UNIT \tab \code{integer} \tab 03-08 \tab Bleaching unit (mJ, J, secs, mins, hrs)\cr
+[,26] \tab NORM1 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (1)\cr
+[,27] \tab NORM2 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (2)\cr
+[,28] \tab NORM3 \tab \code{numeric} \tab 03-08 \tab Normalisation factor (3)\cr
+[,29] \tab BG \tab \code{numeric} \tab 03-08 \tab Background level\cr
+[,30] \tab SHIFT \tab \code{integer} \tab 03-08 \tab Number of channels to shift data\cr
+[,31] \tab TAG \tab \code{integer} \tab 03-08 \tab Tag, triggers SEL\cr
+[,32] \tab LTYPE \tab \code{character} \tab 03-08 \tab Luminescence type\cr
+[,33] \tab LIGHTSOURCE \tab \code{character} \tab 03-08 \tab Light source\cr
+[,34] \tab LPOWER \tab \code{numeric} \tab 03-08 \tab Optical stimulation power\cr
+[,35] \tab LIGHTPOWER \tab \code{numeric} \tab 06-08 \tab Optical stimulation power\cr
+[,36] \tab LOW \tab \code{numeric} \tab 03-08 \tab Low (temperature, time, wavelength)\cr
+[,37] \tab HIGH \tab \code{numeric} \tab 03-08 \tab High (temperature, time, wavelength)\cr
+[,38] \tab RATE \tab \code{numeric} \tab 03-08 \tab Rate (heating rate, scan rate)\cr
+[,39] \tab TEMPERATURE \tab \code{integer} \tab 03-08 \tab Sample temperature\cr
+[,40] \tab MEASTEMP \tab \code{integer} \tab 06-08 \tab Measured temperature\cr
+[,41] \tab AN_TEMP \tab \code{numeric} \tab 03-08 \tab Annealing temperature\cr
+[,42] \tab AN_TIME \tab \code{numeric} \tab 03-08 \tab Annealing time\cr
+[,43] \tab TOLDELAY \tab \code{integer} \tab 03-08 \tab TOL 'delay' channels\cr
+[,44] \tab TOLON \tab \code{integer} \tab 03-08 \tab TOL 'on' channels\cr
+[,45] \tab TOLOFF \tab \code{integer} \tab 03-08 \tab TOL 'off' channels\cr
+[,46] \tab IRR_TIME \tab \code{numeric} \tab 03-08 \tab Irradiation time\cr
+[,47] \tab IRR_TYPE \tab \code{integer} \tab 03-08 \tab Irradiation type (alpha, beta or gamma)\cr
+[,48] \tab IRR_UNIT \tab \code{integer} \tab 03-04 \tab Irradiation unit (Gy, Rads, secs, mins, hrs)\cr
+[,49] \tab IRR_DOSERATE \tab \code{numeric} \tab 06-08 \tab Irradiation dose rate (Gy/s)\cr
+[,50] \tab IRR_DOSERATEERR \tab \code{numeric} \tab 06-08 \tab Irradiation dose rate error (Gy/s)\cr
+[,51] \tab TIMESINCEIRR \tab \code{integer} \tab 06-08 \tab Time since irradiation (s)\cr
+[,52] \tab TIMETICK \tab \code{numeric} \tab 06-08 \tab Time tick for pulsing (s)\cr
+[,53] \tab ONTIME \tab \code{integer} \tab 06-08 \tab On-time for pulsing (in time ticks)\cr
+[,54] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
+[,55] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+[,56] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+[,57] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
+[,58] \tab GATE_STOP \tab \code{integer} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
+[,59] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
+[,60] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
+[,61] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
+[,62] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
+[,63] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
+[,64] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
+[,65] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
+[,66] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
+[,67] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name\cr
+[,68] \tab DETECTOR_ID \tab \code{raw} \tab 07-08 \tab Detector ID\cr
+[,69] \tab LOWERFILTER_ID \tab \code{integer} \tab 07-08 \tab Lower filter ID in reader\cr
+[,70] \tab UPPERFILTER_ID \tab \code{integer} \tab 07-08 \tab Upper filter ID in reader\cr
+[,71] \tab ENOISEFACTOR \tab \code{numeric} \tab 07-08 \tab Excess noise filter, usage unknown \cr
+[,72] \tab MARKPOS_X1 \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr
+[,73] \tab MARKPOS_Y1 \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr
+[,74] \tab MARKPOS_X2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
+[,75] \tab MARKPOS_Y2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
+[,76] \tab MARKPOS_X3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
+[,77] \tab MARKPOS_Y3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
+[,78] \tab MARKPOS_X4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
+[,79] \tab MARKPOS_Y4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
+[,80] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
+[,81] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown
+} V = BIN-file version (RLum means that it does not depend on a specific BIN
+version)\cr
+
+Note that the \code{Risoe.BINfileData} object combines all values from
+different versions of the BIN-file; reserved bits are skipped, however,
+the function \code{\link{write_R2BIN}} resets arbitrary reserved bits. Invalid
+values for a specific version are set to \code{NA}. Furthermore, the
+internal R data types do not necessarily match the required data types for
+the BIN-file data import! Data types are converted during data import.\cr
+
+\bold{LTYPE} values
+
+\tabular{rll}{ [,0] \tab TL \tab: Thermoluminescence \cr [,1] \tab OSL \tab:
+Optically stimulated luminescence \cr [,2] \tab IRSL \tab: Infrared
+stimulated luminescence \cr [,3] \tab M-IR \tab: Infrared monochromator
+scan\cr [,4] \tab M-VIS \tab: Visible monochromator scan\cr [,5] \tab TOL
+\tab: Thermo-optical luminescence \cr [,6] \tab TRPOSL \tab: Time Resolved
+Pulsed OSL\cr [,7] \tab RIR \tab: Ramped IRSL\cr [,8] \tab RBR \tab: Ramped
+(Blue) LEDs\cr [,9] \tab USER \tab: User defined\cr [,10] \tab POSL \tab:
+Pulsed OSL \cr [,11] \tab SGOSL \tab: Single Grain OSL\cr [,12] \tab RL
+\tab: Radio Luminescence \cr [,13] \tab XRF \tab: X-ray Fluorescence }
+
+\bold{DTYPE} values \tabular{rll}{ [,0] \tab 0 \tab Natural \cr [,1] \tab 1
+\tab N+dose \cr [,2] \tab 2 \tab Bleach \cr [,3] \tab 3 \tab Bleach+dose \cr
+[,4] \tab 4 \tab Natural (Bleach) \cr [,5] \tab 5 \tab N+dose (Bleach) \cr
+[,6] \tab 6 \tab Dose \cr [,7] \tab 7 \tab Background }
+
+\bold{LIGHTSOURCE} values \tabular{rll}{ [,0] \tab 0 \tab None \cr [,1] \tab
+1 \tab Lamp \cr [,2] \tab 2 \tab IR diodes/IR Laser \cr [,3] \tab 3 \tab
+Calibration LED \cr [,4] \tab 4 \tab Blue Diodes \cr [,5] \tab 5 \tab White
+light \cr [,6] \tab 6 \tab Green laser (single grain) \cr [,7] \tab 7 \tab IR
+laser (single grain) }
+
+(information on the BIN/BINX file format was kindly provided by Risoe, DTU
+Nutech)
+}
+\section{Objects from the Class}{
+ Objects can be created by calls of the form
+\code{new("Risoe.BINfileData", ...)}.
+}
+
+\section{Function version}{
+ 0.3.0
+}
+\examples{
+
+showClass("Risoe.BINfileData")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Risoe DTU, 2013. The Sequence Editor User Manual - Feb 2013 and Risoe DTU, 2016. The
+Sequence Editor User Manual - February 2016
+
+\code{http://www.nutech.dtu.dk/}
+}
+\seealso{
+\code{\link{plot_Risoe.BINfileData}}, \code{\link{read_BIN2R}},
+\code{\link{write_R2BIN}}, \code{\link{merge_Risoe.BINfileData}},
+\code{\link{Risoe.BINfileData2RLum.Analysis}}
+}
+\keyword{classes}
+
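In practice such objects come from read_BIN2R() and are inspected via the
METADATA slot. A sketch; the file name is a placeholder for a real
BIN/BINX file:

    library(Luminescence)

    ## 'my_measurement.binx' is a placeholder file name
    bin <- read_BIN2R("my_measurement.binx")

    ## inspect selected metadata columns, e.g. position and curve type
    head(bin@METADATA[, c("POSITION", "LTYPE", "NPOINTS")])
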
diff --git a/man/Risoe.BINfileData2RLum.Analysis.Rd b/man/Risoe.BINfileData2RLum.Analysis.Rd
new file mode 100644
index 0000000..96be78b
--- /dev/null
+++ b/man/Risoe.BINfileData2RLum.Analysis.Rd
@@ -0,0 +1,87 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Risoe.BINfileData2RLum.Analysis.R
+\name{Risoe.BINfileData2RLum.Analysis}
+\alias{Risoe.BINfileData2RLum.Analysis}
+\title{Convert Risoe.BINfileData object to an RLum.Analysis object}
+\usage{
+Risoe.BINfileData2RLum.Analysis(object, pos = NULL, grain = NULL,
+  run = NULL, set = NULL, ltype = NULL, dtype = NULL,
+  protocol = "unknown", txtProgressBar = FALSE)
+}
+\arguments{
+\item{object}{\code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
+\code{Risoe.BINfileData} object}
+
+\item{pos}{\code{\link{numeric}} (optional): position number of the
+\code{Risoe.BINfileData} object for which the curves are stored in the
+\code{RLum.Analysis} object. If \code{length(pos) > 1} a list of \code{RLum.Analysis} objects
+is returned. If nothing is provided, every position will be converted. If the position is not valid, \code{NA} is
+returned.}
+
+\item{grain}{\code{\link{vector}, \link{numeric}} (optional): grain number from
+the measurement to limit the converted data set (e.g., \code{grain =
+c(1:48)}). Please be aware that this option may lead to unwanted effects, as the output
+is strictly limited to the chosen grain number for all position numbers}
+
+\item{run}{\code{\link{vector}, \link{numeric}} (optional): run number from
+the measurement to limit the converted data set (e.g., \code{run =
+c(1:48)}).}
+
+\item{set}{\code{\link{vector}, \link{numeric}} (optional): set number from
+the measurement to limit the converted data set (e.g., \code{set =
+c(1:48)}).}
+
+\item{ltype}{\code{\link{vector}, \link{character}} (optional): curve type
+to limit the converted data. Commonly allowed values are: \code{IRSL}, \code{OSL},
+\code{TL}, \code{RIR}, \code{RBR} and \code{USER} (see also \code{\linkS4class{Risoe.BINfileData}})}
+
+\item{dtype}{\code{\link{vector}, \link{character}} (optional): data type to
+limit the converted data. Commonly allowed values are listed in \code{\linkS4class{Risoe.BINfileData}}}
+
+\item{protocol}{\code{\link{character}} (optional): sets protocol type for
+analysis object. Value may be used by subsequent analysis functions.}
+
+\item{txtProgressBar}{\link{logical} (with default): enables or disables
+\code{\link{txtProgressBar}}.}
+}
+\value{
+Returns an \code{\linkS4class{RLum.Analysis}} object.
+}
+\description{
+Converts values from one specific position of a Risoe.BINfileData S4-class
+object to an RLum.Analysis object.
+}
+\details{
+The \code{\linkS4class{RLum.Analysis}} object requires a set of curves for
+specific further protocol analyses. However, the
+\code{\linkS4class{Risoe.BINfileData}} usually contains a set of curves for
+different aliquots and different protocol types that may be mixed up.
+Therefore, a conversion is needed.
+}
+\note{
+The \code{protocol} argument of the \code{\linkS4class{RLum.Analysis}}
+object is set to 'unknown' if not stated otherwise.
+}
+\section{Function version}{
+ 0.4.1 (2016-09-09 10:32:17)
+}
+\examples{
+
+##load data
+data(ExampleData.BINfileData, envir = environment())
+
+##convert values for position 1
+Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}}, \code{\link{read_BIN2R}}
+}
+\keyword{manip}
+
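The filter arguments combine. A short sketch restricting the conversion to
one position and one curve type, continuing the example data above:

    library(Luminescence)
    data(ExampleData.BINfileData, envir = environment())

    ## convert only the OSL curves from position 1
    Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1, ltype = "OSL")
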
diff --git a/man/Second2Gray.Rd b/man/Second2Gray.Rd
new file mode 100644
index 0000000..c7f9253
--- /dev/null
+++ b/man/Second2Gray.Rd
@@ -0,0 +1,106 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Second2Gray.R
+\name{Second2Gray}
+\alias{Second2Gray}
+\title{Converting equivalent dose values from seconds (s) to gray (Gy)}
+\usage{
+Second2Gray(data, dose.rate, error.propagation = "omit")
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} (\bold{required}): input values,
+structure: data (\code{values[,1]}) and data error (\code{values[,2]}) are
+required}
+
+\item{dose.rate}{\code{\linkS4class{RLum.Results}} or \code{\link{data.frame}} or \code{\link{numeric}}
+(\bold{required}): \code{RLum.Results} needs to originate from the
+function \code{\link{calc_SourceDoseRate}}; for a \code{vector}: dose rate in
+Gy/s and dose rate error in Gy/s}
+
+\item{error.propagation}{\code{\link{character}} (with default): error propagation method used for error
+calculation (\code{omit}, \code{gaussian} or \code{absolute}), see details for further
+information}
+}
+\value{
+Returns a \link{data.frame} with converted values.
+}
+\description{
+Conversion of absorbed radiation dose in seconds (s) to the SI unit gray
+(Gy) including error propagation. Normally used for equivalent dose data.
+}
+\details{
+Calculation of De values from seconds (s) to gray (Gy) \deqn{De [Gy] = De
+[s] * Dose Rate [Gy/s]} \cr
+
+Provided error propagation methods for error calculation (with 'se' as the standard error
+and 'DR' as the dose rate of the beta-source):\cr
+
+\bold{(1) \code{omit}} (default)\cr
+
+\deqn{se(De) [Gy] = se(De) [s] * DR [Gy/s]}
+
+In this case the standard error of the dose rate of the beta-source is treated as systematic
+(i.e. non-random), and its error propagation is omitted. However, the error must be considered
+during calculation of the final age (cf. Aitken, 1985, pp. 242). This approach can be seen as
+method (2) (gaussian) for the case that the (random) standard error of the beta-source calibration
+is 0. Which particular method is appropriate depends on the situation and cannot be prescribed.
+
+\bold{(2) \code{gaussian}} error propagation \cr
+
+\deqn{se(De) [Gy] = \sqrt((DR [Gy/s] * se(De) [s])^2 + (De [s] * se(DR) [Gy/s])^2)}
+
+Applicable under the assumption that the errors of De and DR are uncorrelated.
+
+\bold{(3) \code{absolute}} error propagation \cr
+
+\deqn{se(De) [Gy]= abs(DR [Gy/s] * se(De) [s]) + abs(De [s] * se(DR) [Gy/s])}
+
+Applicable under the assumption that the errors of De and DR are correlated.
+}
+\note{
+If no or an unknown error propagation method is given, the execution of the function is
+stopped. Furthermore, if a \code{data.frame} is provided for the dose rate values it has to
+be of the same length as the data frame provided with the argument \code{data}.
+}
+\section{Function version}{
+ 0.6.0 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##(A) for known source dose rate at date of measurement
+## - load De data from the example data help file
+data(ExampleData.DeValues, envir = environment())
+## - convert De(s) to De(Gy)
+Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
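+
+## - a quick manual cross-check of the conversion for the first De value
+## (the conversion is simply De [s] * dose rate [Gy/s], see details)
+ExampleData.DeValues$BT998[1, 1] * 0.0438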
+
+
+
+
+
+##(B) for source dose rate calibration data
+## - calculate source dose rate first
+dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+                                  calib.date = "2014-12-19",
+                                  calib.dose.rate = 0.0438,
+                                  calib.error = 0.0019)
+# read example data
+data(ExampleData.DeValues, envir = environment())
+
+# apply dose.rate to convert De(s) to De(Gy)
+Second2Gray(ExampleData.DeValues$BT998, dose.rate)
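+
+##(C) the same conversion with the two alternative error propagation
+## methods described in the details section (dose rate values as in (A))
+Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019),
+            error.propagation = "gaussian")
+Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019),
+            error.propagation = "absolute")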
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Michael Dietze, GFZ Potsdam (Germany),\cr Margret C. Fuchs, HZDR,
+Helmholtz-Institute Freiberg for Resource Technology
+(Germany)
+\cr R Luminescence Package Team}
+\references{
+Aitken, M.J., 1985. Thermoluminescence dating. Academic Press.
+}
+\seealso{
+\code{\link{calc_SourceDoseRate}}
+}
+\keyword{manip}
+
diff --git a/man/analyse_IRSAR.RF.Rd b/man/analyse_IRSAR.RF.Rd
new file mode 100644
index 0000000..0385e33
--- /dev/null
+++ b/man/analyse_IRSAR.RF.Rd
@@ -0,0 +1,325 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_IRSAR.RF.R
+\name{analyse_IRSAR.RF}
+\alias{analyse_IRSAR.RF}
+\title{Analyse IRSAR RF measurements}
+\usage{
+analyse_IRSAR.RF(object, sequence_structure = c("NATURAL", "REGENERATED"),
+  RF_nat.lim = NULL, RF_reg.lim = NULL, method = "FIT",
+  method.control = NULL, test_parameters = NULL, n.MC = 10,
+  txtProgressBar = TRUE, plot = TRUE, plot_reduced = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} or a \code{\link{list}} of \code{RLum.Analysis} objects (\bold{required}): input
+object containing data for protocol analysis. The function expects to find at least two curves in the
+\code{\linkS4class{RLum.Analysis}} object: (1) RF_nat, (2) RF_reg. If a \code{list} is provided as
+input all other parameters can be provided as \code{list} as well to gain full control.}
+
+\item{sequence_structure}{\code{\link{vector}} \link{character} (with
+default): specifies the general sequence structure. Allowed steps are
+\code{NATURAL}, \code{REGENERATED}. In addition any other character is
+allowed in the sequence structure; such curves will be ignored during the analysis.}
+
+\item{RF_nat.lim}{\code{\link{vector}} (with default): set minimum and maximum
+channel range for natural signal fitting and sliding. If only one value is provided this
+will be treated as minimum value and the maximum limit will be added automatically.}
+
+\item{RF_reg.lim}{\code{\link{vector}} (with default): set minimum and maximum
+channel range for regenerated signal fitting and sliding. If only one value is provided this
+will be treated as minimum value and the maximum limit will be added automatically.}
+
+\item{method}{\code{\link{character}} (with default): setting method applied
+for the data analysis. Possible options are \code{"FIT"} or \code{"SLIDE"}.}
+
+\item{method.control}{\code{\link{list}} (optional): parameters to control the method that can
+be passed to the chosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
+'minFactor' and for (2) \code{method = "SLIDE"}: 'correct_onset', 'show_density', 'show_fit', 'trace'.
+See details.}
+
+\item{test_parameters}{\code{\link{list}} (with default): set test parameters.
+Supported parameters are: \code{curves_ratio}, \code{residuals_slope} (only for
+\code{method = "SLIDE"}), \code{curves_bounds}, \code{dynamic_ratio},
+\code{lambda}, \code{beta} and \code{delta.phi}. Allowed input: \code{\link{numeric}}
+values, \code{NA} and \code{NULL} (see details for further information)}
+
+\item{n.MC}{\code{\link{numeric}} (with default): set number of Monte
+Carlo runs for start parameter estimation (\code{method = "FIT"}) or
+error estimation (\code{method = "SLIDE"}). Note: Large values will
+significantly increase the computation time}
+
+\item{txtProgressBar}{\code{\link{logical}} (with default): enables (\code{TRUE}) or
+disables (\code{FALSE}) the progress bar during MC runs}
+
+\item{plot}{\code{\link{logical}} (with default): plot output (\code{TRUE}
+or \code{FALSE})}
+
+\item{plot_reduced}{\code{\link{logical}} (optional): provides a reduced plot output if enabled
+to allow common R plot combinations, e.g., \code{par(mfrow(...))}. If \code{TRUE} no residual plot
+is returned; it has no effect if \code{plot = FALSE}}
+
+\item{\dots}{further arguments that will be passed to the plot output.
+Currently supported arguments are \code{main}, \code{xlab}, \code{ylab},
+\code{xlim}, \code{ylim}, \code{log}, \code{legend} (\code{TRUE/FALSE}),
+\code{legend.pos}, \code{legend.text} (passes argument to x,y in
+\code{\link[graphics]{legend}}), \code{xaxt}}
+}
+\value{
+A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+returned:\cr
+
+\bold{@data}\cr
+$ data: \code{\link{data.frame}} table with De and corresponding values\cr
+..$ DE : \code{numeric}: the obtained equivalent dose\cr
+..$ DE.ERROR : \code{numeric}: (only method = "SLIDE") standard deviation obtained from MC runs \cr
+..$ DE.LOWER : \code{numeric}: 2.5\% quantile for De values obtained by MC runs \cr
+..$ DE.UPPER : \code{numeric}: 97.5\% quantile for De values obtained by MC runs  \cr
+..$ DE.STATUS  : \code{character}: test parameter status\cr
+..$ RF_NAT.LIM  : \code{character}: used RF_nat curve limits \cr
+..$ RF_REG.LIM : \code{character}: used RF_reg curve limits\cr
+..$ POSITION : \code{integer}: (optional) position of the curves\cr
+..$ DATE : \code{character}: (optional) measurement date\cr
+..$ SEQUENCE_NAME : \code{character}: (optional) sequence name\cr
+..$ UID : \code{character}: unique data set ID \cr
+$ test_parameters : \code{\link{data.frame}} table with test parameters \cr
+$ fit : \code{\link{nls}} object (\code{nlsModel}) \cr
+$ slide : \code{\link{list}} data from the sliding process, including the sliding matrix\cr
+
+\bold{@info}\cr
+$ call : \code{\link[methods]{language-class}}: the original function call \cr
+
+The output (\code{data}) should be accessed using the
+function \code{\link{get_RLum}}
+}
+\description{
+Function to analyse IRSAR RF measurements on K-feldspar samples, performed
+using the protocol according to Erfurt et al. (2003) and beyond.
+}
+\details{
+The function performs an IRSAR analysis described for K-feldspar samples by
+Erfurt et al. (2003) assuming a negligible sensitivity change of the RF
+signal.\cr
+
+\bold{General Sequence Structure} (according to Erfurt et al.
+(2003)) \enumerate{
+
+\item Measure the IR-RF intensity of the natural dose for a few seconds (\eqn{RF_{nat}})
+\item Bleach the sample under solar conditions for at least 30 min without changing the geometry
+\item Wait for at least one hour
+\item Regenerate the IR-RF signal to at least the natural level (measuring \eqn{RF_{reg}})
+\item Fit the data with a stretched exponential function
+\item Calculate the palaeodose \eqn{D_{e}} using the parameters from the
+fitting}
+
+Two methods are supported to obtain the \eqn{D_{e}}: \code{method = "FIT"} and
+\code{method = "SLIDE"}:
+
+\bold{\code{method = "FIT"}}\cr
+
+The principle is described above and follows the original suggestions by
+Erfurt et al., 2003. For the fitting the mean count value of the RF_nat curve is used.
+
+Function used for the fitting (according to Erfurt et al. (2003)): \cr
+
+\deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta}
+with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the initial
+IR-RF flux, \eqn{\Delta\phi} the dose dependent change of the IR-RF flux,
+\eqn{\lambda} the exponential parameter, \eqn{D} the dose and \eqn{\beta}
+the dispersive factor.\cr\cr To obtain the palaeodose \eqn{D_{e}} the
+function is solved for the dose:\cr \deqn{D_{e} = ln(1 - ((\phi_{0} -
+\phi(D))/\Delta\phi)^{1/\beta})/-\lambda}\cr The fitting is done
+using the \code{port} algorithm of the \code{\link{nls}} function.\cr
+
+\bold{\code{method = "SLIDE"}}\cr
+
+For this method the natural curve is slid along the x-axis until
+congruence with the regenerated curve is reached. Instead of fitting, this
+allows working with the original data without the need for any physical
+model. This approach was introduced for RF curves by Buylaert et al., 2012
+and Lapp et al., 2012.
+
+Here the sliding is done by searching for the minimum of the squared residuals.\cr
+
+\bold{\code{method.control}}\cr
+
+To keep the generic argument list as clear as possible, arguments to control the methods
+for De estimation are all preset with meaningful default parameters and can be
+handled using the argument \code{method.control} only, e.g.,
+\code{method.control = list(trace = TRUE)}. Supported arguments are:\cr
+
+\tabular{lll}{
+ARGUMENT       \tab METHOD               \tab DESCRIPTION\cr
+\code{trace}   \tab \code{FIT}, \code{SLIDE} \tab as in \code{\link{nls}}; shows sum of squared residuals\cr
+\code{maxiter} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
+\code{warnOnly} \tab \code{FIT}           \tab as in \code{\link{nls}}\cr
+\code{minFactor} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
+\code{correct_onset} \tab \code{SLIDE}      \tab \code{\link{logical}} (with default): if enabled,
+shifts the curves along the x-axis by the first channel, as light is expected in the first channel.
+The default value is \code{TRUE}.\cr
+\code{show_density} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
+enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr
+\code{show_fit} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
+enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr
+\code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (with default):
+controls the number of MC runs within the sliding (assessing the possible minimum values).
+The default is \code{n.MC = 1000}. Note: This parameter is not the same as the one controlled by the
+function argument \code{n.MC} \cr
+}
+
+\bold{Error estimation}\cr
+
+For \bold{\code{method = "FIT"}} the asymmetric error range is obtained by using the 2.5 \% (lower) and
+the 97.5 \% (upper) quantiles of the \eqn{RF_{nat}} curve for calculating the \eqn{D_{e}} error range.\cr
+
+For \bold{\code{method = "SLIDE"}} the error is obtained by bootstrapping the residuals of the slided
+curve to construct new natural curves for a Monte Carlo simulation. The error is returned in two
+ways: (a) the standard deviation of the herewith obtained \eqn{D_{e}} from the MC runs and (b) the confidence
+interval using the  2.5 \% (lower) and the 97.5 \% (upper) quantiles. The results of the MC runs
+are returned with the function output. \cr
+
+\bold{Test parameters}\cr
+
+The argument \code{test_parameters} allows passing thresholds for several test parameters,
+which will be evaluated during the function run. If a threshold is set and exceeded, the
+test parameter status will be set to "FAILED". Intentionally this argument is not termed
+'rejection criteria', as not all test parameters are evaluated for both methods and some parameters
+are calculated but not evaluated by default. Common to all parameters are the allowed argument options
+\code{NA} and \code{NULL}. If a parameter is set to \code{NA} the value is calculated but the
+result will not be evaluated, i.e. it has no effect on the status ("OK" or "FAILED") of the parameter.
+Setting a parameter to \code{NULL} disables the parameter entirely and it will also be
+removed from the function output. This might be useful in cases where a particular parameter
+requires long computation times. Currently supported parameters are:
+
+\code{curves_ratio} \code{\link{numeric}} (default: \code{1.001}):\cr
+
+The ratio of \eqn{RF_{nat}} over \eqn{RF_{reg}} in the range of \eqn{RF_{nat}} is calculated
+and should not exceed the threshold value. \cr
+
+\code{intersection_ratio} \code{\link{numeric}} (default: \code{NA}):\cr
+
+Calculated as the absolute difference from 1 of the ratio of the integrals of the normalised RF-curves.
+This value indicates the intersection of the RF-curves and should be close to 0 if the curves
+have a similar shape. For this calculation, first the corresponding time-count pair value on the RF_reg
+curve is obtained using the maximum count value of the RF_nat curve, and only this segment (fitting to
+the RF_nat curve) on the RF_reg curve is taken for further calculating this ratio. If nothing is
+found at all, \code{Inf} is returned. \cr
+
+\code{residuals_slope} \code{\link{numeric}} (default: \code{NA}; only for \code{method = "SLIDE"}): \cr
+
+A linear function is fitted on the residuals after sliding.
+The corresponding slope can be used to discard values as a high (positive, negative) slope
+may indicate that both curves are fundamentally different and the method cannot be applied at all.
+Per default the value of this parameter is calculated but not evaluated. \cr
+
+\code{curves_bounds} \code{\link{numeric}} (default: \eqn{max(RF_{reg_counts})}):\cr
+
+This measure uses the maximum time (x) value of the regenerated curve.
+The maximum time (x) value of the natural curve cannot be larger than this value. However, although
+this is not recommended, the value can be changed or disabled.\cr
+
+\code{dynamic_ratio} \code{\link{numeric}} (default: \code{NA}):\cr
+
+The dynamic ratio of the regenerated curve is calculated as the ratio of the minimum and maximum count values.
+
+\code{lambda}, \code{beta} and \code{delta.phi}
+\code{\link{numeric}} (default: \code{NA}; \code{method = "SLIDE"}): \cr
+
+The stretched exponential function suggested by Erfurt et al. (2003) describing the decay of
+the RF signal, comprises several parameters that might be useful to evaluate the shape of the curves.
+For \code{method = "FIT"} this parameter is obtained during the fitting, for \code{method = "SLIDE"} a
+rather rough estimation is made using the function \code{\link[minpack.lm]{nlsLM}} and the equation
+given above. Note: As this procedure requests more computation time, setting of one of these three parameters
+to \code{NULL} also prevents a calculation of the remaining two.
+}
+\note{
+\bold{[THIS FUNCTION HAS BETA-STATUS]}\cr
+
+This function assumes that there is no sensitivity change during the
+measurements (natural vs. regenerated signal), which is in contrast to the
+findings from Buylaert et al. (2012). Furthermore, in the course of ongoing research this function has
+been almost fully re-written, but further thorough tests are still pending!
+However, as a lot of new package functionality was introduced with the changes made
+for this function, and to allow a part of such tests, the renewed code was made part
+of the current package.\cr
+}
+\section{Function version}{
+ 0.6.11 (2016-07-16 11:28:11)
+}
+\examples{
+
+##load data
+data(ExampleData.RLum.Analysis, envir = environment())
+
+##(1) perform analysis using the method 'FIT'
+results <- analyse_IRSAR.RF(object = IRSAR.RF.Data)
+
+##show De results and test parameter results
+get_RLum(results, data.object = "data")
+get_RLum(results, data.object = "test_parameters")
+
+##(2) perform analysis using the method 'SLIDE'
+results <- analyse_IRSAR.RF(object = IRSAR.RF.Data, method = "SLIDE", n.MC = 1)
+
+\dontrun{
+##(3) perform analysis using the method 'SLIDE' and method control option
+## 'trace'
+results <- analyse_IRSAR.RF(
+ object = IRSAR.RF.Data,
+ method = "SLIDE",
+ method.control = list(trace = TRUE))
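+
+##(4) perform analysis using the method 'SLIDE' with a set test parameter
+## (a sketch; the threshold value below is chosen for illustration only)
+results <- analyse_IRSAR.RF(
+ object = IRSAR.RF.Data,
+ method = "SLIDE",
+ test_parameters = list(residuals_slope = 20e-06))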
+
+}
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T.,
+2012. IR-RF dating of sand-sized K-feldspar extracts: A test of accuracy.
+Radiation Measurements 44 (5-6), 560-565. doi: 10.1016/j.radmeas.2012.06.021
+
+Erfurt, G., Krbetschek, M.R., 2003. IRSAR - A single-aliquot
+regenerative-dose dating protocol applied to the infrared radiofluorescence
+(IR-RF) of coarse- grain K-feldspar. Ancient TL 21, 35-42.
+
+Erfurt, G., 2003. Infrared luminescence of Pb+ centres in potassium-rich
+feldspars. physica status solidi (a) 200, 429-438.
+
+Erfurt, G., Krbetschek, M.R., 2003. Studies on the physics of the infrared
+radioluminescence of potassium feldspar and on the methodology of its
+application to sediment dating. Radiation Measurements 37, 505-510.
+
+Erfurt, G., Krbetschek, M.R., Bortolot, V.J., Preusser, F., 2003. A fully
+automated multi-spectral radioluminescence reading system for geochronometry
+and dosimetry. Nuclear Instruments and Methods in Physics Research Section
+B: Beam Interactions with Materials and Atoms 207, 487-499.
+
+Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New
+luminescence measurement facilities in retrospective dosimetry. Radiation
+Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006
+
+Trautmann, T., 2000. A study of radioluminescence kinetics of natural
+feldspar dosimeters: experiments and simulations. Journal of Physics D:
+Applied Physics 33, 2304-2310.
+
+Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1998.
+Investigations of feldspar radioluminescence: potential for a new dating
+technique. Radiation Measurements 29, 421-425.
+
+Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1999. Feldspar
+radioluminescence: a new dating method and its physical background. Journal
+of Luminescence 85, 45-58.
+
+Trautmann, T., Krbetschek, M.R., Stolz, W., 2000. A systematic study of the
+radioluminescence properties of single feldspar grains. Radiation
+Measurements 32, 685-690.
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
+\code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}
+}
+\keyword{datagen}
+
diff --git a/man/analyse_SAR.CWOSL.Rd b/man/analyse_SAR.CWOSL.Rd
new file mode 100644
index 0000000..e86fc3a
--- /dev/null
+++ b/man/analyse_SAR.CWOSL.Rd
@@ -0,0 +1,211 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_SAR.CWOSL.R
+\name{analyse_SAR.CWOSL}
+\alias{analyse_SAR.CWOSL}
+\title{Analyse SAR CW-OSL measurements}
+\usage{
+analyse_SAR.CWOSL(object, signal.integral.min, signal.integral.max,
+  background.integral.min, background.integral.max, rejection.criteria = NULL,
+  dose.points = NULL, mtext.outer, plot = TRUE, plot.single = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}): input
+object containing data for analysis, alternatively a \code{\link{list}} of
+\code{\linkS4class{RLum.Analysis}} objects can be provided.}
+
+\item{signal.integral.min}{\code{\link{integer}} (\bold{required}): lower
+bound of the signal integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+as the minimum signal integral for the Tx curve.}
+
+\item{signal.integral.max}{\code{\link{integer}} (\bold{required}): upper
+bound of the signal integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+as the maximum signal integral for the Tx curve.}
+
+\item{background.integral.min}{\code{\link{integer}} (\bold{required}):
+lower bound of the background integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+as the minimum background integral for the Tx curve.}
+
+\item{background.integral.max}{\code{\link{integer}} (\bold{required}):
+upper bound of the background integral. Can be a \code{\link{list}} of \code{\link{integer}s}, if \code{object} is
+of type \code{\link{list}}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted
+as the maximum background integral for the Tx curve.}
+
+\item{rejection.criteria}{\code{\link{list}} (with default): provide a named list
+and set rejection criteria in percentage for the further calculation. Can be a \code{\link{list}} in
+a \code{\link{list}}, if \code{object} is of type \code{\link{list}}.
+
+Allowed arguments are \code{recycling.ratio}, \code{recuperation.rate},
+\code{palaeodose.error}, \code{testdose.error} and \code{exceed.max.regpoint = TRUE/FALSE}.
+Example: \code{rejection.criteria = list(recycling.ratio = 10)}.
+Per default all numerical values are set to 10, \code{exceed.max.regpoint = TRUE}.
+Every criterion can be set to \code{NA}. In this case the values are calculated but not
+considered, i.e. the RC.Status is always \code{'OK'}}
+
+\item{dose.points}{\code{\link{numeric}} (optional): a numeric vector
+containing the dose point values. Using this argument overwrites dose point
+values in the signal curves. Can be a \code{\link{list}} of \code{\link{numeric}} vectors,
+if \code{object} is of type \code{\link{list}}}
+
+\item{mtext.outer}{\code{\link{character}} (optional): option to provide an
+outer margin mtext. Can be a \code{\link{list}} of \code{\link{character}s},
+if \code{object} is of type \code{\link{list}}}
+
+\item{plot}{\code{\link{logical}} (with default): enables or disables plot
+output.}
+
+\item{plot.single}{\code{\link{logical}} (with default) or
+\code{\link{numeric}} (optional): single plot output (\code{TRUE/FALSE}) to
+allow for plotting the results in single plot windows. If a numerice vector
+is provided the plots can be selected individually, i.e. \code{plot.single =
+c(1,2,3,4)} will plot the TL and Lx, Tx curves but not the legend (5) or the
+growth curve (6), (7) and (8) belong to rejection criteria plots. Requires
+\code{plot = TRUE}.}
+
+\item{\dots}{further arguments that will be passed to the function
+\code{\link{plot_GrowthCurve}} or \code{\link{calc_OSLLxTxRatio}}
+(supported: \code{background.count.distribution}, \code{sigmab}, \code{sig0}). \bold{Please note} that
+if you consider using the early light subtraction method you should provide your own \code{sigmab}
+value!}
+}
+\value{
+A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+returned containing the following elements:
+\item{De.values}{\link{data.frame} containing De-values, De-error and
+further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all
+calculated Lx/Tx values including signal, background counts and the dose
+points} \item{rejection.criteria}{\link{data.frame} with values that might
+be used as rejection criteria. NA is produced if no R0 dose point exists.}
+\item{Formula}{\link{formula} formula that has been used for the growth
+curve fitting}\cr The output should be accessed using the function
+\code{\link{get_RLum}}.
+}
+\description{
+The function performs a SAR CW-OSL analysis on an
+\code{\linkS4class{RLum.Analysis}} object including growth curve fitting.
+}
+\details{
+The function performs an analysis for standard SAR protocol measurements as
+introduced by Murray and Wintle (2000) with CW-OSL curves. For the
+calculation of the Lx/Tx value the function \link{calc_OSLLxTxRatio} is
+used. For \bold{changing the way the Lx/Tx error is calculated} use the argument
+\code{background.count.distribution} and \code{sigmab}, which will be passed to the function
+\link{calc_OSLLxTxRatio}.\cr\cr
+
+\bold{Argument \code{object} is of type \code{list}}\cr\cr
+
+If the argument \code{object} is of type \code{\link{list}} containing \bold{only}
+\code{\linkS4class{RLum.Analysis}} objects, the function re-calls itself as often as there are
+elements in the list. This is useful if an entire measurement set is to be analysed without
+writing separate for-loops. To gain full control of the parameters (e.g., \code{dose.points}) for
+every aliquot (corresponding to one \code{\linkS4class{RLum.Analysis}} object in the list),
+the arguments can in this case be provided as \code{\link{list}}. This \code{list} should
+be of the same length as the \code{list} provided with the argument \code{object}, otherwise the function
+will create its own list of the requested length. The function output will be just one single \code{\linkS4class{RLum.Results}} object.
+
+Please be careful when using this option. It may allow fast and efficient data analysis, but
+the function may also break with an unclear error message due to wrong input data.\cr\cr
+
+\bold{Working with IRSL data}\cr\cr
+
+The function was originally designed to work just for 'OSL' curves,
+following the principles of the SAR protocol. An IRSL measurement protocol
+may follow this procedure, e.g., post-IR IRSL protocol (Thomsen et al.,
+2008). Therefore this function has been enhanced to work with IRSL data;
+however, the function is only capable of analysing curves that follow the
+SAR protocol structure, i.e., to analyse a post-IR IRSL protocol, curve data
+have to be pre-selected by the user to fit the standards of the SAR
+protocol, i.e., Lx,Tx,Lx,Tx and so on. \cr
+
+Example: Imagine the measurement contains pIRIR50 and pIRIR225 IRSL curves.
+Only one curve type can be analysed at the same time: The pIRIR50 curves or
+the pIRIR225 curves.\cr\cr
+
+\bold{Supported rejection criteria}\cr\cr \sQuote{recycling.ratio}:
+calculated for every repeated regeneration dose point.\cr
+
+\sQuote{recuperation.rate}: recuperation rate calculated by comparing the
+Lx/Tx values of the zero regeneration point with the Ln/Tn value (the Lx/Tx
+ratio of the natural signal). For methodological background see Aitken and
+Smith (1988).\cr
+
+\sQuote{testdose.error}: set the allowed error for the testdose, which per
+default should not exceed 10\%. The testdose error is calculated as Tx_net.error/Tx_net.
+
+\sQuote{palaeodose.error}: set the allowed error for the De value, which per
+default should not exceed 10\%.
+}
+\note{
+This function must not be mixed up with the function
+\code{\link{Analyse_SAR.OSLdata}}, which works with
+\link{Risoe.BINfileData-class} objects.\cr
+
+\bold{The function currently does only support 'OSL' or 'IRSL' data!}
+}
+\section{Function version}{
+ 0.7.5 (2016-07-16 11:28:11)
+}
+\examples{
+
+##load data
+##ExampleData.BINfileData contains two BINfileData objects
+##CWOSL.SAR.Data and TL.SAR.Data
+data(ExampleData.BINfileData, envir = environment())
+
+##transform the values from the first position in a RLum.Analysis object
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+
+##perform SAR analysis and set rejection criteria
+results <- analyse_SAR.CWOSL(
+  object = object,
+  signal.integral.min = 1,
+  signal.integral.max = 2,
+  background.integral.min = 900,
+  background.integral.max = 1000,
+  log = "x",
+  fit.method = "EXP",
+  rejection.criteria = list(
+    recycling.ratio = 10,
+    recuperation.rate = 10,
+    testdose.error = 10,
+    palaeodose.error = 10,
+    exceed.max.regpoint = TRUE)
+)
+
+##show De results
+get_RLum(results)
+
+##show LnTnLxTx table
+get_RLum(results, data.object = "LnLxTnTx.table")
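+
+\dontrun{
+##the list input described in the details section; here the same object
+##is simply duplicated for illustration, with the arguments given as lists
+results <- analyse_SAR.CWOSL(
+  object = list(object, object),
+  signal.integral.min = list(1, 1),
+  signal.integral.max = list(2, 2),
+  background.integral.min = list(900, 900),
+  background.integral.max = list(1000, 1000),
+  plot = FALSE)
+}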
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+after bleaching. Quaternary Science Reviews 7, 387-393.
+
+Duller, G., 2003. Distinguishing quartz and feldspar in single grain
+luminescence measurements. Radiation Measurements, 37 (2), 161-165.
+
+Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+improved single-aliquot regenerative-dose protocol. Radiation Measurements
+32, 57-73.
+
+Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory
+fading rates of various luminescence signals from feldspar-rich sediment
+extracts. Radiation Measurements 43, 1474-1486.
+doi:10.1016/j.radmeas.2008.06.002
+}
+\seealso{
+\code{\link{calc_OSLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+\code{\link{get_RLum}}
+}
+\keyword{datagen}
+\keyword{plot}
+
diff --git a/man/analyse_SAR.TL.Rd b/man/analyse_SAR.TL.Rd
new file mode 100644
index 0000000..b4ca750
--- /dev/null
+++ b/man/analyse_SAR.TL.Rd
@@ -0,0 +1,114 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_SAR.TL.R
+\name{analyse_SAR.TL}
+\alias{analyse_SAR.TL}
+\title{Analyse SAR TL measurements}
+\usage{
+analyse_SAR.TL(object, object.background, signal.integral.min,
+  signal.integral.max, sequence.structure = c("PREHEAT", "SIGNAL",
+  "BACKGROUND"), rejection.criteria = list(recycling.ratio = 10,
+  recuperation.rate = 10), dose.points, log = "", ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}): input
+object containing data for analysis}
+
+\item{object.background}{currently not used}
+
+\item{signal.integral.min}{\link{integer} (\bold{required}): requires the
+channel number for the lower signal integral bound (e.g.
+\code{signal.integral.min = 100})}
+
+\item{signal.integral.max}{\link{integer} (\bold{required}): requires the
+channel number for the upper signal integral bound (e.g.
+\code{signal.integral.max = 200})}
+
+\item{sequence.structure}{\link{vector} \link{character} (with default):
+specifies the general sequence structure. Three steps are allowed (
+\code{"PREHEAT"}, \code{"SIGNAL"}, \code{"BACKGROUND"}), in addition a
+parameter \code{"EXCLUDE"}. This allows excluding TL curves which are not
+relevant for the protocol analysis. (Note: non-TL curves are removed by default)}
+
+\item{rejection.criteria}{\link{list} (with default): list containing
+rejection criteria in percentage for the calculation.}
+
+\item{dose.points}{\code{\link{numeric}} (optional): option to set dose points manually}
+
+\item{log}{\link{character} (with default): a character string which
+contains "x" if the x axis is to be logarithmic, "y" if the y axis is to be
+logarithmic and "xy" or "yx" if both axes are to be logarithmic. See
+\link{plot.default}).}
+
+\item{\dots}{further arguments that will be passed to the function
+\code{\link{plot_GrowthCurve}}}
+}
+\value{
+A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
+returned containing the following elements:
+\item{De.values}{\link{data.frame} containing De-values and further
+parameters} \item{LnLxTnTx.values}{\link{data.frame} of all calculated Lx/Tx
+values including signal, background counts and the dose points.}
+\item{rejection.criteria}{\link{data.frame} with values that might be used
+as rejection criteria. NA is produced if no R0 dose point exists.}\cr\cr
+\bold{note:} the output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+The function performs a SAR TL analysis on a
+\code{\linkS4class{RLum.Analysis}} object including growth curve fitting.
+}
+\details{
+This function performs a SAR TL analysis on a set of curves. The SAR
+procedure in general is given by Murray and Wintle (2000). For the
+calculation of the Lx/Tx value the function \link{calc_TLLxTxRatio} is
+used.\cr\cr \bold{Provided rejection criteria}\cr\cr
+\sQuote{recycling.ratio}: calculated for every repeated regeneration dose
+point.\cr \sQuote{recuperation.rate}: recuperation rate calculated by
+comparing the Lx/Tx values of the zero regeneration point with the Ln/Tn
+value (the Lx/Tx ratio of the natural signal). For methodological
+background see Aitken and Smith (1988).\cr
+}
+\note{
+\bold{THIS IS A BETA VERSION}\cr\cr Non-TL curves will be removed
+from the input object without further warning.
+}
+\section{Function version}{
+ 0.1.5 (2016-07-16 11:28:11)
+}
+\examples{
+
+
+##load data
+data(ExampleData.BINfileData, envir = environment())
+
+##transform the values from the first position in a RLum.Analysis object
+object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3)
+
+##perform analysis
+analyse_SAR.TL(object,
+               signal.integral.min = 210,
+               signal.integral.max = 220,
+               log = "y",
+               fit.method = "EXP OR LIN",
+               sequence.structure = c("SIGNAL", "BACKGROUND"))
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
+after bleaching.  Quaternary Science Reviews 7, 387-393.
+
+Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
+improved single-aliquot regenerative-dose protocol. Radiation Measurements
+32, 57-73.
+}
+\seealso{
+\code{\link{calc_TLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+\code{\link{get_RLum}}
+}
+\keyword{datagen}
+\keyword{plot}
+
diff --git a/man/analyse_baSAR.Rd b/man/analyse_baSAR.Rd
new file mode 100644
index 0000000..7525f86
--- /dev/null
+++ b/man/analyse_baSAR.Rd
@@ -0,0 +1,386 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_baSAR.R
+\name{analyse_baSAR}
+\alias{analyse_baSAR}
+\title{Bayesian models (baSAR) applied on luminescence data}
+\usage{
+analyse_baSAR(object, XLS_file = NULL, aliquot_range = NULL,
+  source_doserate = NULL, signal.integral, signal.integral.Tx = NULL,
+  background.integral, background.integral.Tx = NULL, sigmab = 0,
+  sig0 = 0.025, distribution = "cauchy", baSAR_model = NULL,
+  n.MCMC = 1e+05, fit.method = "EXP", fit.force_through_origin = TRUE,
+  fit.includingRepeatedRegPoints = TRUE, method_control = list(),
+  digits = 3L, plot = TRUE, plot_reduced = TRUE, plot.single = FALSE,
+  verbose = TRUE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Results}} or
+\code{\link{character}} or \code{\link{list}} (\bold{required}):
+input object used for the Bayesian analysis. If a \code{character} is provided the function
+assumes a file connection and tries to import a BIN-file using the provided path. If a \code{list} is
+provided the list can only contain either \code{Risoe.BINfileData} objects or \code{character}s
+providing a file connection. Mixing of both types is not allowed. If an \code{\linkS4class{RLum.Results}}
+is provided the function directly starts with the Bayesian Analysis (see details)}
+
+\item{XLS_file}{\code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements)}
+
+\item{aliquot_range}{\code{\link{numeric}} (optional): allows to limit the range of the aliquots
+used for the analysis. This argument has only an effect if the argument \code{XLS_file} is used or
+the input is the previous output (i.e. is \code{\linkS4class{RLum.Results}}). In this case the
+new selection will add the aliquots to the removed aliquots table.}
+
+\item{source_doserate}{\code{\link{numeric}} \bold{(required)}: source dose rate of the beta-source used
+for the measurement and its uncertainty in Gy/s, e.g., \code{source_doserate = c(0.12, 0.04)}.
+The parameter can be provided as \code{list}, for the case that more than one BIN-file is provided, e.g.,
+\code{source_doserate = list(c(0.04, 0.004), c(0.05, 0.004))}.}
+
+\item{signal.integral}{\code{\link{vector}} (\bold{required}): vector with the
+limits for the signal integral used for the calculation, e.g., \code{signal.integral = c(1:5)}.
+Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{signal.integral.Tx}{\code{\link{vector}} (optional): vector with the
+limits for the signal integral for the Tx curve. If nothing is provided the
+value from \code{signal.integral} is used and it is ignored
+if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{background.integral}{\code{\link{vector}} (\bold{required}): vector with the
+bounds for the background integral.
+Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{background.integral.Tx}{\code{\link{vector}} (optional): vector with the
+limits for the background integral for the Tx curve. If nothing is provided the
+value from \code{background.integral} is used.
+Ignored if \code{object} is an \code{\linkS4class{RLum.Results}} object.
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{sigmab}{\code{\link{numeric}} (with default): option to set a manual value for
+the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
+calculation. The value should be provided as absolute squared count values, cf. \code{\link{calc_OSLLxTxRatio}}.
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{sig0}{\code{\link{numeric}} (with default): allow adding an extra component of error
+to the final Lx/Tx error value (e.g., instrumental error, see details in \code{\link{calc_OSLLxTxRatio}}).
+The parameter can be provided as \code{list}, see \code{source_doserate}.}
+
+\item{distribution}{\code{\link{character}} (with default): type of distribution that is used during
+Bayesian calculations for determining the Central dose and overdispersion values.
+Allowed inputs are \code{"cauchy"}, \code{"normal"} and \code{"log_normal"}.}
+
+\item{baSAR_model}{\code{\link{character}} (optional): option to provide an own modified or new model for the
+Bayesian calculation (see details). If an own model is provided the argument \code{distribution} is ignored
+and set to \code{'user_defined'}}
+
+\item{n.MCMC}{\code{\link{integer}} (with default): number of iterations for the Markov chain Monte Carlo (MCMC)
+simulations}
+
+\item{fit.method}{\code{\link{character}} (with default): fit method used for fitting the growth
+curve using the function \code{\link{plot_GrowthCurve}}. Here supported methods: \code{EXP},
+\code{EXP+LIN} and \code{LIN}}
+
+\item{fit.force_through_origin}{\code{\link{logical}} (with default): force fitting through origin}
+
+\item{fit.includingRepeatedRegPoints}{\code{\link{logical}} (with default):
+includes the recycling point (assumed to be measured during the last cycle)}
+
+\item{method_control}{\code{\link{list}} (optional): named list of control parameters that can be directly
+passed to the Bayesian analysis, e.g., \code{method_control = list(n.chains = 4)}.
+See details for further information}
+
+\item{digits}{\code{\link{integer}} (with default): round output to the number of given digits}
+
+\item{plot}{\code{\link{logical}} (with default): enables or disables plot output}
+
+\item{plot_reduced}{\code{\link{logical}} (with default): enables or disables the advanced plot output}
+
+\item{plot.single}{\code{\link{logical}} (with default): enables or disables single plots or plots
+arranged by analyse_baSAR}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables verbose mode}
+
+\item{...}{parameters that can be passed to the function \code{\link{calc_OSLLxTxRatio}} (almost full support)
+\code{\link[readxl]{read_excel}} (full support), \code{\link{read_BIN2R}} (\code{n.records},
+\code{position}, \code{duplicated.rm}), see details.}
+}
+\value{
+Function returns results numerically and graphically:\cr
+
+-----------------------------------\cr
+[ NUMERICAL OUTPUT ]\cr
+-----------------------------------\cr
+\bold{\code{RLum.Results}}-object\cr
+
+\bold{slot:} \bold{\code{@data}}\cr
+\tabular{lll}{
+\bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+ \code{$summary} \tab \code{data.frame} \tab statistical summary, including the central dose \cr
+ \code{$mcmc} \tab \code{mcmc} \tab object including raw output of \code{\link[rjags]{rjags}} \cr
+ \code{$models} \tab \code{character} \tab implemented models used in the baSAR-model core \cr
+ \code{$input_object} \tab \code{data.frame} \tab summarising table (same format as the XLS-file) including, e.g., Lx/Tx values\cr
+ \code{$removed_aliquots} \tab \code{data.frame} \tab table with removed aliquots (e.g., NaN, or Inf Lx/Tx values). If nothing was removed \code{NULL} is returned
+}
+
+\bold{slot:} \bold{\code{@info}}\cr
+
+The original function call\cr
+
+------------------------\cr
+[ PLOT OUTPUT ]\cr
+------------------------\cr
+
+\itemize{
+ \item (A) Ln/Tn curves with set integration limits,
+ \item (B) trace plots are returned by the baSAR-model, showing the convergence of the parameters (trace)
+ and the resulting kernel density plots. If \code{plot_reduced = FALSE} for every(!) dose a trace and
+ a density plot is returned (this may take a long time),
+ \item (C) dose plots showing the dose for every aliquot as boxplots with the
+ HPD marked within. If boxes are coloured 'orange' or 'red' the aliquot itself should be checked,
+ \item (D) the dose response curves resulting from the monitoring of the Bayesian modelling are
+ provided along with the Lx/Tx values and the HPD. Note: The number of curves displayed
+ is limited to 1000 (random choice) for performance reasons,
+ \item (E) the final plot is the De distribution as calculated using the conventional approach
+ and the central dose with the HPDs marked within.
+
+}
+
+\bold{Please note: If distribution was set to \code{log_normal} the central dose is given
+as geometric mean!}
+}
+\description{
+This function allows the application of Bayesian models on luminescence data, measured
+with the single-aliquot regenerative-dose (SAR, Murray and Wintle, 2000) protocol. In particular,
+it follows the idea proposed by Combes et al., 2015 of using a hierarchical model for estimating
+a central equivalent dose from a set of luminescence measurements. This function is (I) the adaptation
+of this approach for the R environment and (II) an extension and a technical refinement of the
+published code.\cr
+}
+\details{
+Internally the function consists of two parts: (I) the Bayesian core for the Bayesian calculations
+and applying the hierarchical model and (II) a data pre-processing part. The Bayesian core can be run
+independently, if the input data are sufficient (see below). The data pre-processing part was
+implemented to simplify the analysis for the user, as all needed data pre-processing is done
+by the function, i.e. in theory it is enough to provide a BIN/BINX-file with the SAR measurement
+data. For the Bayesian analysis the following information is needed from the SAR analysis of each aliquot:
+LxTx, the LxTx error and the dose values for all regeneration points.
+
+\bold{How is the systematic error contribution calculated?}\cr
+
+Standard errors provided with the source dose rate are (so far) considered as systematic uncertainties
+and added to the final central dose error by:
+
+\deqn{systematic.error = 1/n \sum SE(source.doserate)}
+
+\deqn{SE(central.dose.final) = \sqrt{SE(central.dose)^2 + systematic.error^2}}
+
+Please note that this approach is rather rough and can only be valid if the source dose rate
+errors, in case different readers had been used, are similar. In cases where more than
+one source dose rate is provided a warning is given.\cr
+
+\bold{Input / output scenarios}\cr
+
+Various inputs are allowed for this function. Unfortunately this makes the function handling rather
+complex, but at the same time very powerful. Available scenarios:\cr
+
+\bold{(1) - \code{object} is BIN-file or link to a BIN-file}
+
+It does not matter how the information of the BIN/BINX file is provided. The function
+supports (a) either a path to a file or directory or a \code{list} of file names or paths or (b)
+a \code{\linkS4class{Risoe.BINfileData}} object or a list of these objects. The latter can
+be produced by using the function \code{\link{read_BIN2R}}, but this function is called automatically
+if only a file name and/or a path is provided. In both cases these data become the input
+used for the analysis.
+
+\code{[XLS_file = NULL]}\cr
+
+If no XLS file (or data frame with the same format) is provided the function runs an automatic process that
+consists of the following steps:
+
+\itemize{
+ \item Select all valid aliquots using the function \code{\link{verify_SingleGrainData}}
+ \item Calculate Lx/Tx values using the function \code{\link{calc_OSLLxTxRatio}}
+ \item Calculate De values using the function \code{\link{plot_GrowthCurve}}
+}
+
+These processed data are subsequently used for the Bayesian analysis.
+
+\code{[XLS_file != NULL]}\cr
+
+If an XLS-file is provided or a \code{data.frame} providing similar information the pre-processing
+steps consists of the following steps:
+
+\itemize{
+ \item Calculate Lx/Tx values using the function \code{\link{calc_OSLLxTxRatio}}
+ \item Calculate De values using the function \code{\link{plot_GrowthCurve}}
+}
+
+This means the XLS file should contain a selection of the BIN-file names and the aliquots selected
+for the further analysis. This allows a manual selection of input data, as the automatic selection
+by \code{\link{verify_SingleGrainData}} might not be fully sufficient.\cr
+
+\bold{(2) - \code{object} is an \code{RLum.Results} object}
+
+If an \code{\linkS4class{RLum.Results}} object is provided as input and(!) this object was
+previously created by the function \code{analyse_baSAR()} itself, the pre-processing part
+is skipped and the function directly starts the Bayesian analysis. This option is very powerful
+as it allows to change parameters for the Bayesian analysis without the need to repeat
+the data pre-processing. If furthermore the argument \code{aliquot_range} is set, aliquots
+can be manually excluded based on previous runs. \cr
+
+\bold{\code{method_control}}\cr
+
+These are arguments that can be passed directly to the Bayesian calculation core, supported arguments
+are:
+
+\tabular{lll}{
+\bold{Parameter} \tab \bold{Type} \tab \bold{Description}\cr
+\code{lower_centralD} \tab \code{\link{numeric}} \tab sets the lower bound for the expected De range. Change it only if you know what you are doing!\cr
+\code{upper_centralD} \tab \code{\link{numeric}} \tab sets the upper bound for the expected De range. Change it only if you know what you are doing!\cr
+\code{n.chains} \tab \code{\link{integer}} \tab sets number of parallel chains for the model (default = 3)
+(cf. \code{\link[rjags]{jags.model}})\cr
+\code{inits} \tab \code{\link{list}} \tab option to set initialisation values (cf. \code{\link[rjags]{jags.model}}) \cr
+\code{thin} \tab \code{\link{numeric}} \tab thinning interval for monitoring the Bayesian process (cf. \code{\link[rjags]{jags.model}})\cr
+\code{variables.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
+\code{'central_D'}, \code{'sigma_D'}, \code{'D'}, \code{'Q'}, \code{'a'}, \code{'b'}, \code{'c'}, \code{'g'}.
+Note: only variables present in the model can be monitored.
+}
+
+\bold{User defined models}\cr
+
+The function provides the option to modify and to define own models that can be used for
+the Bayesian calculation. In the case the user wants to modify a model, a new model
+can be piped into the function via the argument \code{baSAR_model} as \code{character}.
+The model has to be provided in the JAGS dialect of the BUGS language (cf. \code{\link[rjags]{jags.model}})
+and the pre-defined parameter names have to be respected, otherwise the function
+will break.\cr
+
+\bold{FAQ}\cr
+
+Q: How can I set the seed for the random number generator (RNG)?\cr
+A: Use the argument \code{method_control}, e.g., for three MCMC chains
+(as it is the default):\cr
+\code{method_control = list(
+inits = list(
+ list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1),
+ list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2),
+ list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3)
+))}\cr
+This sets a reproducible set for every chain separately.\cr
+
+Q: How can I modify the output plots?\cr
+A: You can't, but you can use the function output to create own, modified plots.\cr
+
+Q: Can I change the boundaries for the central_D?\cr
+A: Yes, we made it possible, but we DO NOT recommend it, unless you know what you are doing!
+Example: \code{method_control = list(lower_centralD = 10)}\cr
+
+\bold{Additional arguments support via the \code{...} argument }\cr
+
+This list summarizes the additional arguments that can be passed to the internally used
+functions.
+
+\tabular{llll}{
+\bold{Supported argument} \tab \bold{Corresponding function} \tab \bold{Default} \tab \bold{Short description }\cr
+\code{threshold} \tab \code{\link{verify_SingleGrainData}} \tab \code{30} \tab change rejection threshold for curve selection \cr
+\code{sheet} \tab \code{\link[readxl]{read_excel}} \tab \code{1} \tab select XLS-sheet for import\cr
+\code{col_names} \tab \code{\link[readxl]{read_excel}} \tab \code{TRUE} \tab first row in XLS-file is header\cr
+\code{col_types} \tab \code{\link[readxl]{read_excel}} \tab \code{NULL} \tab limit import to specific columns\cr
+\code{skip} \tab \code{\link[readxl]{read_excel}} \tab \code{0} \tab number of rows to be skipped during import\cr
+\code{n.records} \tab \code{\link{read_BIN2R}} \tab \code{NULL} \tab limit records during BIN-file import\cr
+\code{duplicated.rm} \tab \code{\link{read_BIN2R}} \tab \code{TRUE} \tab remove duplicated records in the BIN-file\cr
+\code{pattern} \tab \code{\link{read_BIN2R}} \tab \code{TRUE} \tab select BIN-file by name pattern\cr
+\code{position} \tab \code{\link{read_BIN2R}} \tab \code{NULL} \tab limit import to a specific position\cr
+\code{background.count.distribution} \tab \code{\link{calc_OSLLxTxRatio}} \tab \code{"non-poisson"} \tab set assumed count distribution\cr
+\code{fit.weights} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables fit weights\cr
+\code{fit.bounds} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables fit bounds\cr
+\code{NumberIterations.MC} \tab \code{\link{plot_GrowthCurve}} \tab \code{100} \tab number of MC runs for error calculation\cr
+\code{output.plot} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables dose response curve plot\cr
+\code{output.plotExtended} \tab \code{\link{plot_GrowthCurve}} \tab \code{TRUE} \tab enables / disables extended dose response curve plot\cr
+}
+}
+\note{
+\bold{If you provide more than one BIN-file}, it is \bold{strongly} recommended to provide
+a \code{list} with the same number of elements for the following parameters:\cr
+\code{source_doserate}, \code{signal.integral}, \code{signal.integral.Tx}, \code{background.integral},
+\code{background.integral.Tx}, \code{sigmab}, \code{sig0}.\cr
+
+Example for two BIN-files: \code{source_doserate = list(c(0.04, 0.006), c(0.05, 0.006))}\cr
+
+\bold{The function is currently limited to work with standard Risoe BIN-files only!}
+}
+\section{Function version}{
+ 0.1.25 (2016-09-09 10:32:17)
+}
+\examples{
+
+##(1) load package test data set
+data(ExampleData.BINfileData, envir = environment())
+
+##(2) selecting relevant curves, and limit dataset
+CWOSL.SAR.Data <- subset(
+ CWOSL.SAR.Data,
+ subset = POSITION\%in\%c(1:3) & LTYPE == "OSL")
+
+\dontrun{
+##(3) run analysis
+##please note that the parameters selected here are
+##chosen for performance, not for reliability
+results <- analyse_baSAR(
+ object = CWOSL.SAR.Data,
+ source_doserate = c(0.04, 0.001),
+ signal.integral = c(1:2),
+ background.integral = c(80:100),
+ fit.method = "LIN",
+ plot = FALSE,
+ n.MCMC = 200
+
+)
+
+print(results)
+
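+##re-run the Bayesian analysis on the previous output; the pre-processing
+##part is then skipped (see details), the parameters below are illustrative
+results2 <- analyse_baSAR(
+ object = results,
+ distribution = "normal",
+ plot = FALSE,
+ n.MCMC = 200)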
+
+##XLS_file template
+##copy and paste the code below into the terminal
+##you can further use the function write.csv() to export the example
+
+XLS_file <-
+structure(
+list(
+ BIN_FILE = NA_character_,
+ DISC = NA_real_,
+ GRAIN = NA_real_),
+   .Names = c("BIN_FILE", "DISC", "GRAIN"),
+   class = "data.frame",
+   row.names = 1L
+)
+
+}
+
+}
+\author{
+Norbert Mercier, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
+
+The underlying Bayesian model based on a contribution by Combes et al., 2015.
+\cr R Luminescence Package Team}
+\references{
+Combes, B., Philippe, A., Lanos, P., Mercier, N., Tribolo, C., Guerin, G., Guibert, P., Lahaye, C., 2015.
+A Bayesian central equivalent dose model for optically stimulated luminescence dating.
+Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001
+
+\bold{Further reading}
+
+Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013.
+Bayesian Data Analysis, Third Edition. CRC Press.
+
+Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot
+regenerative-dose protocol. Radiation Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X
+}
+\seealso{
+\code{\link{read_BIN2R}}, \code{\link{calc_OSLLxTxRatio}}, \code{\link{plot_GrowthCurve}},
+\code{\link[readxl]{read_excel}}, \code{\link{verify_SingleGrainData}},
+\code{\link[rjags]{jags.model}}, \code{\link[rjags]{coda.samples}}, \code{\link{boxplot.default}}
+}
+\keyword{datagen}
+
diff --git a/man/analyse_pIRIRSequence.Rd b/man/analyse_pIRIRSequence.Rd
new file mode 100644
index 0000000..06ef96e
--- /dev/null
+++ b/man/analyse_pIRIRSequence.Rd
@@ -0,0 +1,173 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_pIRIRSequence.R
+\name{analyse_pIRIRSequence}
+\alias{analyse_pIRIRSequence}
+\title{Analyse post-IR IRSL sequences}
+\usage{
+analyse_pIRIRSequence(object, signal.integral.min, signal.integral.max,
+  background.integral.min, background.integral.max, dose.points = NULL,
+  sequence.structure = c("TL", "IR50", "pIRIR225"), plot = TRUE,
+  plot.single = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}) or \code{\link{list}} of
+\code{\linkS4class{RLum.Analysis}} objects: input object containing data for analysis. If a \code{\link{list}}
+is provided the function tries to iterate over the list.}
+
+\item{signal.integral.min}{\code{\link{integer}} (\bold{required}): lower
+bound of the signal integral. Provide this value as vector for different
+integration limits for the different IRSL curves.}
+
+\item{signal.integral.max}{\code{\link{integer}} (\bold{required}): upper
+bound of the signal integral. Provide this value as vector for different
+integration limits for the different IRSL curves.}
+
+\item{background.integral.min}{\code{\link{integer}} (\bold{required}):
+lower bound of the background integral. Provide this value as vector for
+different integration limits for the different IRSL curves.}
+
+\item{background.integral.max}{\code{\link{integer}} (\bold{required}):
+upper bound of the background integral. Provide this value as vector for
+different integration limits for the different IRSL curves.}
+
+\item{dose.points}{\code{\link{numeric}} (optional): a numeric vector
+containing the dose points values. Using this argument overwrites dose point
+values in the signal curves.}
+
+\item{sequence.structure}{\link{vector} \link{character} (with default):
+specifies the general sequence structure. Allowed values are \code{"TL"} and
+any \code{"IR"} combination (e.g., \code{"IR50"},\code{"pIRIR225"}).
+Additionally a parameter \code{"EXCLUDE"} is allowed to exclude curves from
+the analysis (Note: If a preheat without PMT measurement is used, i.e.
+preheat as non-TL, remove the TL step.)}
+
+\item{plot}{\code{\link{logical}} (with default): enables or disables plot
+output.}
+
+\item{plot.single}{\code{\link{logical}} (with default): single plot output
+(\code{TRUE/FALSE}) to allow for plotting the results in single plot
+windows. Requires \code{plot = TRUE}.}
+
+\item{\dots}{further arguments that will be passed to the function
+\code{\link{analyse_SAR.CWOSL}} and \code{\link{plot_GrowthCurve}}}
+}
+\value{
+Plots (optional) and an \code{\linkS4class{RLum.Results}} object are
+returned, the latter containing the following elements:
+
+\tabular{lll}{
+\bold{DATA.OBJECT} \tab \bold{TYPE} \tab \bold{DESCRIPTION} \cr
+\code{..$data} : \tab  \code{data.frame} \tab Table with De values \cr
+\code{..$LnLxTnTx.table} : \tab \code{data.frame} \tab Table with the LnLxTnTx values \cr
+\code{..$rejection.criteria} : \tab \code{\link{data.frame}} \tab rejection criteria \cr
+\code{..$Formula} : \tab \code{\link{list}} \tab Function used for fitting of the dose response curve \cr
+\code{..$call} : \tab \code{\link{call}} \tab the original function call
+}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}.
+}
+\description{
+The function performs an analysis of post-IR IRSL sequences including curve
+fitting on \code{\linkS4class{RLum.Analysis}} objects.
+}
+\details{
+To allow the analysis of post-IR IRSL protocol (Thomsen et al., 2008)
+measurements, this function has been written as an extended wrapper around the
+function \code{\link{analyse_SAR.CWOSL}}, facilitating an entire sequence
+analysis in one run. Consequently, its functionality is strictly limited by
+that of \code{\link{analyse_SAR.CWOSL}}.\cr
+
+\bold{If the input is a \code{list}}\cr
+
+If the input is a list of RLum.Analysis objects, every argument can be
+provided as a list to allow for different sets of parameters for every single
+input element (see example (4) below).
+For further information see \code{\link{analyse_SAR.CWOSL}}.
+}
+\note{
+Best graphical output can be achieved by using the function \code{pdf}
+with the following options:\cr \code{pdf(file = "...", height = 15, width =
+15)}
+}
+\section{Function version}{
+ 0.2.2 (2016-09-07 11:37:34)
+}
+\examples{
+
+
+### NOTE: For this example existing example data are used. These data are non-pIRIR data.
+###
+##(1) Compile example data set based on existing example data (SAR quartz measurement)
+##(a) Load example data
+data(ExampleData.BINfileData, envir = environment())
+
+##(b) Transform the values from the first position in a RLum.Analysis object
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+
+##(c) Grep curves and exclude the last two (one TL and one IRSL)
+object <- get_RLum(object, record.id = c(-29,-30))
+
+##(d) Define new sequence structure and set new RLum.Analysis object
+sequence.structure  <- c(1,2,2,3,4,4)
+sequence.structure <- as.vector(sapply(seq(0,length(object)-1,by = 4),
+                                       function(x){sequence.structure + x}))
+
+object <-  sapply(1:length(sequence.structure), function(x){
+
+  object[[sequence.structure[x]]]
+
+})
+
+object <- set_RLum(class = "RLum.Analysis", records = object, protocol = "pIRIR")
+
+##(2) Perform pIRIR analysis (for this example with quartz OSL data!)
+## Note: output as single plots to avoid problems with this example
+results <- analyse_pIRIRSequence(object,
+     signal.integral.min = 1,
+     signal.integral.max = 2,
+     background.integral.min = 900,
+     background.integral.max = 1000,
+     fit.method = "EXP",
+     sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"),
+     main = "Pseudo pIRIR data set based on quartz OSL",
+     plot.single = TRUE)
+
+
+##(3) Perform pIRIR analysis (for this example with quartz OSL data!)
+## Alternative for PDF output, uncomment and complete for usage
+\dontrun{
+pdf(file = "...", height = 15, width = 15)
+  results <- analyse_pIRIRSequence(object,
+         signal.integral.min = 1,
+         signal.integral.max = 2,
+         background.integral.min = 900,
+         background.integral.max = 1000,
+         fit.method = "EXP",
+         main = "Pseudo pIRIR data set based on quartz OSL")
+
+  dev.off()
+}
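+
+##(4) a sketch for list input with per-element parameter sets
+## (assumes two measured aliquots of identical structure; not run)
+\dontrun{
+results <- analyse_pIRIRSequence(
+  list(object, object),
+  signal.integral.min = list(1, 1),
+  signal.integral.max = list(2, 2),
+  background.integral.min = list(900, 900),
+  background.integral.max = list(1000, 1000),
+  sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"),
+  plot.single = TRUE)
+}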
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz
+using an improved single-aliquot regenerative-dose protocol. Radiation
+Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X
+
+Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory
+fading rates of various luminescence signals from feldspar-rich sediment
+extracts. Radiation Measurements 43, 1474-1486.
+doi:10.1016/j.radmeas.2008.06.002
+}
+\seealso{
+\code{\link{analyse_SAR.CWOSL}}, \code{\link{calc_OSLLxTxRatio}},
+\code{\link{plot_GrowthCurve}}, \code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}}
+}
+\keyword{datagen}
+\keyword{plot}
+
diff --git a/man/app_RLum.Rd b/man/app_RLum.Rd
new file mode 100644
index 0000000..3057d91
--- /dev/null
+++ b/man/app_RLum.Rd
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/app_RLum.R
+\name{app_RLum}
+\alias{app_RLum}
+\title{Run Luminescence shiny apps (wrapper)}
+\usage{
+app_RLum(app, ...)
+}
+\arguments{
+\item{app}{\code{\link{character}} (required): name of the application to start. See the
+manual of the \code{RLumShiny} package for a list of available apps.}
+
+\item{...}{further arguments to pass to \code{\link[shiny]{runApp}}}
+}
+\description{
+Wrapper for the function \code{\link[RLumShiny]{app_RLum}} from the package
+\link[RLumShiny]{RLumShiny-package}. For further details and examples please
+see the manual of this package.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
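+\examples{
+## a minimal sketch - requires the suggested package 'RLumShiny'; the app
+## name "abanico" is an assumption, see the RLumShiny manual for valid names
+\dontrun{
+app_RLum("abanico")
+}
+}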
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+
diff --git a/man/apply_CosmicRayRemoval.Rd b/man/apply_CosmicRayRemoval.Rd
new file mode 100644
index 0000000..4af0297
--- /dev/null
+++ b/man/apply_CosmicRayRemoval.Rd
@@ -0,0 +1,107 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/apply_CosmicRayRemoval.R
+\name{apply_CosmicRayRemoval}
+\alias{apply_CosmicRayRemoval}
+\title{Function to remove cosmic rays from an RLum.Data.Spectrum S4 class object}
+\usage{
+apply_CosmicRayRemoval(object, method = "smooth", method.Pych.smoothing = 2,
+  method.Pych.threshold_factor = 3, MARGIN = 2, verbose = FALSE,
+  plot = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data.Spectrum}} (\bold{required}): S4
+object of class \code{RLum.Data.Spectrum}}
+
+\item{method}{\code{\link{character}} (with default): Defines the method that is
+applied for cosmic ray removal. Allowed methods are \code{smooth} (the default,
+uses \code{\link{smooth}}), \code{smooth.spline} (\code{\link{smooth.spline}})
+and \code{Pych}. See details for further information.}
+
+\item{method.Pych.smoothing}{\code{\link{integer}} (with default): Smoothing
+parameter for cosmic ray removal according to Pych (2003). The value defines
+how many neighboring values in each frame are used for smoothing (e.g.,
+\code{2} means that the two previous and two following values are used).}
+
+\item{method.Pych.threshold_factor}{\code{\link{numeric}} (with default): Threshold
+for zero-bins in the histogram. Small values mean that more peaks are removed, but
+the signal might also be affected by this removal.}
+
+\item{MARGIN}{\code{\link{integer}} (with default): dimension along which the cosmic ray removal
+should be applied: 1 = along the time axis (line by line), 2 = along the wavelength axis (column by
+column). Note: This argument currently only affects the methods \code{smooth} and \code{smooth.spline}}
+
+\item{verbose}{\code{\link{logical}} (with default): Option to suppress
+terminal output.}
+
+\item{plot}{\code{\link{logical}} (with default): If \code{TRUE} the
+histograms used for the cosmic-ray removal are returned as plots including
+the used threshold. Note: A separate plot is returned for each frame!
+Currently a graphical output is provided only for \code{method = "Pych"}.}
+
+\item{\dots}{further arguments and graphical parameters that will be passed
+to the \code{smooth} function.}
+}
+\value{
+Returns same object as input
+(\code{\linkS4class{RLum.Data.Spectrum}})
+}
+\description{
+The function provides several methods for cosmic ray removal and spectrum
+smoothing for an RLum.Data.Spectrum S4 class object
+}
+\details{
+\bold{\code{method = "Pych"}} \cr
+
+This method applies the cosmic-ray removal algorithm described by Pych
+(2003). Some aspects differ from the original publication: \itemize{
+\item For interpolation between neighbouring values the median and not the
+mean is used. \item The number of breaks to construct the histogram is set
+to: \code{length(number.of.input.values)/2} } For further details see
+references below.
+
+\bold{\code{method = "smooth"}} \cr
+
+Method uses the function \code{\link{smooth}} to remove cosmic rays.\cr
+
+Arguments that can be passed are: \code{kind}, \code{twiceit}\cr
+
+\bold{\code{method = "smooth.spline"}} \cr Method uses the function
+\code{\link{smooth.spline}} to remove cosmic rays.\cr Arguments that can be
+passed are: \code{spar}\cr
+
+\bold{How to combine methods?}\cr
+
+Different methods can be combined by applying the method repeatedly to the
+dataset (see example).
+}
+\note{
+-
+}
+\section{Function version}{
+ 0.2.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##(1) - use with your own data and combine (uncomment for usage)
+## run two times the default method and smooth with another method
+## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
+## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
+## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "smooth")
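+
+##(2) a runnable sketch on the XSYG example data, assuming that data set
+## provides the RLum.Data.Spectrum object 'TL.Spectrum' (uncomment for usage)
+## data(ExampleData.XSYG, envir = environment())
+## TL.Spectrum <- apply_CosmicRayRemoval(TL.Spectrum, method = "Pych")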
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Pych, W., 2003. A Fast Algorithm for Cosmic-Ray Removal from
+Single Images. Astrophysics 116, 148-153.
+\url{http://arxiv.org/pdf/astro-ph/0311290.pdf?origin=publication_detail}
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{smooth}},
+\code{\link{smooth.spline}}, \code{\link{apply_CosmicRayRemoval}}
+}
+\keyword{manip}
+
diff --git a/man/apply_EfficiencyCorrection.Rd b/man/apply_EfficiencyCorrection.Rd
new file mode 100644
index 0000000..2ad18a9
--- /dev/null
+++ b/man/apply_EfficiencyCorrection.Rd
@@ -0,0 +1,63 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/apply_EfficiencyCorrection.R
+\name{apply_EfficiencyCorrection}
+\alias{apply_EfficiencyCorrection}
+\title{Function to apply spectral efficiency correction to RLum.Data.Spectrum S4
+class objects}
+\usage{
+apply_EfficiencyCorrection(object, spectral.efficiency)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data.Spectrum}} (\bold{required}): S4
+object of class \code{RLum.Data.Spectrum}}
+
+\item{spectral.efficiency}{\code{\link{data.frame}} (\bold{required}): Data
+set containing wavelengths (x-column) and relative spectral response values
+(y-column) in percentage}
+}
+\value{
+Returns same object as input
+(\code{\linkS4class{RLum.Data.Spectrum}})
+}
+\description{
+The function allows spectral efficiency corrections for RLum.Data.Spectrum
+S4 class objects
+}
+\details{
+The efficiency correction is based on a spectral response data set provided
+by the user. Usually the data set for the quantum efficiency is of lower
+resolution, so the values are interpolated to the required spectral resolution using
+the function \code{\link[stats]{approx}}.
+
+If the energy calibration differs between the two data sets, \code{NA} values are produced
+and will be removed from the matrix.
+}
+\note{
+Please note that the spectral efficiency data from the camera alone may not
+sufficiently correct for spectral efficiency of the entire optical system
+(e.g., spectrometer, camera ...).
+}
+\section{Function version}{
+ 0.1.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##(1) - use with your own data (uncomment for usage)
+## spectral.efficiency <- read.csv("your data")
+##
+## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, spectral.efficiency)
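+
+##(2) a sketch with a toy spectral response table; the values are made up
+## for illustration (wavelength in nm, relative response in percent)
+## spectral.efficiency <- data.frame(
+##   wavelength = seq(300, 800, by = 50),
+##   response = c(10, 30, 60, 85, 95, 100, 90, 70, 40, 20, 10))
+## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, spectral.efficiency)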
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Johannes Friedrich, University of Bayreuth (Germany)
+\cr R Luminescence Package Team}
+\references{
+-
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Spectrum}}
+}
+\keyword{manip}
+
diff --git a/man/as.Rd b/man/as.Rd
new file mode 100644
index 0000000..9a96772
--- /dev/null
+++ b/man/as.Rd
@@ -0,0 +1,77 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/RLum.Analysis-class.R, R/RLum.Data.Curve-class.R, R/RLum.Data.Image-class.R, R/RLum.Data.Spectrum-class.R, R/RLum.Results-class.R
+\name{as}
+\alias{as}
+\title{as() - RLum-object coercion}
+\arguments{
+\item{from}{\code{\linkS4class{RLum}} or \code{\link{list}}, \code{\link{data.frame}}, \code{\link{matrix}}
+(\bold{required}): object to be coerced from}
+
+\item{to}{\code{\link{character}} (\bold{required}): class name to be coerced to}
+}
+\description{
+Coercion methods for \code{[RLum.Analysis]}, \code{[RLum.Data.Curve]},
+\code{[RLum.Data.Image]}, \code{[RLum.Data.Spectrum]} and
+\code{[RLum.Results]} objects; see details for the supported conversions.
+}
+\details{
+\bold{[RLum.Analysis]}\cr
+
+\tabular{ll}{
+ \bold{from} \tab \bold{to}\cr
+  \code{list} \tab \code{list}\cr
+}
+
+This requires that the \code{\link{list}} consists of \code{\linkS4class{RLum.Analysis}} objects.
+
+\bold{[RLum.Data.Curve]}\cr
+
+\tabular{ll}{
+ \bold{from} \tab \bold{to}\cr
+  \code{list} \tab \code{list} \cr
+  \code{data.frame} \tab \code{data.frame}\cr
+  \code{matrix} \tab \code{matrix}
+
+}
+
+\bold{[RLum.Data.Image]}\cr
+
+\tabular{ll}{
+ \bold{from} \tab \bold{to}\cr
+  \code{data.frame} \tab \code{data.frame}\cr
+  \code{matrix} \tab \code{matrix}
+
+}
+
+\bold{[RLum.Data.Spectrum]}\cr
+
+\tabular{ll}{
+ \bold{from} \tab \bold{to}\cr
+  \code{data.frame} \tab \code{data.frame}\cr
+  \code{matrix} \tab \code{matrix}
+
+}
+
+\bold{[RLum.Results]}\cr
+
+\tabular{ll}{
+ \bold{from} \tab \bold{to}\cr
+  \code{list} \tab \code{list}\cr
+}
+
+This requires that the \code{\link{list}} consists of \code{\linkS4class{RLum.Results}} objects.
+}
+\note{
+Due to the complex structure of \code{RLum} objects themselves, coercion to standard
+R data structures will always be lossy!
+}
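+\examples{
+## a minimal round-trip sketch for RLum.Data.Curve; note that metadata
+## is lost on the way back (see the note above)
+data(ExampleData.CW_OSL_Curve, envir = environment())
+curve <- set_RLum(class = "RLum.Data.Curve",
+                  recordType = "OSL",
+                  data = as.matrix(ExampleData.CW_OSL_Curve))
+df <- as(curve, "data.frame")
+curve2 <- as(df, "RLum.Data.Curve")
+}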
+\seealso{
+\code{\link[methods]{as}}
+}
+
diff --git a/man/bin_RLum.Data.Rd b/man/bin_RLum.Data.Rd
new file mode 100644
index 0000000..dce5e12
--- /dev/null
+++ b/man/bin_RLum.Data.Rd
@@ -0,0 +1,60 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/bin_RLum.Data.R
+\name{bin_RLum.Data}
+\alias{bin_RLum.Data}
+\title{Channel binning - method dispatcher}
+\usage{
+bin_RLum.Data(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data}} (\bold{required}): S4 object of
+class \code{RLum.Data}}
+
+\item{...}{further arguments passed to the specific class method}
+}
+\value{
+An object of the same type as the input object is provided
+}
+\description{
+Function calls the object-specific bin functions for RLum.Data S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum.Data}} objects.\cr Depending on the input object, the
+corresponding function will be selected. Allowed arguments can be found
+in the documentation of the corresponding \code{\linkS4class{RLum.Data}} class.
+}
+\note{
+Currently only \code{RLum.Data} objects of class \code{RLum.Data.Curve} are supported!
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+##load example data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##create RLum.Data.Curve object from this example
+curve <-
+  set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = "OSL",
+      data = as.matrix(ExampleData.CW_OSL_Curve)
+  )
+
+##plot data without and with 2 and 4 channel binning
+plot_RLum(curve)
+plot_RLum(bin_RLum.Data(curve, bin_size = 2))
+plot_RLum(bin_RLum.Data(curve, bin_size = 4))
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{utilities}
+
diff --git a/man/calc_AliquotSize.Rd b/man/calc_AliquotSize.Rd
new file mode 100644
index 0000000..8f73445
--- /dev/null
+++ b/man/calc_AliquotSize.Rd
@@ -0,0 +1,132 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_AliquotSize.R
+\name{calc_AliquotSize}
+\alias{calc_AliquotSize}
+\title{Estimate the amount of grains on an aliquot}
+\usage{
+calc_AliquotSize(grain.size, sample.diameter, packing.density = 0.65,
+  MC = TRUE, grains.counted, plot = TRUE, ...)
+}
+\arguments{
+\item{grain.size}{\code{\link{numeric}} (\bold{required}): mean grain size
+(microns) or a range of grain sizes from which the mean grain size is
+computed (e.g. \code{c(100,200)}).}
+
+\item{sample.diameter}{\code{\link{numeric}} (\bold{required}): diameter
+(mm) of the targeted area on the sample carrier.}
+
+\item{packing.density}{\code{\link{numeric}} (with default): empirical value
+for mean packing density. \cr If \code{packing.density = "inf"} a hexagonal
+structure on an infinite plane with a packing density of \eqn{0.906\ldots}
+is assumed.}
+
+\item{MC}{\code{\link{logical}} (optional): if \code{TRUE} the function
+performs a Monte Carlo simulation for estimating the amount of grains on the
+sample carrier and assumes random errors in grain size distribution and
+packing density. Requires a vector with min and max grain size for
+\code{grain.size}. For more information see details.}
+
+\item{grains.counted}{\code{\link{numeric}} (optional): grains counted on a
+sample carrier. If a non-zero positive integer is provided this function
+will calculate the packing density of the aliquot. If more than one value is
+provided the mean packing density and its standard deviation is calculated.
+Note that this overrides \code{packing.density}.}
+
+\item{plot}{\code{\link{logical}} (with default): plot output
+(\code{TRUE}/\code{FALSE})}
+
+\item{\dots}{further arguments to pass (\code{main, xlab, MC.iter}).}
+}
+\value{
+Returns a terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant calculation
+results.} \item{args}{\link{list} used arguments} \item{call}{\link{call}
+the function call} \item{MC}{\link{list} results of the Monte Carlo
+simulation}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+Estimate the number of grains on an aliquot. Alternatively, the packing
+density of an aliquot is computed.
+}
+\details{
+This function can be used to either estimate the number of grains on an
+aliquot or to compute the packing density, depending on the arguments
+provided. \cr The following equation is used to estimate the number of
+grains \code{n} (cf. the hand calculation in the examples): \cr \deqn{n = (\pi*x^2)/(\pi*y^2)*d} where \code{x} is the
+radius of the aliquot, \code{y} is the mean radius of the
+mineral grains (both in the same unit, e.g. microns) and \code{d} is the packing density (value between 0 and
+1). \cr
+
+\bold{Packing density} \cr\cr The default value for \code{packing.density}
+is 0.65, which is the mean of empirical values determined by Heer et al.
+(2012) and unpublished data from the Cologne luminescence laboratory. If
+\code{packing.density = "inf"} a maximum density of \eqn{\pi/\sqrt12 =
+0.9068\ldots} is used. However, note that this value is not appropriate as
+the standard preparation procedure of aliquots resembles a PECC ("Packing
+Equal Circles in a Circle") problem where the maximum packing density is
+asymptotic to about 0.87. \cr
+
+\bold{Monte Carlo simulation} \cr\cr The number of grains on an aliquot can
+be estimated by Monte Carlo simulation when setting \code{MC = TRUE}. Each
+of the parameters necessary to calculate \code{n} (\code{x}, \code{y},
+\code{d}) are assumed to be normally distributed with means \eqn{\mu_x,
+\mu_y, \mu_d} and standard deviations \eqn{\sigma_x, \sigma_y, \sigma_d}.
+\cr\cr For the mean grain size random samples are taken first from
+\eqn{N(\mu_y, \sigma_y)}, where \eqn{\mu_y = mean.grain.size} and
+\eqn{\sigma_y = (max.grain.size-min.grain.size)/4} so that 95\% of all
+grains are within the provided grain size range. This effectively takes
+into account that after sieving the sample there is still a small chance of
+having grains smaller or larger than the used mesh sizes. For each random
+sample the mean grain size is calculated, from which random subsamples are
+drawn for the Monte Carlo simulation. \cr\cr The packing density is assumed
+to be normally distributed with an empirically determined \eqn{\mu = 0.65}
+(or provided value) and \eqn{\sigma = 0.18}. The normal distribution is
+truncated at \code{d = 0.87} as this is approximately the maximum packing
+density that can be achieved in a PECC problem. \cr\cr The sample diameter has
+\eqn{\mu = sample.diameter} and \eqn{\sigma = 0.2} to take into account
+variations in sample disc preparation (i.e. applying silicon spray to the
+disc). A lower truncation point at \code{x = 0.5} is used, which assumes
+that aliquots with a sample diameter smaller than 0.5 mm are discarded.
+Likewise, the normal distribution is truncated at 9.8 mm, which is the
+diameter of the sample disc. \cr\cr For each random sample drawn from the
+normal distributions the amount of grains on the aliquot is calculated. By
+default, \code{10^5} iterations are used, but can be reduced/increased with
+\code{MC.iter} (see \code{...}). The results are visualised in a bar- and
+boxplot together with a statistical summary.
+}
+\section{Function version}{
+ 0.31 (2016-05-16 22:20:28)
+}
+\examples{
+
+## Estimate the amount of grains on a small aliquot
+calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, MC.iter = 100)
+
+## Calculate the mean packing density of large aliquots
+calc_AliquotSize(grain.size = c(100,200), sample.diameter = 8,
+                 grains.counted = c(2525,2312,2880), MC.iter = 100)
+
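+## cross-check of the grain-number equation for the first example by hand
+## (a sketch; x and y are radii in microns, d is the packing density)
+x <- 500    # radius of a 1 mm aliquot
+y <- 62.5   # mean radius of 100-150 micron grains
+d <- 0.65
+(pi * x^2) / (pi * y^2) * d   # ~ 41.6 grains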
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\references{
+Duller, G.A.T., 2008. Single-grain optical dating of Quaternary
+sediments: why aliquot size matters in luminescence dating. Boreas 37,
+589-612.  \cr\cr Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
+are there on a single aliquot? Ancient TL 30, 9-16. \cr\cr \bold{Further
+reading} \cr\cr Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
+Theorem on Circle Packing. \url{http://arxiv.org/pdf/1009.4322v1.pdf},
+2013-09-13. \cr\cr Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
+Oestergard, P.R.J., 1998.  Dense packings of congruent circles in a circle.
+Discrete Mathematics 181, 139-154. \cr\cr Huang, W., Ye, T., 2011. Global
+optimization method for finding dense packings of equal circles in a circle.
+European Journal of Operational Research 210, 474-481.
+}
+
diff --git a/man/calc_CentralDose.Rd b/man/calc_CentralDose.Rd
new file mode 100644
index 0000000..cd6672c
--- /dev/null
+++ b/man/calc_CentralDose.Rd
@@ -0,0 +1,100 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_CentralDose.R
+\name{calc_CentralDose}
+\alias{calc_CentralDose}
+\title{Apply the central age model (CAM) after Galbraith et al. (1999) to a given
+De distribution}
+\usage{
+calc_CentralDose(data, sigmab, log = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{sigmab}{\code{\link{numeric}} (with default): spread in De values
+given as a fraction (e.g. 0.2). This value represents the expected
+overdispersion in the data should the sample be well-bleached (Cunningham &
+Wallinga 2012, p. 100).}
+
+\item{log}{\code{\link{logical}} (with default): fit the (un-)logged central
+age model to De data}
+
+\item{plot}{\code{\link{logical}} (with default): plot output}
+
+\item{\dots}{further arguments (\code{trace, verbose}).}
+}
+\value{
+Returns a plot (optional) and terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+\item{profile}{\link{data.frame} the log likelihood profile for sigma}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+This function calculates the central dose and dispersion of the De
+distribution, their standard errors and the profile log likelihood function
+for sigma.
+}
+\details{
+This function uses the equations of Galbraith & Roberts (2012). The
+parameters \code{delta} and \code{sigma} are estimated by numerically solving
+eq. 15 and 16. Their standard errors are approximated using eq. 17.
+In addition, the profile log-likelihood function for \code{sigma} is
+calculated using eq. 18 and presented as a plot. Numerical values of the 
+maximum likelihood approach are \bold{only} presented in the plot and \bold{not}
+in the console. A detailed explanation of maximum likelihood estimation can be found in the
+appendix of Galbraith & Laslett (1993, 468-470) and Galbraith & Roberts
+(2012, 15).
+}
+\section{Function version}{
+ 1.3.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+##load example data
+data(ExampleData.DeValues, envir = environment())
+
+##apply the central dose model
+calc_CentralDose(ExampleData.DeValues$CA1)
+
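+##store the results and retrieve the profile log-likelihood for sigma
+##(a sketch; the element name follows the Value section above)
+res <- calc_CentralDose(ExampleData.DeValues$CA1, plot = FALSE)
+profile <- get_RLum(res, "profile")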
+}
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
+mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
+\cr \cr Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
+J.M., 1999. Optical dating of single grains of quartz from Jinmium rock
+shelter, northern Australia. Part I: experimental design and statistical
+models.  Archaeometry 41, 339-364. \cr \cr Galbraith, R.F. & Roberts, R.G.,
+2012. Statistical aspects of equivalent dose and error calculation and
+display in OSL dating: An overview and some recommendations. Quaternary
+Geochronology 11, 1-27. \cr \cr \bold{Further reading} \cr \cr Arnold, L.J.
+& Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+(De) distributions: Implications for OSL dating of sediment mixtures.
+Quaternary Geochronology 4, 204-230. \cr \cr Bailey, R.M. & Arnold, L.J.,
+2006. Statistical modelling of single grain quartz De distributions and an
+assessment of procedures for estimating burial dose. Quaternary Science
+Reviews 25, 2475-2502. \cr \cr Cunningham, A.C. & Wallinga, J., 2012.
+Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr \cr Rodnight, H., Duller, G.A.T.,
+Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120.
+\cr \cr Rodnight, H., 2008. How many equivalent dose values are needed to
+obtain a reproducible distribution? Ancient TL 26, 3-10.
+}
+\seealso{
+\code{\link{plot}}, \code{\link{calc_CommonDose}},
+\code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+\code{\link{calc_MinDose}}
+}
+
diff --git a/man/calc_CommonDose.Rd b/man/calc_CommonDose.Rd
new file mode 100644
index 0000000..fccae27
--- /dev/null
+++ b/man/calc_CommonDose.Rd
@@ -0,0 +1,93 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_CommonDose.R
+\name{calc_CommonDose}
+\alias{calc_CommonDose}
+\title{Apply the (un-)logged common age model after Galbraith et al. (1999) to a
+given De distribution}
+\usage{
+calc_CommonDose(data, sigmab, log = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{sigmab}{\code{\link{numeric}} (with default): spread in De values
+given as a fraction (e.g. 0.2). This value represents the expected
+overdispersion in the data should the sample be well-bleached (Cunningham &
+Wallinga 2012, p. 100).}
+
+\item{log}{\code{\link{logical}} (with default): fit the (un-)logged common
+age model to De data}
+
+\item{\dots}{currently not used.}
+}
+\value{
+Returns a terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+Function to calculate the common dose of a De distribution.
+}
+\details{
+\bold{(Un-)logged model} \cr\cr When \code{log = TRUE} this function
+calculates the weighted mean of logarithmic De values. Each of the estimates
+is weighted by the inverse square of its relative standard error. The
+weighted mean is then transformed back to the dose scale (Galbraith &
+Roberts 2012, p. 14).\cr\cr The log transformation is not applicable if the
+De estimates are close to zero or negative. In this case the un-logged model
+can be applied instead (\code{log = FALSE}). The weighted mean is then
+calculated using the un-logged estimates of De and their absolute standard
+error (Galbraith & Roberts 2012, p. 14).
+}
+\section{Function version}{
+ 0.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## apply the common dose model
+calc_CommonDose(ExampleData.DeValues$CA1)
+
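+## hand computation of the logged common dose (a sketch of the weighted
+## mean described in the details section)
+De <- ExampleData.DeValues$CA1[, 1]
+se <- ExampleData.DeValues$CA1[, 2]
+w  <- 1 / (se / De)^2           # inverse squared relative standard errors
+exp(sum(w * log(De)) / sum(w))  # weighted mean, back-transformed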
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
+mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
+\cr\cr Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
+J.M., 1999. Optical dating of single grains of quartz from Jinmium rock
+shelter, northern Australia. Part I: experimental design and statistical
+models.  Archaeometry 41, 339-364. \cr\cr Galbraith, R.F. & Roberts, R.G.,
+2012. Statistical aspects of equivalent dose and error calculation and
+display in OSL dating: An overview and some recommendations. Quaternary
+Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
+Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+(De) distributions: Implications for OSL dating of sediment mixtures.
+Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
+2006. Statistical modelling of single grain quartz De distributions and an
+assessment of procedures for estimating burial dose. Quaternary Science
+Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+of optical dating of fluvial deposits. Quaternary Geochronology 1,
+109-120.\cr\cr Rodnight, H., 2008. How many equivalent dose values are
+needed to obtain a reproducible distribution? Ancient TL 26, 3-10.
+}
+\seealso{
+\code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
+\code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+}
+
diff --git a/man/calc_CosmicDoseRate.Rd b/man/calc_CosmicDoseRate.Rd
new file mode 100644
index 0000000..473aa06
--- /dev/null
+++ b/man/calc_CosmicDoseRate.Rd
@@ -0,0 +1,233 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_CosmicDoseRate.R
+\name{calc_CosmicDoseRate}
+\alias{calc_CosmicDoseRate}
+\title{Calculate the cosmic dose rate}
+\usage{
+calc_CosmicDoseRate(depth, density, latitude, longitude, altitude,
+  corr.fieldChanges = FALSE, est.age = NA, half.depth = FALSE,
+  error = 10)
+}
+\arguments{
+\item{depth}{\code{\link{numeric}} (\bold{required}): depth of overburden
+(m).  For more than one absorber use \cr \code{c(depth_1, depth_2, ...,
+depth_n)}}
+
+\item{density}{\code{\link{numeric}} (\bold{required}): average overburden
+density (g/cm^3). For more than one absorber use \cr \code{c(density_1,
+density_2, ..., density_n)}}
+
+\item{latitude}{\code{\link{numeric}} (\bold{required}): latitude (decimal
+degree), N positive}
+
+\item{longitude}{\code{\link{numeric}} (\bold{required}): longitude (decimal
+degree), E positive}
+
+\item{altitude}{\code{\link{numeric}} (\bold{required}): altitude (m above
+sea-level)}
+
+\item{corr.fieldChanges}{\code{\link{logical}} (with default): correct for
+geomagnetic field changes after Prescott & Hutton (1994). Apply only when
+justified by the data.}
+
+\item{est.age}{\code{\link{numeric}} (with default): estimated age range
+(ka) for geomagnetic field change correction (0-80 ka allowed)}
+
+\item{half.depth}{\code{\link{logical}} (with default): how to deal with a
+varying overburden thickness. If \code{TRUE} only half the depth is used for
+the calculation. Apply only when justified, i.e. when a constant sedimentation
+rate can safely be assumed.}
+
+\item{error}{\code{\link{numeric}} (with default): general error
+(percentage) to be implemented on corrected cosmic dose rate estimate}
+}
+\value{
+Returns a terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant calculation
+results.} \item{args}{\link{list} used arguments} \item{call}{\link{call}
+the function call}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+This function calculates the cosmic dose rate taking into account the soft-
+and hard-component of the cosmic ray flux and allows corrections for
+geomagnetic latitude, altitude above sea-level and geomagnetic field
+changes.
+}
+\details{
+This function calculates the total cosmic dose rate considering both the
+soft- and hard-component of the cosmic ray flux.\cr
+
+\bold{Internal calculation steps}
+
+(1) Calculate the total depth of all absorbers in hg/cm^2 (1 hg/cm^2 = 100
+g/cm^2)
+
+\deqn{absorber = depth_1*density_1 + depth_2*density_2 + ... + depth_n*
+density_n}
+
+(2) If \code{half.depth = TRUE}
+
+\deqn{absorber = absorber/2}
+
+(3) Calculate cosmic dose rate at sea-level and 55 deg. latitude
+
+a) If the absorber is > 167 g/cm^2 (only hard-component; Allkofer et al. 1975):
+apply the equation given by Prescott & Hutton (1994) (cf. Barbouti & Rastin
+1983)
+
+\deqn{D0 = C/(((absorber+d)^\alpha+a)*(absorber+H))*exp(-B*absorber)}
+
+b) If the absorber is < 167 g/cm^2 (soft- and hard-component): derive D0 from
+Fig. 1 in Prescott & Hutton (1988).
+
+(4) Calculate geomagnetic latitude (Prescott & Stephan 1982, Prescott &
+Hutton 1994)
+
+\deqn{\lambda = arcsin(0.203*cos(latitude)*cos(longitude-291)+0.979*
+sin(latitude))}
+
+(5) Apply correction for geomagnetic latitude and altitude above sea-level.
+Values for F, J and H were read from Fig. 3 shown in Prescott & Stephan
+(1982) and fitted with 3-degree polynomials for lambda < 35 degree and a
+linear fit for lambda > 35 degree.
+
+\deqn{Dc = D0*(F+J*exp((altitude/1000)/H))}
+
+(6) Optional: Apply correction for geomagnetic field changes in the last
+0-80 ka (Prescott & Hutton 1994). Correction and altitude factors are given
+in Table 1 and Fig. 1 in Prescott & Hutton (1994). Values for altitude
+factor were fitted with a 2-degree polynomial. The altitude factor is
+operated on the decimal part of the correction factor.
+
+\deqn{Dc' = Dc*correctionFactor}
+
+\bold{Usage of \code{depth} and \code{density}}
+
+(1) If only one value for depth and density is provided, the cosmic dose
+rate is calculated for exactly one sample and one absorber as overburden
+(i.e. \code{depth*density}).
+
+(2) In some cases it might be useful to calculate the cosmic dose rate for a
+sample that is overlain by more than one absorber, e.g. in a profile with
+soil layers of different thickness and a distinct difference in density.
+This can be calculated by providing a matching number of values for
+\code{depth} and \code{density} (e.g. \code{depth = c(1, 2), density =
+c(1.7, 2.4)})
+
+(3) Another possibility is to calculate the cosmic dose rate for more than
+one sample of the same profile. This is done by providing more than one
+values for \code{depth} and only one for \code{density}. For example,
+\code{depth = c(1, 2, 3), density = 1.7} will calculate the cosmic dose rate
+for three samples in 1, 2 and 3 m depth in a sediment of density 1.7 g/cm^3.
+}
+\note{
+Despite its universal use the equation to calculate the cosmic dose
+rate provided by Prescott & Hutton (1994) is falsely stated to be valid from
+the surface to 10^4 hg/cm^2 of standard rock. The original expression by
+Barbouti & Rastin (1983) only considers the muon flux (i.e. hard-component)
+and is by their own definition only valid for depths between 10-10^4
+hg/cm^2.
+
+Thus, for near-surface samples (i.e. for depths < 167 g/cm^2) the equation
+of Prescott & Hutton (1994) underestimates the total cosmic dose rate, as it
+neglects the influence of the soft-component of the cosmic ray flux. For
+samples at zero depth and at sea-level the underestimation can be as large
+as ~0.1 Gy/ka. In a previous article, Prescott & Hutton (1988) give another
+approximation of Barbouti & Rastin's equation in the form of
+
+\deqn{D = 0.21*exp(-0.070*absorber+0.0005*absorber^2)}
+
+which is valid for depths between 150-5000 g/cm^2. For shallower depths (<
+150 g/cm^2) they provided a graph (Fig. 1) from which the dose rate can be
+read.
+
+As a result, this function employs the equation of Prescott & Hutton (1994)
+only for depths > 167 g/cm^2, i.e. only for the hard-component of the cosmic
+ray flux. Cosmic dose rate values for depths < 167 g/cm^2 were obtained from
+the "AGE" program (Gruen 2009) and fitted with a 6-degree polynomial curve
+(hence reproducing the graph shown in Prescott & Hutton 1988). However,
+these values assume an average overburden density of 2 g/cm^3.
+
+It is currently not possible to obtain more precise cosmic dose rate values
+for near-surface samples as there is no equation known to the author of this
+function at the time of writing.
+}
+\section{Function version}{
+ 0.5.2 (2015-11-29 17:27:48)
+}
+\examples{
+
+##(1) calculate cosmic dose rate (one absorber)
+calc_CosmicDoseRate(depth = 2.78, density = 1.7,
+                    latitude = 38.06451, longitude = 1.49646,
+                    altitude = 364, error = 10)
+
+##(2a) calculate cosmic dose rate (two absorber)
+calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7),
+                    latitude = 38.06451, longitude = 1.49646,
+                    altitude = 364, error = 10)
+
+##(2b) calculate cosmic dose rate (two absorber) and
+##correct for geomagnetic field changes
+calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7),
+                    latitude = 12.04332, longitude = 4.43243,
+                    altitude = 364, corr.fieldChanges = TRUE,
+                    est.age = 67, error = 15)
+
+
+##(3) calculate cosmic dose rate and export results to .csv file
+#calculate cosmic dose rate and save to variable
+results<- calc_CosmicDoseRate(depth = 2.78, density = 1.7,
+                              latitude = 38.06451, longitude = 1.49646,
+                              altitude = 364, error = 10)
+
+# the results can be accessed by
+get_RLum(results, "summary")
+
+#export results to .csv file - uncomment for usage
+#write.csv(results, file = "c:/users/public/results.csv")
+
+##(4) calculate cosmic dose rate for 6 samples from the same profile
+##    and save to .csv file
+#calculate cosmic dose rate and save to variable
+results<- calc_CosmicDoseRate(depth = c(0.1, 0.5 , 2.1, 2.7, 4.2, 6.3),
+                              density = 1.7, latitude = 38.06451,
+                              longitude = 1.49646, altitude = 364,
+                              error = 10)
+
+#export results to .csv file - uncomment for usage
+#write.csv(results, file = "c:/users/public/results_profile.csv")
+
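+##(5) calculation step (1) done by hand (a sketch): total absorber for
+##    example (2a) in hg/cm^2 (1 m * 1 g/cm^3 = 100 g/cm^2 = 1 hg/cm^2)
+depth   <- c(5.0, 2.78)   # m
+density <- c(2.65, 1.7)   # g/cm^3
+sum(depth * density)      # absorber in hg/cm^2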
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\references{
+Allkofer, O.C., Carstensen, K., Dau, W.D., Jokisch, H., 1975.
+Letter to the editor. The absolute cosmic ray flux at sea level. Journal of
+Physics G: Nuclear and Particle Physics 1, L51-L52. \cr\cr Barbouti, A.I.,
+Rastin, B.C., 1983. A study of the absolute intensity of muons at sea level
+and under various thicknesses of absorber. Journal of Physics G: Nuclear and
+Particle Physics 9, 1577-1595. \cr\cr Crookes, J.N., Rastin, B.C., 1972. An
+investigation of the absolute intensity of muons at sea-level. Nuclear
+Physics B 39, 493-508.  \cr\cr Gruen, R., 2009. The "AGE" program for the
+calculation of luminescence age estimates. Ancient TL 27, 45-46. \cr\cr
+Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for
+TL and ESR. Nuclear Tracks and Radiation Measurements 14, 223-227. \cr\cr
+Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates
+for luminescence and ESR dating: large depths and long-term time variations.
+Radiation Measurements 23, 497-500. \cr\cr Prescott, J.R., Stephan, L.G.,
+1982. The contribution of cosmic radiation to the environmental dose for
+thermoluminescence dating. Latitude, altitude and depth dependences. PACT 6,
+17-25.
+}
+\seealso{
+\code{\link{BaseDataSet.CosmicDoseRate}}
+}
+
diff --git a/man/calc_FadingCorr.Rd b/man/calc_FadingCorr.Rd
new file mode 100644
index 0000000..0b188c5
--- /dev/null
+++ b/man/calc_FadingCorr.Rd
@@ -0,0 +1,172 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_FadingCorr.R
+\name{calc_FadingCorr}
+\alias{calc_FadingCorr}
+\title{Apply a fading correction according to Huntley & Lamothe (2001) for a given
+g-value and a given tc}
+\usage{
+calc_FadingCorr(age.faded, g_value, tc = NULL, tc.g_value = tc,
+  n.MC = 10000, seed = NULL, txtProgressBar = TRUE, verbose = TRUE)
+}
+\arguments{
+\item{age.faded}{\code{\link{numeric}} \code{\link{vector}} (\bold{required}): uncorrected
+age with error in ka (see example)}
+
+\item{g_value}{\code{\link{vector}} (\bold{required}): g-value and error obtained
+from separate fading measurements (see example). Alternatively, an \code{\linkS4class{RLum.Results}} object
+produced by the function \code{analyse_FadingMeasurement} can be provided; in this case tc is set
+automatically}
+
+\item{tc}{\code{\link{numeric}} (\bold{required}): time in seconds between
+irradiation and the prompt measurement (cf. Huntley & Lamothe 2001). Argument will be ignored
+if \code{g_value} was an \code{RLum.Results} object}
+
+\item{tc.g_value}{\code{\link{numeric}} (with default): the time in seconds between irradiation
+and the prompt measurement used for estimating the g-value. If the g-value was normalised
+to, e.g., 2 days, this time in seconds (i.e., 172800) should be given here. If nothing is provided
+the time is set to tc, which is the usual case for g-values obtained using the SAR method and
+for g-values that have not been normalised to 2 days.}
+
+\item{n.MC}{\code{\link{integer}} (with default): number of Monte Carlo
+simulation runs for error estimation. If \code{n.MC = 'auto'} is used the function
+tries to find a 'stable' error for the age. Note: This may take a while!}
+
+\item{seed}{\code{\link{integer}} (optional): sets the seed for the random number generator
+in R using \code{\link{set.seed}}}
+
+\item{txtProgressBar}{\link{logical} (with default): enables or disables
+\code{\link{txtProgressBar}}}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal output}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.\cr
+
+Slot: \bold{@data}\cr
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Comment}\cr
+ \code{age.corr} \tab \code{data.frame} \tab Corrected age \cr
+ \code{age.corr.MC} \tab \code{numeric} \tab MC simulation results with all possible ages from
+ that simulation\cr
+}
+
+Slot: \bold{@info}\cr
+
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Comment}\cr
+ \code{info} \tab \code{character} \tab the original function call
+
+}
+}
+\description{
+This function solves the equation used for correcting the fading affected age
+including the error for a given g-value according to Huntley & Lamothe (2001).
+}
+\details{
+As the g-value slightly depends on the time between irradiation and the prompt measurement
+(this is tc), a tc value always needs to be provided. If the g-value was normalised to a distinct
+time or evaluated with a different tc value (e.g., external irradiation), the tc value
+for the g-value needs to be provided as well (argument \code{tc.g_value}); the g-value is then recalculated
+to the tc of the measurement used for estimating the age, applying the following equation:
+
+\deqn{\kappa_{tc} = \kappa_{tc.g} / (1 - \kappa_{tc.g} * log(tc/tc.g))}
+
+where
+
+\deqn{\kappa_{tc.g} = g / 100 / log(10)}
+
+with \eqn{log} the natural logarithm.
+
+The error of the fading-corrected age is determined using a Monte Carlo
+simulation approach. Solving of the equation is realised using
+\code{\link{uniroot}}. Large values for \code{n.MC} will significantly
+increase the computation time.\cr
+
+\bold{\code{n.MC = 'auto'}}
+
+The error estimation is based on a stochastic process, i.e. for a small number of MC runs the calculated
+error varies considerably every time the function is called, even with the same input values.
+The argument option \code{n.MC = 'auto'} tries to find a stable value for the standard error, i.e.
+the standard deviation of values calculated during the MC runs (\code{age.corr.MC}),
+within a given precision (2 digits) by increasing the number of MC runs stepwise and
+calculating the corresponding error.
+
+If the determined error does not differ from the 9 values calculated previously
+within a precision of (here) 3 digits the calculation is stopped as it is assumed that the error
+is stable. Please note that (a) the duration depends on the input values as well as on
+the available computing resources and it may take a while, (b) the length (size) of the output
+vector \code{age.corr.MC}, where all the single values produced during the MC runs are stored,
+equals the number of MC runs (here termed observations).
+
+To avoid an endless loop the calculation is stopped if the number of observations exceeds 10^7.
+This limitation can be overwritten by setting the number of MC runs manually,
+e.g. \code{n.MC = 10000001}. Note: For this case the function is not checking whether the calculated
+error is stable.\cr
+
+\bold{\code{seed}}
+
+This option allows to recreate previously calculated results by setting the seed
+for the R random number generator (see \code{\link{set.seed}} for details). This option
+should not be mixed up with the option \bold{\code{n.MC = 'auto'}}. The results may
+appear similar, but they are not comparable!\cr
+
+\bold{FAQ}\cr
+Q: Which tc value is expected?\cr
+A: tc is the time in seconds between irradiation and the prompt measurement applied during your
+De measurement. However, this tc might differ from the tc used for estimating the g-value. In the
+case of an SAR measurement tc should be similar, however, if it differs, you have to provide this
+tc value (the one used for estimating the g-value) using the argument \code{tc.g_value}.\cr
+}
+\note{
+The upper age limit is set to 500 ka! \cr
+Special thanks to Sebastien Huot for his support and clarification via e-mail.
+}
+\section{Function version}{
+ 0.4.1 (2016-07-21 10:36:31)
+}
+\examples{
+
+##run the examples given in the appendix of Huntley and Lamothe, 2001
+
+##(1) faded age: 100 a
+results <- calc_FadingCorr(
+   age.faded = c(0.1,0),
+   g_value = c(5.0, 1.0),
+   tc = 2592000,
+   tc.g_value = 172800,
+   n.MC = 100)
+
+##(2) faded age: 1 ka
+results <- calc_FadingCorr(
+   age.faded = c(1,0),
+   g_value = c(5.0, 1.0),
+   tc = 2592000,
+   tc.g_value = 172800,
+   n.MC = 100)
+
+##(3) faded age: 10.0 ka
+results <- calc_FadingCorr(
+   age.faded = c(10,0),
+   g_value = c(5.0, 1.0),
+   tc = 2592000,
+   tc.g_value = 172800,
+   n.MC = 100)
+
+##access the last output
+get_RLum(results)
+
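+##(4) rescale a g-value from tc.g to tc by hand (a sketch of the equation
+##    given in the details section)
+g    <- 5.0                # g-value in \%/decade, measured at tc.g
+tc   <- 2592000            # seconds
+tc.g <- 172800             # seconds
+k.g  <- g / 100 / log(10)  # kappa at tc.g
+k    <- k.g / (1 - k.g * log(tc / tc.g))
+k * 100 * log(10)          # g-value rescaled to tc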
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading
+in K-feldspars and the measurement and correction for it in optical dating.
+Canadian Journal of Earth Sciences, 38, 1093-1106.
+}
+\seealso{
+\code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
+\code{\link{uniroot}}
+}
+\keyword{datagen}
+
diff --git a/man/calc_FastRatio.Rd b/man/calc_FastRatio.Rd
new file mode 100644
index 0000000..ce34947
--- /dev/null
+++ b/man/calc_FastRatio.Rd
@@ -0,0 +1,107 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_FastRatio.R
+\name{calc_FastRatio}
+\alias{calc_FastRatio}
+\title{Calculate the Fast Ratio for CW-OSL curves}
+\usage{
+calc_FastRatio(object, stimulation.power = 30.6, wavelength = 470,
+  sigmaF = 2.6e-17, sigmaM = 4.28e-18, Ch_L1 = 1, x = 1, x2 = 0.1,
+  dead.channels = c(0, 0), fitCW.sigma = FALSE, fitCW.curve = FALSE,
+  plot = TRUE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}}, 
+\code{\linkS4class{RLum.Data.Curve}} or \code{\link{data.frame}} 
+(\bold{required}): x, y data of measured values (time and counts).}
+
+\item{stimulation.power}{\code{\link{numeric}} (with default): Stimulation power in mW/cm^2}
+
+\item{wavelength}{\code{\link{numeric}} (with default): Stimulation wavelength in nm}
+
+\item{sigmaF}{\code{\link{numeric}} (with default): Photoionisation cross-section (cm^2) of the
+fast component. Default value after Durcan & Duller (2011).}
+
+\item{sigmaM}{\code{\link{numeric}} (with default): Photoionisation cross-section (cm^2) of the
+medium component. Default value after Durcan & Duller (2011).}
+
+\item{Ch_L1}{\code{\link{numeric}} (with default): An integer specifying the channel for L1.}
+
+\item{x}{\code{\link{numeric}} (with default): \% of signal remaining from the fast component.
+Used to define the location of L2 and L3 (start).}
+
+\item{x2}{\code{\link{numeric}} (with default): \% of signal remaining from the medium component.
+Used to define the location of L3 (end).}
+
+\item{dead.channels}{\code{\link{numeric}} (with default): Vector of length 2 in the form of
+\code{c(x, y)}. Channels that do not contain OSL data, i.e. at the start or end of
+measurement.}
+
+\item{fitCW.sigma}{\code{\link{logical}} (optional): fit CW-OSL curve using \code{\link{fit_CWCurve}}
+to calculate \code{sigmaF} and \code{sigmaM} (experimental).}
+
+\item{fitCW.curve}{\code{\link{logical}} (optional): fit CW-OSL curve using \code{\link{fit_CWCurve}}
+and derive the counts of L2 and L3 from the fitted OSL curve (experimental).}
+
+\item{plot}{\code{\link{logical}} (with default): plot output (\code{TRUE}/\code{FALSE})}
+
+\item{...}{available options: \code{verbose} (\code{\link{logical}}). Further
+arguments passed to \code{\link{fit_CWCurve}}.}
+}
+\value{
+Returns a plot (optional) and an S4 object of type \code{\linkS4class{RLum.Results}}. 
+The slot \code{data} contains a \code{\link{list}} with the following elements:\cr
+
+\item{summary}{\code{\link{data.frame}} summary of all relevant results}
+\item{data}{the original input data}
+\item{fit}{\code{\linkS4class{RLum.Results}} object if either \code{fitCW.sigma} or \code{fitCW.curve} is \code{TRUE}}
+\item{args}{\code{\link{list}} of used arguments}
+\item{call}{\code{\link{call}} the function call}
+}
+\description{
+Function to calculate the fast ratio of quartz CW-OSL single grain or single 
+aliquot curves after Durcan & Duller (2011).
+}
+\details{
+This function follows the equations of Durcan & Duller (2011). The energy
+required to reduce the fast and medium quartz OSL components to \code{x} and
+\code{x2} \% respectively is computed using eq. 3, which determines channels L2
+and L3 (start and end). The fast ratio is then calculated as \eqn{(L1-L3)/(L2-L3)}.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
+\examples{
+# load example CW-OSL curve
+data("ExampleData.CW_OSL_Curve")
+
+# calculate the fast ratio w/o further adjustments
+res <- calc_FastRatio(ExampleData.CW_OSL_Curve)
+
+# show the summary table
+get_RLum(res)
+
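+# the ratio itself is (L1 - L3)/(L2 - L3); with hypothetical
+# background-corrected channel sums this reads:
+L1 <- 50000; L2 <- 3000; L3 <- 500  # made-up counts
+(L1 - L3) / (L2 - L3)               # fast ratio ~ 19.8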
+}
+\author{
+Georgina King, University of Cologne (Germany) \cr
+Julie A. Durcan, University of Oxford (United Kingdom) \cr
+Christoph Burow, University of Cologne (Germany) \cr
+\cr R Luminescence Package Team}
+\references{
+Durcan, J.A. & Duller, G.A.T., 2011. The fast ratio: A rapid measure for testing
+the dominance of the fast component in the initial OSL signal from quartz.
+Radiation Measurements 46, 1065-1072. \cr\cr
+
+Madsen, A.T., Duller, G.A.T., Donnelly, J.P., Roberts, H.M. & Wintle, A.G., 2009.
+A chronology of hurricane landfalls at Little Sippewissett Marsh, Massachusetts, USA,
+using optical dating. Geomorphology 109, 36-45. \cr\cr
+
+\bold{Further reading} \cr\cr
+
+Steffen, D., Preusser, F. & Schlunegger, F., 2009. OSL quartz age underestimation
+due to unstable signal components. Quaternary Geochronology 4, 353-362.
+}
+\seealso{
+\code{\link{fit_CWCurve}}, \code{\link{get_RLum}}, \code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+
diff --git a/man/calc_FiniteMixture.Rd b/man/calc_FiniteMixture.Rd
new file mode 100644
index 0000000..2f5bb59
--- /dev/null
+++ b/man/calc_FiniteMixture.Rd
@@ -0,0 +1,170 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_FiniteMixture.R
+\name{calc_FiniteMixture}
+\alias{calc_FiniteMixture}
+\title{Apply the finite mixture model (FMM) after Galbraith (2005) to a given De
+distribution}
+\usage{
+calc_FiniteMixture(data, sigmab, n.components, grain.probability = FALSE,
+  dose.scale, pdf.weight = TRUE, pdf.sigma = "sigmab",
+  pdf.colors = "gray", pdf.scale, plot.proportions = TRUE, plot = TRUE,
+  ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{sigmab}{\code{\link{numeric}} (\bold{required}): spread in De values
+given as a fraction (e.g. 0.2). This value represents the expected
+overdispersion in the data should the sample be well-bleached (Cunningham &
+Wallinga 2012, p. 100).}
+
+\item{n.components}{\code{\link{numeric}} (\bold{required}): number of
+components to be fitted. If a vector is provided (e.g. \code{c(2:8)}) the
+finite mixtures for 2, 3 ... 8 components are calculated and a plot and a
+statistical evaluation of the model performance (BIC score and maximum
+log-likelihood) is provided.}
+
+\item{grain.probability}{\code{\link{logical}} (with default): prints the
+estimated probabilities of which component each grain is in}
+
+\item{dose.scale}{\code{\link{numeric}}: manually set the scaling of the
+y-axis of the first plot with a vector in the form of \code{c(min,max)}}
+
+\item{pdf.weight}{\code{\link{logical}} (with default): weight the
+probability density functions by the components proportion (applies only
+when a vector is provided for \code{n.components})}
+
+\item{pdf.sigma}{\code{\link{character}} (with default): if \code{"sigmab"}
+the components' normal distributions are plotted with a common standard
+deviation (i.e. \code{sigmab}) as assumed by the FMM. Alternatively,
+\code{"se"} takes the standard error of each component for the sigma
+parameter of the normal distribution}
+
+\item{pdf.colors}{\code{\link{character}} (with default): color coding of
+the components in the plot. Possible options are "gray", "colors" and
+"none"}
+
+\item{pdf.scale}{\code{\link{numeric}}: manually set the max density value
+for proper scaling of the x-axis of the first plot}
+
+\item{plot.proportions}{\code{\link{logical}} (with default): plot barplot
+showing the proportions of components}
+
+\item{plot}{\code{\link{logical}} (with default): plot output}
+
+\item{\dots}{further arguments to pass.  See details for their usage.}
+}
+\value{
+Returns a plot (optional) and terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call} \item{mle}{
+covariance matrices of the log-likelihoods} \item{BIC}{ BIC score}
+\item{llik}{ maximum log-likelihood} \item{grain.probability}{ probabilities
+of a grain belonging to a component} \item{components}{\link{matrix}
+estimates of the De, De error and proportion for each component}
+\item{single.comp}{\link{data.frame} single-component FMM estimate}
+
+If a vector for \code{n.components} is provided (e.g.  \code{c(2:8)}),
+\code{mle} and \code{grain.probability} are lists containing matrices of the
+results for each iteration of the model.
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+This function fits a k-component mixture to a De distribution with differing
+known standard errors. Parameters (doses and mixing proportions) are
+estimated by maximum likelihood assuming that the log dose estimates are
+from a mixture of normal distributions.
+}
+\details{
+This model uses the maximum likelihood and Bayesian Information Criterion
+(BIC) approaches. \cr\cr Indications of overfitting are: \cr\cr - increasing
+BIC \cr - repeated dose estimates \cr - covariance matrix not positive
+definite \cr - covariance matrix produces NaNs\cr - convergence problems
+\cr\cr \bold{Plot} \cr\cr If a vector (\code{c(k.min:k.max)}) is provided
+for \code{n.components} a plot is generated showing the k components'
+equivalent doses as normal distributions. If \code{pdf.weight} is
+set to \code{FALSE}, the area under each normal distribution is
+always 1. By default (\code{TRUE}), the probability density functions are
+weighted by the components' proportion for each iteration of k components,
+so the sum of the areas of all components equals 1. While the density values
+are on the same scale when no weights are used, the y-axes are individually
+scaled if the probability densities are weighted by the components'
+proportion. \cr The
+standard deviation (sigma) of the normal distributions is by default
+determined by a common \code{sigmab} (see \code{pdf.sigma}). For
+\code{pdf.sigma = "se"} the standard error of each component is taken
+instead.\cr The stacked barplot shows the proportion of each component (in
+per cent) calculated by the FMM. The last plot shows the achieved BIC scores
+and maximum log-likelihood estimates for each iteration of k.
+}
+\section{Function version}{
+ 0.4 (2016-05-02 09:36:06)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## (1) apply the finite mixture model
+## NOTE: the data set is not suitable for the finite mixture model,
+## which is why a very small sigmab is necessary
+calc_FiniteMixture(ExampleData.DeValues$CA1,
+                   sigmab = 0.2, n.components = 2,
+                   grain.probability = TRUE)
+
+## (2) repeat the finite mixture model for a maximum of 2, 3 and 4 fitted
+## components and save the results
+## NOTE: The following example is computationally intensive.
+FMM<- calc_FiniteMixture(ExampleData.DeValues$CA1,
+                         sigmab = 0.2, n.components = c(2:4),
+                         pdf.weight = TRUE, dose.scale = c(0, 100))
+
+## show structure of the results
+FMM
+
+## show the results on equivalent dose, standard error and proportion of
+## fitted components
+get_RLum(object = FMM, data.object = "components")
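+
+## the BIC scores and maximum log-likelihood values can be retrieved in the
+## same way; the element names ("BIC", "llik") are taken from the value
+## section above
+get_RLum(object = FMM, data.object = "BIC")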
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2006. \cr
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R.F. & Green, P.F., 1990. Estimating the component
+ages in a finite mixture. Nuclear Tracks and Radiation Measurements 17,
+197-206. \cr\cr Galbraith, R.F. & Laslett, G.M., 1993. Statistical models
+for mixed fission track ages. Nuclear Tracks and Radiation Measurements 21,
+459-470.\cr\cr Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of
+equivalent dose and error calculation and display in OSL dating: An overview
+and some recommendations. Quaternary Geochronology 11, 1-27.\cr\cr Roberts,
+R.G., Galbraith, R.F., Yoshida, H., Laslett, G.M. & Olley, J.M., 2000.
+Distinguishing dose populations in sediment mixtures: a test of single-grain
+optical dating procedures using mixtures of laboratory-dosed quartz.
+Radiation Measurements 32, 459-465.\cr\cr Galbraith, R.F., 2005. Statistics
+for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton.\cr\cr
+\bold{Further reading}\cr\cr Arnold, L.J. & Roberts, R.G., 2009. Stochastic
+modelling of multi-grain equivalent dose (De) distributions: Implications
+for OSL dating of sediment mixtures. Quaternary Geochronology 4,
+204-230.\cr\cr Cunningham, A.C. & Wallinga, J., 2012. Realizing the
+potential of fluvial archives using robust OSL chronologies. Quaternary
+Geochronology 12, 98-106.\cr\cr Rodnight, H., Duller, G.A.T., Wintle, A.G. &
+Tooth, S., 2006. Assessing the reproducibility and accuracy of optical
+dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.\cr\cr
+Rodnight, H. 2008. How many equivalent dose values are needed to obtain a
+reproducible distribution?. Ancient TL 26, 3-10.
+}
+\seealso{
+\code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+\code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+}
+
diff --git a/man/calc_FuchsLang2001.Rd b/man/calc_FuchsLang2001.Rd
new file mode 100644
index 0000000..bafbd28
--- /dev/null
+++ b/man/calc_FuchsLang2001.Rd
@@ -0,0 +1,93 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_FuchsLang2001.R
+\name{calc_FuchsLang2001}
+\alias{calc_FuchsLang2001}
+\title{Apply the model after Fuchs & Lang (2001) to a given De distribution.}
+\usage{
+calc_FuchsLang2001(data, cvThreshold = 5, startDeValue = 1, plot = TRUE,
+  ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{cvThreshold}{\link{numeric} (with default): coefficient of variation
+in percent, as threshold for the method, e.g. \code{cvThreshold = 3}. See
+details.}
+
+\item{startDeValue}{\link{numeric} (with default): number of the first
+aliquot that is used for the calculations}
+
+\item{plot}{\link{logical} (with default): plot output
+\code{TRUE}/\code{FALSE}}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot}}}
+}
+\value{
+Returns a plot (optional) and terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+\item{usedDeValues}{\link{data.frame} containing the used values for the
+calculation}
+}
+\description{
+This function applies the method according to Fuchs & Lang (2001) for
+heterogeneously bleached samples with a given coefficient of variation
+threshold.
+}
+\details{
+\bold{Used values} \cr If the coefficient of variation (c[v]) of the first
+two values is larger than the threshold c[v_threshold], the first value is
+skipped.  Use the \code{startDeValue} argument to define a start value for
+calculation (e.g. 2nd or 3rd value).\cr
+
+\bold{Basic steps of the approach} \cr
+
+(1) Estimate natural relative variation of the sample using a dose recovery
+test\cr (2) Sort the input values in ascending order\cr (3) Calculate a running
+mean, starting with the lowermost two values and adding further values iteratively.\cr
+(4) Stop if the calculated c[v] exceeds the specified \code{cvThreshold}\cr
+}
+\note{
+Please consider the requirements and the constraints of this method
+(see Fuchs & Lang, 2001)
+}
+\section{Function version}{
+ 0.4.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##load example data
+data(ExampleData.DeValues, envir = environment())
+
+##calculate De according to Fuchs & Lang (2001)
+temp <- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5)
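+
+##the returned RLum.Results object can be inspected further; the element
+##names ("summary", "usedDeValues") are taken from the value section above
+get_RLum(temp, data.object = "usedDeValues")
+
+##the running-mean/CV idea from the details section can be sketched in
+##base R (an illustration, not the function's exact implementation)
+de <- sort(ExampleData.DeValues$BT998[[1]])
+for (i in 2:length(de)) {
+  if (100 * sd(de[1:i]) / mean(de[1:i]) > 5) {
+    print(mean(de[1:(i - 1)]))
+    break
+  }
+}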
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France) Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\references{
+Fuchs, M. & Lang, A., 2001. OSL dating of coarse-grain fluvial
+quartz using single-aliquot protocols on sediments from NE Peloponnese,
+Greece. Quaternary Science Reviews 20, 783-787.
+
+Fuchs, M. & Wagner, G.A., 2003. Recognition of insufficient bleaching by
+small aliquots of quartz for reconstructing soil erosion in Greece.
+Quaternary Science Reviews 22, 1161-1167.
+}
+\seealso{
+\code{\link{plot}}, \code{\link{calc_MinDose}},
+\code{\link{calc_FiniteMixture}}, \code{\link{calc_CentralDose}},
+\code{\link{calc_CommonDose}}, \code{\linkS4class{RLum.Results}}
+}
+\keyword{dplot}
+
diff --git a/man/calc_HomogeneityTest.Rd b/man/calc_HomogeneityTest.Rd
new file mode 100644
index 0000000..99f1690
--- /dev/null
+++ b/man/calc_HomogeneityTest.Rd
@@ -0,0 +1,59 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_HomogeneityTest.R
+\name{calc_HomogeneityTest}
+\alias{calc_HomogeneityTest}
+\title{Apply a simple homogeneity test after Galbraith (2003)}
+\usage{
+calc_HomogeneityTest(data, log = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{log}{\code{\link{logical}} (with default): perform the homogeneity test
+with (un-)logged data}
+
+\item{\dots}{further arguments (for internal compatibility only).}
+}
+\value{
+Returns a terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following element:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+A simple homogeneity test for De estimates
+}
+\details{
+For details see Galbraith (2003).
+}
+\section{Function version}{
+ 0.2 (2016-05-02 09:36:06)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## apply the homogeneity test
+calc_HomogeneityTest(ExampleData.DeValues$BT998)
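+
+## store the results and access the summary table; the element name
+## ("summary") is taken from the value section above
+htest <- calc_HomogeneityTest(ExampleData.DeValues$BT998)
+get_RLum(htest, data.object = "summary")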
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R.F., 2003. A simple homogeneity test for estimates
+of dose obtained using OSL. Ancient TL 21, 75-77.
+}
+\seealso{
+\code{\link{pchisq}}
+}
+
diff --git a/man/calc_IEU.Rd b/man/calc_IEU.Rd
new file mode 100644
index 0000000..9b4aeac
--- /dev/null
+++ b/man/calc_IEU.Rd
@@ -0,0 +1,81 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_IEU.R
+\name{calc_IEU}
+\alias{calc_IEU}
+\title{Apply the internal-external-uncertainty (IEU) model after Thomsen et al.
+(2007) to a given De distribution}
+\usage{
+calc_IEU(data, a, b, interval, decimal.point = 2, plot = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{a}{\code{\link{numeric}}: slope}
+
+\item{b}{\code{\link{numeric}}: intercept}
+
+\item{interval}{\code{\link{numeric}}: fixed interval (e.g. 5 Gy) used for
+the iteration of Dbar, from the mean to Lowest.De, used to create
+Graph.IEU [Dbar.Fixed vs Z]}
+
+\item{decimal.point}{\code{\link{numeric}} (with default): number of decimal
+points for rounding calculations (e.g. 2)}
+
+\item{plot}{\code{\link{logical}} (with default): plot output}
+
+\item{\dots}{further arguments (\code{trace, verbose}).}
+}
+\value{
+Returns a plot (optional) and terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following element:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+\item{tables}{\link{list} a list of data frames containing all calculation
+tables}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}.
+}
+\description{
+Function to calculate the IEU De for a De data set.
+}
+\details{
+This function uses the equations of Thomsen et al. (2007).  The parameters a
+and b are estimated from dose-recovery experiments.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+## load data
+data(ExampleData.DeValues, envir = environment())
+
+## apply the IEU model
+ieu <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1)
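+
+## inspect the results; the element names ("summary", "tables") are
+## taken from the value section above
+get_RLum(ieu, data.object = "summary")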
+
+}
+\author{
+Rachel Smedley, Geography & Earth Sciences, Aberystwyth University
+(United Kingdom) \cr Based on an excel spreadsheet and accompanying macro
+written by Kristina Thomsen.
+\cr R Luminescence Package Team}
+\references{
+Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
+Ancient TL 33, 16-21.
+
+Thomsen, K.J., Murray, A.S., Boetter-Jensen, L. & Kinahan, J.,
+2007. Determination of burial dose in incompletely bleached fluvial samples
+using single grains of quartz. Radiation Measurements 42, 370-379.
+}
+\seealso{
+\code{\link{plot}}, \code{\link{calc_CommonDose}},
+\code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
+\code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+}
+
diff --git a/man/calc_MaxDose.Rd b/man/calc_MaxDose.Rd
new file mode 100644
index 0000000..3cb7e89
--- /dev/null
+++ b/man/calc_MaxDose.Rd
@@ -0,0 +1,112 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_MaxDose.R
+\name{calc_MaxDose}
+\alias{calc_MaxDose}
+\title{Apply the maximum age model to a given De distribution}
+\usage{
+calc_MaxDose(data, sigmab, log = TRUE, par = 3, bootstrap = FALSE,
+  init.values, plot = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{sigmab}{\code{\link{numeric}} (\bold{required}): spread in De values
+given as a fraction (e.g. 0.2). This value represents the expected
+overdispersion in the data should the sample be well-bleached (Cunningham &
+Wallinga 2012, p. 100).}
+
+\item{log}{\code{\link{logical}} (with default): fit the (un-)logged three
+parameter minimum dose model to De data}
+
+\item{par}{\code{\link{numeric}} (with default): apply the 3- or
+4-parametric minimum age model (\code{par=3} or \code{par=4}).}
+
+\item{bootstrap}{\code{\link{logical}} (with default): apply the recycled
+bootstrap approach of Cunningham & Wallinga (2012).}
+
+\item{init.values}{\code{\link{numeric}} (with default): starting values for
+gamma, sigma and p0. Custom values need to be provided in a vector of
+length three in the form of \code{c(gamma, sigma, p0)}.}
+
+\item{plot}{\code{\link{logical}} (with default): plot output
+(\code{TRUE}/\code{FALSE})}
+
+\item{\dots}{further arguments for bootstrapping (\code{bs.M, bs.N, bs.h,
+sigmab.sd}).  See details for their usage.}
+}
+\value{
+Please see \code{\link{calc_MinDose}}.
+}
+\description{
+Function to fit the maximum age model to De data. This is a wrapper function
+that calls \code{\link{calc_MinDose}} and applies a similar approach to that
+described in Olley et al. (2006).
+}
+\details{
+\bold{Data transformation} \cr\cr To estimate the maximum dose population
+and its standard error, the three parameter minimum age model of Galbraith
+et al. (1999) is adapted. The measured De values are transformed as follows:
+\cr\cr 1. convert De values to natural logs \cr 2. multiply the logged data
+by -1 to create a mirror image of the De distribution\cr 3. shift the De values along the
+x-axis by the smallest x-value found to obtain only positive values \cr 4.
+combine in quadrature the measurement error associated with each De value
+with a relative error specified by sigmab \cr 5. apply the MAM to these data
+\cr\cr When all calculations are done the results are then converted as
+follows\cr\cr 1. subtract the x-offset \cr 2. multiply the natural logs by
+-1 \cr 3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
+\bold{Further documentation} \cr\cr Please see \code{\link{calc_MinDose}}.
+}
+\section{Function version}{
+ 0.3 (2015-11-29 17:27:48)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+# apply the maximum dose model
+calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3)
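+
+# The data transformation described in the details section can be
+# illustrated in base R (a sketch, not the internal code):
+de <- ExampleData.DeValues$CA1[[1]]
+de.log <- -log(de)                # steps 1-2: natural logs, mirrored
+de.shift <- de.log - min(de.log)  # step 3: shift to positive values
+# reversing the steps, exp(-(de.shift + min(de.log))) recovers De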
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr
+\cr R Luminescence Package Team}
+\references{
+Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
+2009. A revised burial dose estimation procedure for optical dating of young
+and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+track ages. Nuclear Tracks and Radiation Measurements 21, 459-470. \cr\cr
+Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
+1999. Optical dating of single grains of quartz from Jinmium rock shelter,
+northern Australia. Part I: experimental design and statistical models.
+Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
+Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
+R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+calculation and display in OSL dating: An overview and some recommendations.
+Quaternary Geochronology 11, 1-27. \cr\cr Olley, J.M., Roberts, R.G.,
+Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+associated with human burials at Lake Mungo, Australia. Quaternary Science
+Reviews 25, 2469-2474.\cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
+Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+(De) distributions: Implications for OSL dating of sediment mixtures.
+Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
+2006. Statistical modelling of single grain quartz De distributions and an
+assessment of procedures for estimating burial dose. Quaternary Science
+Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
+\cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
+obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
+}
+\seealso{
+\code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+\code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+\code{\link{calc_MinDose}}
+}
+
diff --git a/man/calc_MinDose.Rd b/man/calc_MinDose.Rd
new file mode 100644
index 0000000..c4efe80
--- /dev/null
+++ b/man/calc_MinDose.Rd
@@ -0,0 +1,274 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_MinDose.R
+\name{calc_MinDose}
+\alias{calc_MinDose}
+\title{Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999)
+to a given De distribution}
+\usage{
+calc_MinDose(data, sigmab, log = TRUE, par = 3, bootstrap = FALSE,
+  init.values, level = 0.95, plot = TRUE, multicore = FALSE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De \code{(data[
+,1])} and De error \code{(data[ ,2])}}
+
+\item{sigmab}{\code{\link{numeric}} (\bold{required}): spread in De values
+given as a fraction (e.g. 0.2). This value represents the expected
+overdispersion in the data should the sample be well-bleached (Cunningham &
+Wallinga 2012, p. 100).}
+
+\item{log}{\code{\link{logical}} (with default): fit the (un-)logged minimum
+dose model to De data}
+
+\item{par}{\code{\link{numeric}} (with default): apply the 3- or
+4-parametric minimum age model (\code{par=3} or \code{par=4}). The MAM-3 is
+used by default.}
+
+\item{bootstrap}{\code{\link{logical}} (with default): apply the recycled
+bootstrap approach of Cunningham & Wallinga (2012).}
+
+\item{init.values}{\code{\link{numeric}} (optional): a named list with
+starting values for gamma, sigma, p0 and mu (e.g. \code{list(gamma = 100,
+sigma = 1.5, p0 = 0.1, mu = 100)}). If no values are provided, the function
+attempts to estimate reasonable starting values from the data.}
+
+\item{level}{\code{\link{numeric}} (with default): the confidence level
+required (defaults to 0.95).}
+
+\item{plot}{\code{\link{logical}} (with default): plot output
+(\code{TRUE}/\code{FALSE})}
+
+\item{multicore}{\code{\link{logical}} (with default): enable parallel
+computation of the bootstrap by creating a multicore SNOW cluster. Depending
+on the number of available logical CPU cores this will drastically reduce
+the computation time. Note that this option is highly experimental and may
+not work on all machines. (\code{TRUE}/\code{FALSE})}
+
+\item{\dots}{(optional) further arguments for bootstrapping (\code{bs.M,
+bs.N, bs.h, sigmab.sd}).  See details for their usage. Further arguments are
+\code{verbose} to de-/activate console output (logical), \code{debug} for
+extended console output (logical) and \code{cores} (integer) to manually
+specify the number of cores to be used when \code{multicore=TRUE}.}
+}
+\value{
+Returns a plot (optional) and terminal output. In addition an
+\code{\linkS4class{RLum.Results}} object is returned containing the
+following elements:
+
+\item{summary}{\link{data.frame} summary of all relevant model results.}
+\item{data}{\link{data.frame} original input data} \item{args}{\link{list}
+used arguments} \item{call}{\link{call} the function call}
+\item{mle}{\link{mle2} object containing the maximum log-likelihood functions
+for all parameters} \item{BIC}{\link{numeric} BIC score}
+\item{confint}{\link{data.frame} confidence intervals for all parameters}
+\item{profile}{\link{profile.mle2} the log likelihood profiles}
+\item{bootstrap}{\link{list} bootstrap results}
+
+The output should be accessed using the function
+\code{\link{get_RLum}}
+}
+\description{
+Function to fit the (un-)logged three or four parameter minimum dose model
+(MAM-3/4) to De data.
+}
+\details{
+\bold{Parameters} \cr\cr This model has four parameters: \cr\cr
+\tabular{rl}{ \code{gamma}: \tab minimum dose on the log scale \cr
+\code{mu}: \tab mean of the non-truncated normal distribution \cr
+\code{sigma}: \tab spread in ages above the minimum \cr \code{p0}: \tab
+proportion of grains at gamma \cr } If \code{par=3} (default) the
+3-parametric minimum age model is applied, where \code{gamma=mu}. For
+\code{par=4} the 4-parametric model is applied instead.\cr\cr
+\bold{(Un-)logged model} \cr\cr In the original version of the
+three-parameter minimum dose model, the basic data are the natural
+logarithms of the De estimates and relative standard errors of the De
+estimates. This model will be applied if \code{log=TRUE}. \cr\cr If
+\code{log=FALSE}, the modified un-logged model will be applied instead. This
+has essentially the same form as the original version.  \code{gamma} and
+\code{sigma} are in Gy and \code{gamma} becomes the minimum true dose in the
+population. \cr\cr While the original (logged) version of the minimum dose
+model may be appropriate for most samples (i.e. De distributions), the
+modified (un-logged) version is specially designed for modern-age and young
+samples containing negative, zero or near-zero De estimates (Arnold et al.
+2009, p. 323). \cr\cr \bold{Initial values & boundaries} \cr\cr The log
+likelihood calculations use the \link{nlminb} function for box-constrained
+optimisation using PORT routines.  Accordingly, initial values for the four
+parameters can be specified via \code{init.values}. If no values are
+provided for \code{init.values} reasonable starting values are estimated
+from the input data.  If the final estimates of \emph{gamma}, \emph{mu},
+\emph{sigma} and \emph{p0} are totally off target, consider providing custom
+starting values via \code{init.values}. \cr In contrast to previous versions
+of this function the boundaries for the individual model parameters are no
+longer required to be explicitly specified. If you want to override the default
+boundary values use the arguments \code{gamma.lower}, \code{gamma.upper},
+\code{sigma.lower}, \code{sigma.upper}, \code{p0.lower}, \code{p0.upper},
+\code{mu.lower} and \code{mu.upper}.  \cr\cr
+\bold{Bootstrap} \cr\cr When
+\code{bootstrap=TRUE} the function applies the bootstrapping method as
+described in Cunningham & Wallinga (2012). By default, the minimum age model
+produces 1000 first level and 3000 second level bootstrap replicates
+(actually, the number of second level bootstrap replicates is three times
+the number of first level replicates unless specified otherwise).  The
+uncertainty on sigmab is 0.04 by default. These values can be changed by
+using the arguments \code{bs.M} (first level replicates), \code{bs.N}
+(second level replicates) and \code{sigmab.sd} (error on sigmab). With
+\code{bs.h} the bandwidth of the kernel density estimate can be specified.
+By default, \code{h} is calculated as \cr \deqn{h =
+(2*\sigma_{DE})/\sqrt{n}} \cr \bold{Multicore support} \cr\cr This function
+supports parallel computing and can be activated by \code{multicore=TRUE}.
+By default, the number of available logical CPU cores is determined
+automatically, but can be changed with \code{cores}. The multicore support
+is only available when \code{bootstrap=TRUE} and spawns \code{n} R instances
+for each core to get MAM estimates for each of the N and M bootstrap
+replicates. Note that this option is highly experimental and may or may not
+work on your machine. The performance gain increases with larger numbers
+of bootstrap replicates. Also note that with each additional core (and hence
+R instance), and depending on the number of bootstrap replicates, the memory
+usage can increase significantly. Make sure that memory is always available,
+otherwise there will be a massive performance hit.
+}
+\note{
+The default starting values for \emph{gamma}, \emph{mu}, \emph{sigma}
+and \emph{p0} may only be appropriate for some De data sets and may need to
+be changed for other data. This is especially true when the un-logged
+version is applied. \cr Also note that all R warning messages are suppressed
+when running this function. If the results seem odd consider re-running the
+model with \code{debug=TRUE} which provides extended console output and
+forwards all internal warning messages.
+}
+\section{Function version}{
+ 0.4.3 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+## Load example data
+data(ExampleData.DeValues, envir = environment())
+
+# (1) Apply the minimum age model with minimum required parameters.
+# By default, this will apply the logged 3-parametric MAM.
+calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1)
+
+# (2) Re-run the model, but save results to a variable and turn
+# plotting of the log-likelihood profiles off.
+mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
+                    sigmab = 0.1,
+                    plot = FALSE)
+
+# Show structure of the RLum.Results object
+mam
+
+# Show summary table that contains the most relevant results
+res <- get_RLum(mam, "summary")
+res
+
+# Plot the log likelihood profiles retroactively, because before
+# we set plot = FALSE
+plot_RLum(mam)
+
+# Plot the dose distribution in an abanico plot and draw a line
+# at the minimum dose estimate
+plot_AbanicoPlot(data = ExampleData.DeValues$CA1,
+                 main = "3-parameter Minimum Age Model",
+                 line = mam, polygon.col = "none",
+                 hist = TRUE,
+                 rug = TRUE,
+                 summary = c("n", "mean", "mean.weighted", "median", "in.ci"),
+                 centrality = res$de,
+                 line.col = "red",
+                 grid.col = "none",
+                 line.label = paste0(round(res$de, 1), "\\U00B1",
+                                     round(res$de_err, 1), " Gy"),
+                 bw = 0.1,
+                 ylim = c(-25, 18),
+                 summary.pos = "topleft",
+                 mtext = bquote("Parameters: " ~
+                                  sigma[b] == .(get_RLum(mam, "args")$sigmab) ~ ", " ~
+                                  gamma == .(round(log(res$de), 1)) ~ ", " ~
+                                  sigma == .(round(res$sig, 1)) ~ ", " ~
+                                  rho == .(round(res$p0, 2))))
+
+
+\dontrun{
+# (3) Run the minimum age model with bootstrap
+# NOTE: Bootstrapping is computationally intensive
+# (3.1) run the minimum age model with default values for bootstrapping
+calc_MinDose(data = ExampleData.DeValues$CA1,
+             sigmab = 0.15,
+             bootstrap = TRUE)
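+
+# The default bandwidth of the kernel density estimate follows
+# h = 2 * sd(De) / sqrt(n) (see details); it can also be computed
+# manually and passed via the bs.h argument (computed here assuming
+# sigma_DE is the standard deviation of the De values)
+h.manual <- 2 * sd(ExampleData.DeValues$CA1[[1]]) /
+  sqrt(nrow(ExampleData.DeValues$CA1))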
+
+# (3.2) Bootstrap control parameters
+mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
+                    sigmab = 0.15,
+                    bootstrap = TRUE,
+                    bs.M = 300,
+                    bs.N = 500,
+                    bs.h = 4,
+                    sigmab.sd = 0.06,
+                    plot = FALSE)
+
+# Plot the results
+plot_RLum(mam)
+
+# save bootstrap results in a separate variable
+bs <- get_RLum(mam, "bootstrap")
+
+# show structure of the bootstrap results
+str(bs, max.level = 2, give.attr = FALSE)
+
+# print summary of minimum dose and likelihood pairs
+summary(bs$pairs$gamma)
+
+# Show polynomial fits of the bootstrap pairs
+bs$poly.fits$poly.three
+
+# Plot various statistics of the fit using the generic plot() function
+par(mfcol=c(2,2))
+plot(bs$poly.fits$poly.three, ask = FALSE)
+
+# Show the fitted values of the polynomials
+summary(bs$poly.fits$poly.three$fitted.values)
+}
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is
+based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair
+Cunningham is thanked for his help in implementing and cross-checking the
+code.
+\cr R Luminescence Package Team}
+\references{
+Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
+2009. A revised burial dose estimation procedure for optical dating of young
+and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+track ages. Nuclear Tracks and Radiation Measurements 21, 459-470. \cr\cr
+Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
+1999. Optical dating of single grains of quartz from Jinmium rock shelter,
+northern Australia. Part I: experimental design and statistical models.
+Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
+Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
+R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+calculation and display in OSL dating: An overview and some recommendations.
+Quaternary Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr
+Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain
+equivalent dose (De) distributions: Implications for OSL dating of sediment
+mixtures. Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold,
+L.J., 2006. Statistical modelling of single grain quartz De distributions
+and an assessment of procedures for estimating burial dose. Quaternary
+Science Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
+Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
+Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
+\cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
+obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
+}
+\seealso{
+\code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
+\code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
+\code{\link{calc_MaxDose}}
+}
+
diff --git a/man/calc_OSLLxTxRatio.Rd b/man/calc_OSLLxTxRatio.Rd
new file mode 100644
index 0000000..0865d0b
--- /dev/null
+++ b/man/calc_OSLLxTxRatio.Rd
@@ -0,0 +1,162 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_OSLLxTxRatio.R
+\name{calc_OSLLxTxRatio}
+\alias{calc_OSLLxTxRatio}
+\title{Calculate Lx/Tx ratio for CW-OSL curves}
+\usage{
+calc_OSLLxTxRatio(Lx.data, Tx.data, signal.integral,
+  signal.integral.Tx = NULL, background.integral,
+  background.integral.Tx = NULL,
+  background.count.distribution = "non-poisson", sigmab = NULL, sig0 = 0,
+  digits = NULL)
+}
+\arguments{
+\item{Lx.data}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+(\bold{required}): requires a CW-OSL shine down curve (x = time, y = counts)}
+
+\item{Tx.data}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+(optional): requires a CW-OSL shine down curve (x = time, y = counts). If no
+input is given the Tx.data will be treated as \code{NA} and no Lx/Tx ratio
+is calculated.}
+
+\item{signal.integral}{\code{\link{vector}} (\bold{required}): vector with the
+limits for the signal integral.}
+
+\item{signal.integral.Tx}{\code{\link{vector}} (optional): vector with the
+limits for the signal integral for the Tx curve. If nothing is provided the
+value from \code{signal.integral} is used.}
+
+\item{background.integral}{\code{\link{vector}} (\bold{required}): vector with the
+bounds for the background integral.}
+
+\item{background.integral.Tx}{\code{\link{vector}} (optional): vector with the
+limits for the background integral for the Tx curve. If nothing is provided the
+value from \code{background.integral} is used.}
+
+\item{background.count.distribution}{\code{\link{character}} (with default): sets
+the count distribution assumed for the error calculation. Possible arguments
+\code{poisson} or \code{non-poisson}. See details for further information}
+
+\item{sigmab}{\code{\link{numeric}} (optional): option to set a manual value for
+the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
+calculation. The value should be provided as absolute squared count values,
+e.g. \code{sigmab = c(300,300)}. Note: If only one value is provided this
+value is taken for both (LnTx and TnTx) signals.}
+
+\item{sig0}{\code{\link{numeric}} (with default): allow adding an extra component of error
+to the final Lx/Tx error value (e.g., instrumental error, see details).}
+
+\item{digits}{\code{\link{integer}} (with default): round numbers to the specified digits. If
+digits is set to \code{NULL} nothing is rounded.}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+
+Slot \code{data} contains a \code{\link{list}} with the following structure:\cr
+$LxTx.table (data.frame) \cr
+.. $ LnLx \cr
+.. $ LnLx.BG \cr
+.. $ TnTx \cr
+.. $ TnTx.BG \cr
+.. $ Net_LnLx \cr
+.. $ Net_LnLx.Error\cr
+.. $ Net_TnTx.Error\cr
+.. $ LxTx\cr
+.. $ LxTx.Error \cr
+$ calc.parameters (list) \cr
+.. $ sigmab.LnTx\cr
+.. $ sigmab.TnTx\cr
+.. $ k \cr
+$ call (original function call)\cr
+}
+\description{
+Calculate Lx/Tx ratios from a given set of CW-OSL curves assuming late light background subtraction.
+}
+\details{
+The integrity of the chosen values for the signal and background integral is
+checked by the function; the signal integral limits have to be lower than
+the background integral limits. If a \link{vector} is given as input instead
+of a \link{data.frame}, an artificial \code{data.frame} is produced. The
+error calculation is done according to Galbraith (2002).\cr
+
+\bold{Please note:} In cases where the calculation results in \code{NaN} values (for
+example due to zero-signal, and therefore a division of 0 by 0), these \code{NaN} values are replaced
+by 0.
+
+\bold{sigmab}\cr
+
+The default value of \code{sigmab} is calculated assuming the background is
+constant and \bold{would not} be applicable when the background varies, as
+observed, e.g., for the early light subtraction method.\cr
+
+\bold{sig0}\cr
+
+This argument allows adding an extra component of error to the final Lx/Tx error value.
+The input is treated as a factor that is multiplied with the already calculated
+LxTx value, and the result is added in quadrature:
+
+\deqn{se(LxTx) = \sqrt{se(LxTx)^2 + (LxTx * sig0)^2}}
+
+\bold{background.count.distribution}\cr
+
+This argument allows selecting the distribution assumption that is used for
+the error calculation. According to Galbraith (2002, 2014) the background
+counts may be overdispersed (i.e. they do not follow a Poisson distribution,
+which is assumed for the photomultiplier counts). In that case (which may well
+be the normal case) the overdispersion has to be accounted for by estimating
+\eqn{\sigma^2} (i.e. the overdispersion value). The relative
+standard error is then calculated as:\cr\cr (a) \code{poisson}\cr
+\deqn{rse(\mu_{S}) \approx \sqrt{Y_{0} + Y_{1}/k^2}/(Y_{0} - Y_{1}/k)} (b)
+\code{non-poisson}\cr \deqn{rse(\mu_{S}) \approx \sqrt{Y_{0} + Y_{1}/k^2 +
+\sigma^2(1+1/k)}/(Y_{0} - Y_{1}/k)}
+
+\bold{Please note} that when using the early background subtraction method in
+combination with the 'non-poisson' distribution argument, the corresponding Lx/Tx error
+may considerably increase due to a high sigmab value.
+Please check whether this is valid for your data set and, if necessary,
+consider providing your own sigmab value using the corresponding argument \code{sigmab}.
+}
+\note{
+The results of this function have been cross-checked with the Analyst
+(vers. 3.24b). Access to the results object via  \code{\link{get_RLum}}.\cr
+
+\bold{Caution:} If you are using early background subtraction (EBG), please either provide your
+own \code{sigmab} value or use \code{background.count.distribution = "poisson"}.
+}
+\section{Function version}{
+ 0.6.3 (2016-09-09 10:32:17)
+}
+\examples{
+
+##load data
+data(ExampleData.LxTxOSLData, envir = environment())
+
+##calculate Lx/Tx ratio
+results <- calc_OSLLxTxRatio(Lx.data, Tx.data, signal.integral = c(1:2),
+                             background.integral = c(85:100))
+
+##get results object
+get_RLum(results)
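+
+##the sig0 error term from the details section can be reproduced manually;
+##this assumes get_RLum(results) returns the LxTx.table data.frame shown
+##in the value section (hypothetical sig0 = 0.02)
+LxTx <- get_RLum(results)
+sqrt(LxTx$LxTx.Error^2 + (LxTx$LxTx * 0.02)^2)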
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Duller, G., 2007. Analyst.
+\url{http://www.nutech.dtu.dk/english/~/media/Andre_Universitetsenheder/Nutech/Produkter\%20og\%20services/Dosimetri/radiation_measurement_instruments/tl_osl_reader/Manuals/analyst_manual_v3_22b.ashx}\cr
+
+Galbraith, R.F., 2002. A note on the variance of a background-corrected OSL
+count. Ancient TL, 20 (2), 49-51.
+
+Galbraith, R.F., 2014. A further note on the variance of a
+background-corrected OSL count. Ancient TL, 31 (2), 1-3.
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\link{Analyse_SAR.OSLdata}}, \code{\link{plot_GrowthCurve}},
+\code{\link{analyse_SAR.CWOSL}}
+}
+\keyword{datagen}
+
diff --git a/man/calc_SourceDoseRate.Rd b/man/calc_SourceDoseRate.Rd
new file mode 100644
index 0000000..0312dd7
--- /dev/null
+++ b/man/calc_SourceDoseRate.Rd
@@ -0,0 +1,143 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_SourceDoseRate.R
+\name{calc_SourceDoseRate}
+\alias{calc_SourceDoseRate}
+\title{Calculation of the source dose rate via the date of measurement}
+\usage{
+calc_SourceDoseRate(measurement.date, calib.date, calib.dose.rate, calib.error,
+  source.type = "Sr-90", dose.rate.unit = "Gy/s", predict = NULL)
+}
+\arguments{
+\item{measurement.date}{\code{\link{character}} or \code{\link{Date}} (\bold{required}): date of
+measurement in "YYYY-MM-DD". If no value is provided, the date will be set to today.
+The argument can be provided as a vector.}
+
+\item{calib.date}{\code{\link{character}} or \code{\link{Date}} (\bold{required}): date of source
+calibration in "YYYY-MM-DD"}
+
+\item{calib.dose.rate}{\code{\link{numeric}} (\bold{required}): dose rate at
+date of calibration in Gy/s or Gy/min}
+
+\item{calib.error}{\code{\link{numeric}} (\bold{required}): error of dose
+rate at date of calibration Gy/s or Gy/min}
+
+\item{source.type}{\code{\link{character}} (with default): specify the
+irradiation source (\code{Sr-90} or \code{Co-60} or \code{Am-241}), see
+details for further information}
+
+\item{dose.rate.unit}{\code{\link{character}} (with default): specify the dose
+rate unit for the input (\code{Gy/min} or \code{Gy/s}); the output is given in
+Gy/s as required by the function \code{\link{Second2Gray}}}
+
+\item{predict}{\code{\link{integer}} (with default): option to predict the dose
+rate of the source over the number of days set by the provided value. The starting date is
+the value set with \code{measurement.date}, e.g., \code{calc_SourceDoseRate(...,predict = 100)}
+calculates the source dose rate for the next 100 days.}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+Slot \code{data} contains a \code{\link{list}} with the following
+structure:\cr
+$ dose.rate (data.frame)\cr
+.. $ dose.rate \cr
+.. $ dose.rate.error \cr
+.. $ date (corresponding measurement date)\cr
+$ parameters (list) \cr
+.. $ source.type\cr
+.. $ halflife\cr
+.. $ dose.rate.unit\cr
+$ call (the original function call)\cr
+
+The output should be accessed using the function \code{\link{get_RLum}}.\cr
+A plot method of the output is provided via \code{\link{plot_RLum}}
+}
+\description{
+Calculating the dose rate of the irradiation source via the date of
+measurement based on: source calibration date, source dose rate, dose rate
+error. The function returns the source dose rate, which can be used as the
+input argument \code{dose_rate} for the function \code{\link{Second2Gray}}.
+}
+\details{
+Calculation of the source dose rate based on the time elapsed since the last
+calibration of the irradiation source. Decay parameters assume a Sr-90 beta
+source. \deqn{dose.rate = D0 * exp(-log(2) / T.1/2 * t)} \cr
+with: D0 = calibration dose rate, T.1/2 = half-life of the source
+nuclide (here in days), t = time since source calibration (in days);
+log(2) / T.1/2 equals the decay constant lambda.
+
+Information on the date of measurements may be taken from the data's
+original .BIN file (using e.g., \code{BINfile <- readBIN2R()} and the slot
+\code{BINfile@METADATA$DATE}).
+
+\bold{Allowed source types and related values}
+
+\tabular{rlll}{ \bold{#} \tab \bold{Source type} \tab \bold{T.1/2} \tab
+\bold{Reference} \cr [1] \tab Sr-90 \tab 28.90 y \tab NNDC, Brookhaven
+National Laboratory \cr [2] \tab Am-241 \tab 432.6 y \tab NNDC, Brookhaven
+National Laboratory \cr [3] \tab Co-60 \tab 5.274 y \tab NNDC, Brookhaven
+National Laboratory }
+}
+\note{
+Please be careful when using the option \code{predict}, especially when multiple values
+are provided for \code{measurement.date} and \code{calib.date}. For the source dose rate
+prediction the function takes the last \code{measurement.date} value and predicts from that
+the source dose rate for the number of days requested,
+meaning the (multiple) original input will be replaced. However, the function
+does not change entries for the calibration dates, but mixes them up. Therefore,
+it is not recommended to use this option when multiple calibration dates (\code{calib.date})
+are provided.
+}
+\section{Function version}{
+ 0.3.0 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##(1) Simple function usage
+##Basic calculation of the dose rate for a specific date
+dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+                                  calib.date = "2014-12-19",
+                                  calib.dose.rate = 0.0438,
+                                  calib.error = 0.0019)
+
+##show results
+get_RLum(dose.rate)
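+
+##the underlying decay equation can be checked manually; this sketch
+##assumes the Sr-90 half-life from the table above and 365.25 d/y;
+##t is negative here because the example measurement predates the calibration
+t <- as.numeric(as.Date("2012-01-27") - as.Date("2014-12-19"))
+0.0438 * exp(-log(2) / (28.90 * 365.25) * t)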
+
+##(2) Usage in combination with another function (e.g., Second2Gray() )
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## use the calculated variable dose.rate as input argument
+## to convert De(s) to De(Gy)
+Second2Gray(ExampleData.DeValues$BT998, dose.rate)
+
+##(3) source rate prediction and plotting
+dose.rate <-  calc_SourceDoseRate(measurement.date = "2012-01-27",
+                                  calib.date = "2014-12-19",
+                                  calib.dose.rate = 0.0438,
+                                  calib.error = 0.0019,
+                                  predict = 1000)
+plot_RLum(dose.rate)
+
+
+##(4) export output to a LaTeX table (example using the package 'xtable')
+\dontrun{
+xtable::xtable(get_RLum(dose.rate))
+
+}
+
+
+}
+\author{
+Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany),
+\cr Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+NNDC, Brookhaven National Laboratory
+(\url{http://www.nndc.bnl.gov/})
+}
+\seealso{
+\code{\link{Second2Gray}}, \code{\link{get_RLum}}, \code{\link{plot_RLum}}
+}
+\keyword{manip}
+
diff --git a/man/calc_Statistics.Rd b/man/calc_Statistics.Rd
new file mode 100644
index 0000000..c8f5d24
--- /dev/null
+++ b/man/calc_Statistics.Rd
@@ -0,0 +1,75 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_Statistics.R
+\name{calc_Statistics}
+\alias{calc_Statistics}
+\title{Function to calculate statistic measures}
+\usage{
+calc_Statistics(data, weight.calc = "square", digits = NULL, n.MCM = 1000,
+  na.rm = TRUE)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+object (\bold{required}): for \code{data.frame} two columns: De (\code{data[,1]})
+and De error (\code{data[,2]}). To plot several data sets in one plot the
+data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.}
+
+\item{weight.calc}{\code{\link{character}}: type of weight calculation. One
+out of \code{"reciprocal"} (weight is 1/error), \code{"square"} (weight is
+1/error^2). Default is \code{"square"}.}
+
+\item{digits}{\code{\link{integer}} (with default): round numbers to the
+specified digits. If digits is set to \code{NULL} nothing is rounded.}
+
+\item{n.MCM}{\code{\link{numeric}} (with default): number of samples drawn
+for Monte Carlo-based statistics. Set to zero to disable this option.}
+
+\item{na.rm}{\code{\link{logical}} (with default): indicating whether NA
+values should be stripped before the computation proceeds.}
+}
+\value{
+Returns a list with weighted and unweighted statistic measures.
+}
+\description{
+This function calculates a number of descriptive statistics for De-data,
+most fundamentally using error-weighted approaches.
+}
+\details{
+The option to use Monte Carlo Methods (\code{n.MCM > 0}) allows calculating
+all descriptive statistics based on random values. The distribution of these
+random values is based on the Normal distribution with \code{De} values as
+means and \code{De_error} values as one standard deviation. Increasing the
+number of MCM-samples linearly increases computation time. On a Lenovo X230
+machine the evaluation of 25 aliquots with \code{n.MCM = 1000} takes 0.01 s, with
+\code{n.MCM = 100000} ca. 1.65 s. It might be useful to work with logarithms of these
+values. See Dietze et al. (2016, Quaternary Geochronology) and the function
+\code{\link{plot_AbanicoPlot}} for details.
+}
+\section{Function version}{
+ 0.1.6 (2016-05-16 22:14:31)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## show a rough plot of the data to illustrate the non-normal distribution
+plot_KDE(ExampleData.DeValues$BT998)
+
+## calculate statistics and show output
+str(calc_Statistics(ExampleData.DeValues$BT998))
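+
+## the Monte Carlo statistics are based on values drawn from
+## Normal(De, De_error); the resampling for a single De value can be
+## sketched as:
+mc <- rnorm(n = 1000, mean = ExampleData.DeValues$BT998[1, 1],
+            sd = ExampleData.DeValues$BT998[1, 2])
+summary(mc)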
+
+\dontrun{
+## now the same for 100,000 normally distributed random numbers with equal errors
+x <- as.data.frame(cbind(rnorm(n = 10^5, mean = 0, sd = 1),
+                         rep(0.001, 10^5)))
+
+## note the congruent results for weighted and unweighted measures
+str(calc_Statistics(x))
+}
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+\keyword{datagen}
+
diff --git a/man/calc_TLLxTxRatio.Rd b/man/calc_TLLxTxRatio.Rd
new file mode 100644
index 0000000..c59c7c9
--- /dev/null
+++ b/man/calc_TLLxTxRatio.Rd
@@ -0,0 +1,88 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_TLLxTxRatio.R
+\name{calc_TLLxTxRatio}
+\alias{calc_TLLxTxRatio}
+\title{Calculate the Lx/Tx ratio for a given set of TL curves [beta version]}
+\usage{
+calc_TLLxTxRatio(Lx.data.signal, Lx.data.background, Tx.data.signal,
+  Tx.data.background, signal.integral.min, signal.integral.max)
+}
+\arguments{
+\item{Lx.data.signal}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}): TL data (x =
+temperature, y = counts) (TL signal)}
+
+\item{Lx.data.background}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (optional): TL data (x =
+temperature, y = counts). If no data are provided no background subtraction
+is performed.}
+
+\item{Tx.data.signal}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (\bold{required}): TL data (x =
+temperature, y = counts) (TL test signal)}
+
+\item{Tx.data.background}{\code{\linkS4class{RLum.Data.Curve}} or
+\code{\link{data.frame}} (optional): TL data (x =
+temperature, y = counts). If no data are provided no background subtraction
+is performed.}
+
+\item{signal.integral.min}{\code{\link{integer}} (\bold{required}): channel number
+for the lower signal integral bound (e.g. \code{signal.integral.min = 100})}
+
+\item{signal.integral.max}{\code{\link{integer}} (\bold{required}): channel number
+for the upper signal integral bound (e.g. \code{signal.integral.max = 200})}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+Slot \code{data} contains a \link{list} with the following structure:\cr\cr
+$ LxTx.table \cr .. $ LnLx \cr .. $ LnLx.BG \cr .. $ TnTx \cr .. $ TnTx.BG
+\cr .. $ Net_LnLx \cr .. $ Net_LnLx.Error\cr
+}
+\description{
+Calculate Lx/Tx ratio for a given set of TL curves.
+}
+\details{
+-
+}
+\note{
+\bold{This function has still BETA status!}
+}
+\section{Function version}{
+ 0.3.0 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##load package example data
+data(ExampleData.BINfileData, envir = environment())
+
+##convert Risoe.BINfileData into a curve object
+temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3)
+
+
+Lx.data.signal <- get_RLum(temp, record.id=1)
+Lx.data.background <- get_RLum(temp, record.id=2)
+Tx.data.signal <- get_RLum(temp, record.id=3)
+Tx.data.background <- get_RLum(temp, record.id=4)
+signal.integral.min <- 210
+signal.integral.max <- 230
+
+output <- calc_TLLxTxRatio(Lx.data.signal,
+                           Lx.data.background,
+                           Tx.data.signal, Tx.data.background,
+                           signal.integral.min, signal.integral.max)
+get_RLum(output)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Christoph Schmidt, University of Bayreuth (Germany)
+\cr R Luminescence Package Team}
+\references{
+-
+}
+\seealso{
+\code{\linkS4class{RLum.Results}}, \code{\link{analyse_SAR.TL}}
+}
+\keyword{datagen}
+
diff --git a/man/calc_ThermalLifetime.Rd b/man/calc_ThermalLifetime.Rd
new file mode 100644
index 0000000..2108721
--- /dev/null
+++ b/man/calc_ThermalLifetime.Rd
@@ -0,0 +1,139 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_ThermalLifetime.R
+\name{calc_ThermalLifetime}
+\alias{calc_ThermalLifetime}
+\title{Calculates the Thermal Lifetime using the Arrhenius equation}
+\usage{
+calc_ThermalLifetime(E, s, T = 20, output_unit = "Ma", profiling = FALSE,
+  profiling_config = NULL, verbose = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{E}{\code{\link{numeric}} (\bold{required}): vector of trap depths in eV,
+if \code{profiling = TRUE}
+only the first two elements are considered}
+
+\item{s}{\code{\link{numeric}} (\bold{required}): vector of frequency factor in 1/s,
+if \code{profiling = TRUE} only the first two elements are considered}
+
+\item{T}{\code{\link{numeric}} (with default): temperature in deg. C for which the lifetime(s)
+will be calculated. A vector can be provided.}
+
+\item{output_unit}{\code{\link{character}} (with default):
+output unit of the calculated lifetimes, accepted
+entries are: \code{"Ma"}, \code{"ka"}, \code{"a"}, \code{"d"}, \code{"h"}, \code{"min"}, \code{"s"}}
+
+\item{profiling}{\code{\link{logical}} (with default):
+this option allows estimating uncertainties based on the
+given E and s parameters and their corresponding standard errors (cf. details and examples section)}
+
+\item{profiling_config}{\code{\link{list}} (optional): allows setting configuration parameters
+used for the profiling (they only have an effect here). Supported parameters are:
+\code{n} (number of MC runs), \code{E.distribution} (distribution used for the resampling for E) and
+\code{s.distribution} (distribution used for the resampling for s). Currently only the normal
+distribution is supported (e.g., \code{profiling_config = list(E.distribution = "norm")})}
+
+\item{verbose}{\code{\link{logical}}: enables/disables verbose mode}
+
+\item{plot}{\code{\link{logical}}: enables/disables the output plot, currently only in combination
+with \code{profiling = TRUE}.}
+
+\item{\dots}{further arguments that can be passed in combination with the plot output. Standard
+plot parameters are supported (\code{\link{plot.default}})}
+}
+\value{
+A \code{\linkS4class{RLum.Results}} object is returned along with a plot (for
+\code{profiling = TRUE}). The output object contains the following slots:
+
+\bold{\code{@data}}\cr
+\tabular{lll}{
+ \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+ \code{lifetimes} \tab \code{\link{array}} or \code{\link{numeric}} \tab calculated lifetimes \cr
+ \code{profiling_matrix} \tab \code{\link{matrix}} \tab profiling matrix used for the MC runs
+
+}
+
+\bold{\code{@info}}\cr
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+\code{call} \tab \code{call} \tab the original function call
+}
+}
+\description{
+The function calculates the thermal lifetime of charges for given E (in eV), s (in 1/s) and
+T (in deg. C.) parameters. The function can be used in two operational modes:\cr
+}
+\details{
+\bold{Mode 1 \code{(profiling = FALSE)}}
+
+An arbitrary set of input parameters (E, s, T) can be provided and the
+function calculates the thermal lifetimes using the Arrhenius equation for
+all possible combinations of these input parameters. An array with 3-dimensions
+is returned that can be used for further analyses or graphical output (see example 1)
+
+\bold{Mode 2 \code{(profiling = TRUE)}}
+
+This mode tries to profile the variation of the thermal lifetime for a chosen
+temperature by accounting for the provided E and s parameters and their corresponding
+standard errors, e.g., \code{E = c(1.600, 0.001)}.
+The calculation is based on a Monte Carlo simulation, where values are sampled from a normal
+distribution (for E and s).\cr
+
+\bold{Used equation (Arrhenius equation)}\cr
+
+\deqn{\tau = (1/s) exp(E/(kT))}
+where \eqn{\tau} is the mean time (in s) an electron spends in the trap for a given \eqn{T},
+\eqn{E} the trap depth in eV, \eqn{s} the frequency factor in 1/s, \eqn{T} the temperature in K and \eqn{k} the Boltzmann constant in eV/K (cf. Furetta, 2010).
+}
+\note{
+The profiling is currently based on resampling from a normal distribution; this
+distribution assumption might, however, not be valid for the given E and s parameters.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+##EXAMPLE 1
+##calculation for two trap-depths with similar frequency factor for different temperatures
+E <- c(1.66, 1.70)
+s <- 1e+13
+T <- 10:20
+temp <- calc_ThermalLifetime(
+  E = E,
+  s = s,
+  T = T,
+  output_unit = "Ma"
+)
+contour(x = E, y = T, z = temp$lifetimes[1,,],
+        ylab = "Temperature [\\u00B0C]",
+        xlab = "Trap depth [eV]",
+        main = "Thermal Lifetime Contour Plot"
+)
+mtext(side = 3, "(values quoted in Ma)")
+
+##EXAMPLE 2
+##profiling of thermal life time for E and s and their standard error
+E <- c(1.600, 0.003)
+s <- c(1e+13, 1e+11)
+T <- 20
+calc_ThermalLifetime(
+  E = E,
+  s = s,
+  T = T,
+  profiling = TRUE,
+  output_unit = "Ma"
+)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Furetta, C., 2010. Handbook of Thermoluminescence, Second Edition. ed.
+World Scientific.
+}
+\seealso{
+\code{\link[graphics]{matplot}}, \code{\link[stats]{rnorm}}, \code{\link{get_RLum}}
+}
+\keyword{datagen}
+
diff --git a/man/calc_gSGC.Rd b/man/calc_gSGC.Rd
new file mode 100644
index 0000000..4a1ffa6
--- /dev/null
+++ b/man/calc_gSGC.Rd
@@ -0,0 +1,83 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_gSGC.R
+\name{calc_gSGC}
+\alias{calc_gSGC}
+\title{Calculate De value based on the gSGC by Li et al., 2015}
+\usage{
+calc_gSGC(data, gSGC.type = "0-250", gSGC.parameters, n.MC = 100,
+  verbose = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} (\bold{required}): input data providing the following
+columns: 'LnTn', 'LnTn.error', 'Lr1Tr1', 'Lr1Tr1.error', 'Dr1'.
+Note: column names are not required. The function expects the input data in the given order}
+
+\item{gSGC.type}{\code{\link{character}} (with default): define the function parameters that
+should be used for the iteration procedure: Li et al., 2015 (Table 2)
+presented function parameters for two dose ranges: \code{"0-450"} and \code{"0-250"}}
+
+\item{gSGC.parameters}{\code{\link{list}} (optional): option to provide own function
+parameters used for the fitting as a named list.
+Nomenclature follows Li et al., 2015, i.e.
+\code{list(A,A.error,D0,D0.error,c,c.error,Y0,Y0.error,range)}; \code{range} requires a vector giving
+the dose range for which the function is considered valid, e.g. \code{range = c(0,250)}\cr
+Using this option overwrites the default parameter list of the gSGC, meaning the argument
+\code{gSGC.type} will have no effect}
+
+\item{n.MC}{\code{\link{integer}} (with default): number of Monte Carlo simulation runs for
+the error estimation, see details.}
+
+\item{verbose}{\code{\link{logical}}: enable or disable terminal output}
+
+\item{plot}{\code{\link{logical}}: enable or disable graphical feedback as plot}
+
+\item{...}{parameters will be passed to the plot output}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.\cr
+
+\bold{@data}\cr
+$ De.value (data.frame) \cr
+ .. $ De  \cr
+ .. $ De.error \cr
+ .. $ Eta \cr
+$ De.MC (list) contains the matrices from the error estimation.\cr
+$ uniroot (list) contains the uniroot outputs of the De estimations\cr
+
+\bold{@info}\cr
+$ call (call) the original function call
+}
+\description{
+The function returns the De value and its error using the global standardised growth
+curve (gSGC) assumption proposed by Li et al., 2015 for OSL dating of sedimentary quartz.
+}
+\details{
+The error of the De value is determined using a Monte Carlo simulation approach.
+The equation is solved using \code{\link{uniroot}}.
+Large values for \code{n.MC} will significantly increase the computation time.
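+
+A minimal sketch of the solving step (hypothetical parameter values,
+assuming the saturating-exponential-plus-linear form suggested by the
+parameter names A, D0, c and Y0):
+
+\preformatted{
+A <- 0.8; D0 <- 70; c0 <- 0.001; Y0 <- 0.02  # illustrative values only
+f <- function(D) A * (1 - exp(-D/D0)) + c0 * D + Y0
+## solve f(De) = normalised signal for De within the valid dose range
+uniroot(function(D) f(D) - 0.5, lower = 0, upper = 250)$root
+}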
+}
+\section{Function version}{
+ 0.1.1 (2016-09-09 10:32:17)
+}
+\examples{
+results <- calc_gSGC(data = data.frame(
+  LnTn = 2.361, LnTn.error = 0.087,
+  Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091,
+  Dr1 = 34.4))
+
+get_RLum(results, data.object = "De")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
+\cr R Luminescence Package Team}
+\references{
+Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., 2015. Potential of establishing
+a 'global standardised growth curve' (gSGC) for optical dating of quartz from sediments.
+Quaternary Geochronology 27, 94-104. doi:10.1016/j.quageo.2015.02.011
+}
+\seealso{
+\code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}}, \code{\link{uniroot}}
+}
+\keyword{datagen}
+
diff --git a/man/extract_IrradiationTimes.Rd b/man/extract_IrradiationTimes.Rd
new file mode 100644
index 0000000..70bfcf4
--- /dev/null
+++ b/man/extract_IrradiationTimes.Rd
@@ -0,0 +1,135 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/extract_IrradiationTimes.R
+\name{extract_IrradiationTimes}
+\alias{extract_IrradiationTimes}
+\title{Extract irradiation times from an XSYG file}
+\usage{
+extract_IrradiationTimes(object, file.BINX, recordType = c("irradiation (NA)",
+  "IRSL (UVVIS)", "OSL (UVVIS)", "TL (UVVIS)"), compatibility.mode = TRUE,
+  txtProgressBar = TRUE)
+}
+\arguments{
+\item{object}{\code{\link{character}} (\bold{required}) or
+\code{\linkS4class{RLum.Analysis}} object or \code{\link{list}}: path and file name of the XSYG
+file or an \code{\linkS4class{RLum.Analysis}} produced by the function
+\code{\link{read_XSYG2R}}; alternatively a \code{list} of \code{\linkS4class{RLum.Analysis}} can
+be provided. \cr
+
+\bold{Note}: If an \code{\linkS4class{RLum.Analysis}} is used, any input for
+the arguments \code{file.BINX} and \code{recordType} will be ignored!}
+
+\item{file.BINX}{\code{\link{character}} (optional): path and file name of
+an existing BINX-file. If a file name is provided the file will be updated
+with the information from the XSYG file in the same folder as the original
+BINX-file.\cr Note: The XSYG and the BINX-file have to be originate from the
+same measurement!}
+
+\item{recordType}{\code{\link{character}} (with default): select relevant
+curve types from the XSYG file or \code{\linkS4class{RLum.Analysis}}
+object. As the XSYG-file format comprises much more information than is usually
+needed for routine data analysis (and than is allowed in the BINX-file format), only
+the relevant curves are selected by using the function
+\code{\link{get_RLum}}. The argument \code{recordType} works as
+described for this function. \cr
+
+Note: A wrong selection will cause a function error. Please change this
+argument only if you have reasons to do so.}
+
+\item{compatibility.mode}{\code{\link{logical}} (with default): this option
+is parsed only if a BIN/BINX file is produced and it will reset all position
+values to a max. value of 48, cf.\code{\link{write_R2BIN}}}
+
+\item{txtProgressBar}{\code{\link{logical}} (with default): enables
+\code{TRUE} or disables \code{FALSE} the progression bars during import and
+export}
+}
+\value{
+An \code{\linkS4class{RLum.Results}} object is returned with the
+following structure:\cr .. $irr.times (data.frame)\cr
+
+If a BINX-file path and name is set, the output will be additionally
+transferred into a new BINX-file with the function name as suffix. For the
+output the path of the input BINX-file itself is used. Note that this will
+not work if the input object is not a file path to an XSYG-file, since in this
+case the argument \code{file.BINX} is ignored.\cr
+
+In the self-call mode (i.e., the input is a \code{list} of \code{\linkS4class{RLum.Analysis}} objects)
+a list of \code{\linkS4class{RLum.Results}} is returned.
+}
+\description{
+Extracts irradiation times, dose and time since the last irradiation from a
+Freiberg Instruments XSYG-file. This information can be further used to
+update an existing BINX-file.
+}
+\details{
+The function was written to compensate for missing information in the BINX-file
+output of Freiberg Instruments lexsyg readers. As all information is
+available within the XSYG-file anyway, it can be extracted
+and used for further analysis and/or stored in a new BINX-file, which can
+then be used by other software, e.g. Analyst (Geoff Duller). \cr
+
+Typical application example: g-value estimation from fading measurements
+using the Analyst or any other self-written script.\cr
+
+Besides some simple data transformation steps, the function applies the
+functions \code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}} and
+\code{\link{write_R2BIN}} for data import and export.
+}
+\note{
+The produced output object still contains the irradiation steps, to
+keep the output transparent. However, for the BINX-file export these steps
+are removed, as the BINX-file format description does not allow irradiations
+as separate sequence steps.\cr
+
+Known issue: The 'fading correction' menu in the Analyst will not appear
+with the produced BIN/BINX-file due to hidden bits, which are not reproduced
+by the function \code{write_R2BIN()}; or, if it appears, it stops with a
+floating point error. \cr
+
+Negative values for \code{TIMESINCELAS.STEP}? Yes, this is possible and no
+bug, as multiple curves are stored for one step in the XSYG file. Example: a
+TL step may comprise three curves: (a) counts vs. time, (b) measured
+temperature vs. time and (c) predefined temperature vs. time. All three
+curves belong to one TL measurement step, but with regard to
+the time stamps this can produce negative values, as the relevant function
+(\code{\link{read_XSYG2R}}) does not change the order of entries within one
+step to enforce a correct time order.
+}
+\section{Function version}{
+ 0.3.0 (2016-05-03 11:10:26)
+}
+\examples{
+
+
+## (1) - example for your own data
+##
+## set files and run function
+#
+#   file.XSYG <- file.choose()
+#   file.BINX <- file.choose()
+#
+#     output <- extract_IrradiationTimes(object = file.XSYG, file.BINX = file.BINX)
+#     get_RLum(output)
+#
+## export the results additionally to a CSV-file in the same directory as the BINX-file
+#       write.table(x = get_RLum(output),
+#                   file = paste0(file.BINX,"_extract_IrradiationTimes.csv"),
+#                   sep = ";",
+#                   row.names = FALSE)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Duller, G., 2007. Analyst.
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}, \code{\linkS4class{Risoe.BINfileData}},
+\code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}}, \code{\link{write_R2BIN}}
+}
+\keyword{IO}
+\keyword{manip}
+
diff --git a/man/fit_CWCurve.Rd b/man/fit_CWCurve.Rd
new file mode 100644
index 0000000..3e8c1d5
--- /dev/null
+++ b/man/fit_CWCurve.Rd
@@ -0,0 +1,176 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/fit_CWCurve.R
+\name{fit_CWCurve}
+\alias{fit_CWCurve}
+\title{Nonlinear Least Squares Fit for CW-OSL curves [beta version]}
+\usage{
+fit_CWCurve(values, n.components.max, fit.failure_threshold = 5,
+  fit.method = "port", fit.trace = FALSE, fit.calcError = FALSE,
+  LED.power = 36, LED.wavelength = 470, cex.global = 0.6,
+  sample_code = "Default", output.path, output.terminal = TRUE,
+  output.terminalAdvanced = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+(\bold{required}): x, y data of measured values (time and counts). See
+examples.}
+
+\item{n.components.max}{\link{vector} (optional): maximum number of
+components that are to be used for fitting. The upper limit is 7.}
+
+\item{fit.failure_threshold}{\link{vector} (with default): limits the failed
+fitting attempts.}
+
+\item{fit.method}{\link{character} (with default): select the fit method,
+allowed values: \code{'port'} and \code{'LM'}. \code{'port'} uses the 'port'
+routine of the function \code{\link{nls}}; \code{'LM'} utilises the
+function \code{nlsLM} from the package \code{minpack.lm} and thereby the
+Levenberg-Marquardt algorithm.}
+
+\item{fit.trace}{\link{logical} (with default): traces the fitting process
+on the terminal.}
+
+\item{fit.calcError}{\link{logical} (with default): calculate 1-sigma error
+range of components using \code{\link{confint}}}
+
+\item{LED.power}{\link{numeric} (with default): LED power (max.) used for
+intensity ramping in mW/cm^2. \bold{Note:} The value is used for the
+calculation of the absolute photoionisation cross section.}
+
+\item{LED.wavelength}{\link{numeric} (with default): LED wavelength used for
+stimulation in nm. \bold{Note:} The value is used for the calculation of the
+absolute photoionisation cross section.}
+
+\item{cex.global}{\link{numeric} (with default): global scaling factor.}
+
+\item{sample_code}{\link{character} (optional): sample code used for the
+plot and the optional output table (mtext).}
+
+\item{output.path}{\link{character} (optional): output path for table output
+containing the results of the fit. The file name is set automatically. If
+the file already exists in the directory, the values are appended.}
+
+\item{output.terminal}{\link{logical} (with default): terminal output with
+fitting results.}
+
+\item{output.terminalAdvanced}{\link{logical} (with default): enhanced
+terminal output. Requires \code{output.terminal = TRUE}. If
+\code{output.terminal = FALSE} no advanced output is possible.}
+
+\item{plot}{\link{logical} (with default): returns a plot of the fitted
+curves.}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot}}.}
+}
+\value{
+\item{plot}{(optional) the fitted CW-OSL curves are returned as a
+plot.} \item{table}{(optional) an output table (*.csv) with the parameters of
+the fitted components is provided if \code{output.path} is set.}
+\item{RLum.Results object}{beside the plot and table output options,
+an \code{\linkS4class{RLum.Results}} object is returned.\cr\cr \code{fit}:
+an \code{nls} object (\code{$fit}) for which generic R functions are
+provided, e.g. \link{summary}, \link{confint}, \link{profile}. For more
+details, see \link{nls}.\cr\cr \code{output.table}: a \link{data.frame}
+containing the summarised parameters including the errors\cr
+\code{component.contribution.matrix}: \link{matrix} containing the values
+for the component-to-sum contribution plot
+(\code{$component.contribution.matrix}).\cr
+
+Matrix structure:\cr Columns 1 and 2: time and \code{rev(time)} values\cr
+Additional columns are used for the components, two for each component,
+containing I0 and n0. The last columns \code{cont.} provide information on
+the relative component contribution for each time interval, including the
+row sum for these values.}
+}
+\description{
+The function determines the weighted least-squares estimates of the
+component parameters of a CW-OSL signal for a given maximum number of
+components and returns various component parameters. The fitting procedure
+uses the \code{\link{nls}} function with the \code{port} algorithm.
+}
+\details{
+\bold{Fitting function}\cr\cr The function for the CW-OSL fitting has the
+general form: \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_1*x) + \ldots +
+I0_{i}*\lambda_{i}*exp(-\lambda_i*x)} where \eqn{0 < i < 8}\cr\cr and
+\eqn{\lambda} is the decay constant and \eqn{N0} the initial number of
+trapped electrons.\cr (for the used equation cf. Boetter-Jensen et al.,
+2003)\cr\cr \bold{Start values}\cr
+
+Start values are estimated automatically by fitting a linear function to the
+logarithmized input data set. Currently, there is no option to manually
+provide start parameters. \cr\cr \bold{Goodness of fit}\cr\cr The goodness
+of the fit is given as pseudoR^2 value (pseudo coefficient of
+determination). According to Lave (1970), the value is calculated as:
+\deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} \cr
+and \eqn{TSS = Total~Sum~of~Squares}\cr\cr
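+
+For illustration, the pseudo-R^2 can be obtained from a fitted \code{nls}
+object as follows (a minimal sketch; \code{fit} and the observed counts
+\code{y} are assumed to exist):
+
+\preformatted{
+RSS <- sum(residuals(fit)^2)   # residual sum of squares
+TSS <- sum((y - mean(y))^2)    # total sum of squares
+pseudoR2 <- 1 - RSS/TSS
+}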
+
+\bold{Error of fitted component parameters}\cr\cr The 1-sigma error for the
+components is calculated using the function \code{\link{confint}}. Due to
+considerable calculation time, this option is deactivated by default. In
+addition, the error for the components can be estimated by using internal R
+functions like \code{\link{summary}}. See the \code{\link{nls}} help page
+for more information.\cr\cr \emph{For details on the nonlinear regression in
+R, see Ritz & Streibig (2008).}
+}
+\note{
+\bold{Beta version - This function has not been properly tested yet
+and should therefore not be used for publication purposes!}\cr\cr The
+pseudo-R^2 may not be the best parameter to describe the goodness of the
+fit. The trade-off between \code{n.components} and the pseudo-R^2 value
+is currently not considered.\cr\cr The function \bold{does not} ensure that
+the fitting procedure has reached a global minimum rather than a local
+minimum!
+}
+\section{Function version}{
+ 0.5.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##load data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##fit data
+fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve,
+                   main = "CW Curve Fit",
+                   n.components.max = 4,
+                   log = "x")
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Boetter-Jensen, L., McKeever, S.W.S., Wintle, A.G., 2003.
+Optically Stimulated Luminescence Dosimetry. Elsevier Science B.V.
+
+Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of
+Economics and Statistics, 52 (3), 320-323.
+
+Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. In: R.
+Gentleman, K. Hornik, G. Parmigiani, eds., Springer, p. 150.
+}
+\seealso{
+\code{\link{fit_LMCurve}}, \code{\link{plot}},\code{\link{nls}},
+\code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Results}},
+\code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}
+}
+\keyword{dplot}
+\keyword{models}
+
diff --git a/man/fit_LMCurve.Rd b/man/fit_LMCurve.Rd
new file mode 100644
index 0000000..fd60eb5
--- /dev/null
+++ b/man/fit_LMCurve.Rd
@@ -0,0 +1,218 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/fit_LMCurve.R
+\name{fit_LMCurve}
+\alias{fit_LMCurve}
+\title{Nonlinear Least Squares Fit for LM-OSL curves}
+\usage{
+fit_LMCurve(values, values.bg, n.components = 3, start_values,
+  input.dataType = "LM", fit.method = "port", sample_code = "",
+  sample_ID = "", LED.power = 36, LED.wavelength = 470,
+  fit.trace = FALSE, fit.advanced = FALSE, fit.calcError = FALSE,
+  bg.subtraction = "polynomial", verbose = TRUE, plot = TRUE,
+  plot.BG = FALSE, ...)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+(\bold{required}): x,y data of measured values (time and counts). See
+examples.}
+
+\item{values.bg}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
+(optional): x,y data of measured values (time and counts) for background
+subtraction.}
+
+\item{n.components}{\link{integer} (with default): fixed number of
+components that are to be recognised during fitting (min = 1, max = 7).}
+
+\item{start_values}{\link{data.frame} (optional): start parameters for Im
+and xm used for the fit. If no start values are given, an automatic start
+value estimation is attempted (see details).}
+
+\item{input.dataType}{\link{character} (with default): alter the plot output
+depending on the input data: "LM" or "pLM" (pseudo-LM). See: \link{CW2pLM}}
+
+\item{fit.method}{\code{\link{character}} (with default): select the fit method,
+allowed values: \code{'port'} and \code{'LM'}. \code{'port'} uses the 'port'
+routine of the function \code{\link{nls}}; \code{'LM'} utilises the
+function \code{nlsLM} from the package \code{minpack.lm} and thereby the
+Levenberg-Marquardt algorithm.}
+
+\item{sample_code}{\link{character} (optional): sample code used for the
+plot and the optional output table (mtext).}
+
+\item{sample_ID}{\link{character} (optional): additional identifier used as
+column header for the table output.}
+
+\item{LED.power}{\link{numeric} (with default): LED power (max.) used for
+intensity ramping in mW/cm^2. \bold{Note:} This value is used for the
+calculation of the absolute photoionisation cross section.}
+
+\item{LED.wavelength}{\link{numeric} (with default): LED wavelength in nm
+used for stimulation. \bold{Note:} This value is used for the calculation of
+the absolute photoionisation cross section.}
+
+\item{fit.trace}{\link{logical} (with default): traces the fitting process
+on the terminal.}
+
+\item{fit.advanced}{\link{logical} (with default): enables advanced fitting
+attempt for automatic start parameter recognition. Works only if no start
+parameters are provided. \bold{Note:} It may take a while and it is not
+compatible with \code{fit.method = "LM"}.}
+
+\item{fit.calcError}{\link{logical} (with default): calculate 1-sigma error
+range of components using \link{confint}.}
+
+\item{bg.subtraction}{\link{character} (with default): specifies method for
+background subtraction (\code{polynomial}, \code{linear}, \code{channel},
+see Details). \bold{Note:} requires input for \code{values.bg}.}
+
+\item{verbose}{\link{logical} (with default): terminal output with
+fitting results.}
+
+\item{plot}{\link{logical} (with default): returns a plot of the
+fitted curves.}
+
+\item{plot.BG}{\link{logical} (with default): returns a plot of the
+background values with the fit used for the background subtraction.}
+
+\item{\dots}{Further arguments that may be passed to the plot output, e.g.
+\code{xlab}, \code{ylab}, \code{main}, \code{log}.}
+}
+\value{
+Various types of plots are returned. For details see above.\cr
+Furthermore an \code{RLum.Results} object is returned with the following structure:\cr
+
+data:\cr
+.. $fit : \code{nls} (nls object)\cr
+.. $output.table : \code{data.frame} with fitting results\cr
+.. $component.contribution.matrix : \code{list} component distribution matrix\cr
+.. $call : \code{call} the original function call
+
+Matrix structure for the distribution matrix:\cr
+
+Column 1 and 2: time and \code{rev(time)} values\cr
+Additional columns are used for the components, two for each component,
+containing I0 and n0. The last columns \code{cont.} provide information on
+the relative component contribution for each time interval, including the row
+sum for these values.
+}
+\description{
+The function determines weighted nonlinear least-squares estimates of the
+component parameters of an LM-OSL curve (Bulur 1996) for a given number of
+components and returns various component parameters. The fitting procedure
+uses the function \code{\link{nls}} with the \code{port} algorithm.
+}
+\details{
+\bold{Fitting function}\cr\cr The function for the fitting has the general
+form: \deqn{y = (exp(0.5)*Im_1*x/xm_1)*exp(-x^2/(2*xm_1^2)) + \ldots +
+(exp(0.5)*Im_i*x/xm_i)*exp(-x^2/(2*xm_i^2))} where \eqn{1 < i < 8}\cr This
+function and the equations for the conversion to b (detrapping probability)
+and n0 (proportional to initially trapped charge) have been taken from Kitis
+& Pagonis (2008): \deqn{xm_i=\sqrt{max(t)/b_i}} \deqn{Im_i=exp(-0.5)n0/xm_i}\cr
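+
+For illustration, a single component of this function can be written and
+inspected as follows (a minimal sketch with arbitrary parameter values):
+
+\preformatted{
+lm.component <- function(x, Im, xm) {
+  exp(0.5) * Im * x/xm * exp(-x^2/(2 * xm^2))
+}
+curve(lm.component(x, Im = 170, xm = 56), from = 0, to = 500)
+}
+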
+\bold{Background subtraction}\cr\cr Three methods for background subtraction
+are provided for a given background signal (\code{values.bg}).\cr
+\code{polynomial}: default method. A polynomial function is fitted using
+\link{glm} and the resulting function is used for background subtraction:
+\deqn{y = a*x^4 + b*x^3 + c*x^2 + d*x + e}\cr \code{linear}: a linear
+function is fitted using \link{glm} and the resulting function is used for
+background subtraction: \deqn{y = a*x + b}\cr \code{channel}: the measured
+background signal is subtracted channelwise from the measured signal.\cr\cr
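+
+A minimal sketch of the \code{polynomial} variant (assuming \code{values}
+and \code{values.bg} are data.frames with columns \code{time} and
+\code{counts}; the actual implementation may differ in detail):
+
+\preformatted{
+bg.fit <- glm(counts ~ poly(time, 4, raw = TRUE), data = values.bg)
+counts.net <- values$counts - predict(bg.fit, newdata = values)
+}
+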
+\bold{Start values}\cr
+
+The choice of the initial parameters for the \code{nls}-fitting is a crucial
+point and the fitting procedure may fail mainly due to ill-chosen start
+parameters. Here, three options are provided:\cr\cr \bold{(a)} If no start
+values (\code{start_values}) are provided by the user, a cheap guess is made
+by using the detrapping values found by Jain et al. (2003) for quartz for a
+maximum of 7 components. Based on these values, the pseudo start parameters
+xm and Im are recalculated for the given data set. In all cases, the fitting
+starts with the ultra-fast component and (depending on \code{n.components})
+steps through the following values. If no fit could be achieved, an error
+plot (for \code{plot = TRUE}) with the pseudo curve (based on the
+pseudo start parameters) is provided. This may give the opportunity to
+identify appropriate start parameters visually.\cr\cr \bold{(b)} If start
+values are provided, the function works like a simple \code{\link{nls}}
+fitting approach.\cr\cr \bold{(c)} If no start parameters are provided and
+the option \code{fit.advanced = TRUE} is chosen, an advanced start parameter
+estimation is applied using a stochastic approach. For this, the
+recalculated start parameters from \bold{(a)} are used to construct a normal
+distribution. The start parameters are then sampled randomly from this
+distribution. A maximum of 100 attempts will be made. \bold{Note:} This
+process may be time consuming. \cr\cr \bold{Goodness of fit}\cr\cr The
+goodness of the fit is given by a pseudoR^2 value (pseudo coefficient of
+determination). According to Lave (1970), the value is calculated as:
+\deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} \cr
+and \eqn{TSS = Total~Sum~of~Squares}\cr\cr \bold{Error of fitted component
+parameters}\cr\cr The 1-sigma error for the components is calculated using
+the function \link{confint}. Due to considerable calculation time, this
+option is deactivated by default. In addition, the error for the components
+can be estimated by using internal R functions like \link{summary}. See the
+\link{nls} help page for more information.\cr \emph{For more details on the
+nonlinear regression in R, see Ritz & Streibig (2008).}
+}
+\note{
+The pseudo-R^2 may not be the best parameter to describe the goodness
+of the fit. The trade off between the \code{n.components} and the pseudo-R^2
+value currently remains unconsidered. \cr
+
+The function \bold{does not} ensure that the fitting procedure has reached a
+global minimum rather than a local minimum! In any case of doubt, the use of
+manual start values is highly recommended.
+}
+\section{Function version}{
+ 0.3.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##(1) fit LM data without background subtraction
+data(ExampleData.FittingLM, envir = environment())
+fit_LMCurve(values = values.curve, n.components = 3, log = "x")
+
+##(2) fit LM data with background subtraction and export as JPEG
+## -alter file path for your preferred system
+##jpeg(file = "~/Desktop/Fit_Output\\\%03d.jpg", quality = 100,
+## height = 3000, width = 3000, res = 300)
+data(ExampleData.FittingLM, envir = environment())
+fit_LMCurve(values = values.curve, values.bg = values.curveBG,
+            n.components = 2, log = "x", plot.BG = TRUE)
+##dev.off()
+
+##(3) fit LM data with manual start parameters
+data(ExampleData.FittingLM, envir = environment())
+fit_LMCurve(values = values.curve,
+            values.bg = values.curveBG,
+            n.components = 3,
+            log = "x",
+            start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500)))
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Bulur, E., 1996. An Alternative Technique For Optically
+Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 5,
+701-709.
+
+Jain, M., Murray, A.S., Boetter-Jensen, L., 2003. Characterisation of
+blue-light stimulated luminescence components in different quartz samples:
+implications for dose measurement. Radiation Measurements, 37 (4-5),
+441-449.
+
+Kitis, G. & Pagonis, V., 2008. Computerized curve deconvolution analysis for
+LM-OSL. Radiation Measurements, 43, 737-741.
+
+Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of
+Economics and Statistics, 52 (3), 320-323.
+
+Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. R. Gentleman,
+K. Hornik, & G. Parmigiani, eds., Springer, p. 150.
+}
+\seealso{
+\code{\link{fit_CWCurve}}, \code{\link{plot}}, \code{\link{nls}},
+\code{\link[minpack.lm]{nlsLM}}, \code{\link{get_RLum}}
+}
+\keyword{dplot}
+\keyword{models}
+
diff --git a/man/get_Layout.Rd b/man/get_Layout.Rd
new file mode 100644
index 0000000..5798a84
--- /dev/null
+++ b/man/get_Layout.Rd
@@ -0,0 +1,60 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/get_Layout.R
+\name{get_Layout}
+\alias{get_Layout}
+\title{Collection of layout definitions}
+\usage{
+get_Layout(layout)
+}
+\arguments{
+\item{layout}{\code{\link{character}} or \code{\link{list}} object
+(required): name of the layout definition to be returned. If a name is
+provided, the respective definition is returned. One of the following
+supported layout definitions is possible: \code{"default"},
+\code{"journal.1"}, \code{"small"}, \code{"empty"}. User-specific layout
+definitions must be provided as a list object of predefined structure, see
+details.}
+}
+\value{
+A list object with layout definitions for plot functions.
+}
+\description{
+This helper function returns a list with layout definitions for homogeneous
+plotting.
+}
+\details{
+The easiest way to create a user-specific layout definition is perhaps to
+create either an empty or a default layout object and fill/modify the
+definitions (\code{user.layout <- get_Layout(layout = "empty")}).
+}
+\section{Function version}{
+ 0.1 (2016-05-17 22:39:50)
+}
+\examples{
+
+## read example data set
+data(ExampleData.DeValues, envir = environment())
+
+## show structure of the default layout definition
+layout.default <- get_Layout(layout = "default")
+str(layout.default)
+
+## show colour definitions for Abanico plot, only
+layout.default$abanico$colour
+
+## set Abanico plot title colour to orange
+layout.default$abanico$colour$main <- "orange"
+
+## create Abanico plot with modified layout definition
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 layout = layout.default)
+
+## create Abanico plot with predefined layout "journal"
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 layout = "journal")
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+
diff --git a/man/get_Quote.Rd b/man/get_Quote.Rd
new file mode 100644
index 0000000..c4d9da2
--- /dev/null
+++ b/man/get_Quote.Rd
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/get_Quote.R
+\name{get_Quote}
+\alias{get_Quote}
+\title{Function to return essential quotes}
+\usage{
+get_Quote(ID, author, separated = FALSE)
+}
+\arguments{
+\item{ID}{\code{\link{character}}, quote ID to be returned.}
+
+\item{author}{\code{\link{character}}, all quotes by specified author.}
+
+\item{separated}{\code{\link{logical}}, return result in separated form.}
+}
+\value{
+Returns a character with quote and respective (false) author.
+}
+\description{
+This function returns one of the collected essential quotes in the
+growing library. If called without any parameters, a random quote is
+returned.
+}
+\section{Function version}{
+ 0.1.1 (2016-09-09 10:32:17)
+}
+\examples{
+
+## ask for an arbitrary quote
+get_Quote()
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+
diff --git a/man/get_RLum.Rd b/man/get_RLum.Rd
new file mode 100644
index 0000000..a86b0e3
--- /dev/null
+++ b/man/get_RLum.Rd
@@ -0,0 +1,73 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/get_RLum.R
+\docType{methods}
+\name{get_RLum}
+\alias{get_RLum}
+\alias{get_RLum,list-method}
+\title{General accessor function for RLum S4 class objects}
+\usage{
+get_RLum(object, ...)
+
+\S4method{get_RLum}{list}(object, null.rm = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum} or an object of type \code{\link{list}} containing only objects of type
+\code{\linkS4class{RLum}}}
+
+\item{null.rm}{\code{\link{logical}} (with default): option to get rid of empty and NULL objects}
+
+\item{\dots}{further arguments that will be passed to the object-specific methods. For
+further details on the supported arguments please see the class
+documentation: \code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}}
+}
+\value{
+The return value depends on the input object and the object-specific get function; for a \code{list}, a list with the individual return values is provided.
+}
+\description{
+Function calls object-specific get functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding get function will be selected. Allowed arguments can be found
+in the documentations of the corresponding \code{\linkS4class{RLum}} class.
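+
+For illustration (a minimal sketch; \code{a1} and \code{a2} are assumed to
+be \code{\linkS4class{RLum.Analysis}} objects):
+
+\preformatted{
+## applied to a list, get_RLum() dispatches element-wise; empty or NULL
+## results can be dropped via null.rm
+curves <- get_RLum(list(a1, a2), recordType = "OSL", null.rm = TRUE)
+}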
+}
+\section{Methods (by class)}{
+\itemize{
+\item \code{list}: Returns a list of \code{\linkS4class{RLum}} objects that had been passed to \code{\link{get_RLum}}
+}}
+\section{Function version}{
+ 0.3.0 (2016-05-02 09:40:57)
+}
+\examples{
+
+
+##Example based on data from the calc_CentralDose() function
+
+##load example data
+data(ExampleData.DeValues, envir = environment())
+
+##apply the central dose model 1st time
+temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
+
+##get results and store them in a new object
+temp.get <- get_RLum(object = temp1)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/get_Risoe.BINfileData.Rd b/man/get_Risoe.BINfileData.Rd
new file mode 100644
index 0000000..360f8c1
--- /dev/null
+++ b/man/get_Risoe.BINfileData.Rd
@@ -0,0 +1,39 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/get_Risoe.BINfileData.R
+\name{get_Risoe.BINfileData}
+\alias{get_Risoe.BINfileData}
+\title{General accessor function for Risoe.BINfileData S4 class objects}
+\usage{
+get_Risoe.BINfileData(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{Risoe.BINfileData}} (\bold{required}): S4 object of
+class \code{Risoe.BINfileData}}
+
+\item{\dots}{further arguments that one might want to pass to the specific
+get function}
+}
+\value{
+The return value depends on the input object and the corresponding get function.
+}
+\description{
+Function calls object-specific get functions for RisoeBINfileData S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{Risoe.BINfileData}} objects.\cr Depending on the input object, the
+corresponding get function will be selected. Allowed arguments can be found
+in the documentations of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
+}
+\section{Function version}{
+ 0.1.0 (2015-11-29 17:27:48)
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}
+}
+\keyword{utilities}
+
diff --git a/man/get_rightAnswer.Rd b/man/get_rightAnswer.Rd
new file mode 100644
index 0000000..9c18680
--- /dev/null
+++ b/man/get_rightAnswer.Rd
@@ -0,0 +1,30 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/get_rightAnswer.R
+\name{get_rightAnswer}
+\alias{get_rightAnswer}
+\title{Function to get the right answer}
+\usage{
+get_rightAnswer(...)
+}
+\arguments{
+\item{...}{you can pass an infinite number of further arguments}
+}
+\value{
+Returns the right answer
+}
+\description{
+This function returns just the right answer
+}
+\section{Function version}{
+ 0.1.0 (2015-11-29 17:27:48)
+}
+\examples{
+
+## you really want to know?
+get_rightAnswer()
+
+}
+\author{
+inspired by R.G.
+\cr R Luminescence Package Team}
+
diff --git a/man/length_RLum.Rd b/man/length_RLum.Rd
new file mode 100644
index 0000000..9ff3cf7
--- /dev/null
+++ b/man/length_RLum.Rd
@@ -0,0 +1,40 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/length_RLum.R
+\name{length_RLum}
+\alias{length_RLum}
+\title{General length function for RLum S4 class objects}
+\usage{
+length_RLum(object)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum}}
+}
+\value{
+Returns the length of the input object, e.g., the number of stored records or data objects.
+}
+\description{
+Function calls object-specific length functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding length function will be selected. Allowed arguments can be found
+in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/merge_RLum.Analysis.Rd b/man/merge_RLum.Analysis.Rd
new file mode 100644
index 0000000..f062708
--- /dev/null
+++ b/man/merge_RLum.Analysis.Rd
@@ -0,0 +1,69 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_RLum.Analysis.R
+\name{merge_RLum.Analysis}
+\alias{merge_RLum.Analysis}
+\title{Merge function for RLum.Analysis S4 class objects}
+\usage{
+merge_RLum.Analysis(objects)
+}
+\arguments{
+\item{objects}{\code{\link{list}} of \code{\linkS4class{RLum.Analysis}}
+(\bold{required}): list of S4 objects of class \code{RLum.Analysis}.
+Furthermore other objects of class \code{\linkS4class{RLum}} can be added,
+see details.}
+}
+\value{
+Return an \code{\linkS4class{RLum.Analysis}} object.
+}
+\description{
+Function allows merging of RLum.Analysis objects and adding of allowed
+objects to an RLum.Analysis.
+}
+\details{
+This function allows merging of \code{\linkS4class{RLum.Analysis}}
+objects. Additionally, other \code{\linkS4class{RLum}} objects can be added
+to an existing \code{\linkS4class{RLum.Analysis}} object. Supported objects
+to be added are: \code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Spectrum}} and
+\code{\linkS4class{RLum.Data.Image}}.\cr
+
+The order in the new \code{\linkS4class{RLum.Analysis}} object is the object
+order provided with the input list.
+}
+\note{
+The information for the slot 'protocol' is taken from the first
+\code{\linkS4class{RLum.Analysis}} object in the input list. Therefore at
+least one object of type \code{\linkS4class{RLum.Analysis}} has to be
+provided.
+}
+\section{Function version}{
+ 0.2.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##merge different RLum objects from the example data
+data(ExampleData.RLum.Analysis, envir = environment())
+data(ExampleData.BINfileData, envir = environment())
+
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+curve <- get_RLum(object)[[2]]
+
+temp.merged <- merge_RLum.Analysis(list(curve, IRSAR.RF.Data, IRSAR.RF.Data))
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link{merge_RLum}}, \code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Data.Image}}, \code{\linkS4class{RLum}}
+}
+\keyword{utilities}
+
diff --git a/man/merge_RLum.Data.Curve.Rd b/man/merge_RLum.Data.Curve.Rd
new file mode 100644
index 0000000..ce1ee48
--- /dev/null
+++ b/man/merge_RLum.Data.Curve.Rd
@@ -0,0 +1,130 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_RLum.Data.Curve.R
+\name{merge_RLum.Data.Curve}
+\alias{merge_RLum.Data.Curve}
+\title{Merge function for RLum.Data.Curve S4 class objects}
+\usage{
+merge_RLum.Data.Curve(object, merge.method = "mean", method.info)
+}
+\arguments{
+\item{object}{\code{\link{list}} of \code{\linkS4class{RLum.Data.Curve}}
+(\bold{required}): list of S4 objects of class \code{RLum.Data.Curve}.}
+
+\item{merge.method}{\code{\link{character}} (\bold{required}): method for
+combining the objects, e.g. \code{'mean'}, \code{'sum'}; see details for
+further information and allowed methods. Note: Elements in the slot info will
+be taken from the first curve in the list.}
+
+\item{method.info}{\code{\link{numeric}} (optional): allows specifying how
+info elements of the input objects are combined, e.g. \code{1} means that
+just the elements from the first object are kept, \code{2} keeps only the
+info elements from the 2nd object etc. If nothing is provided all elements
+are combined.}
+}
+\value{
+Returns an \code{\linkS4class{RLum.Data.Curve}} object.
+}
+\description{
+Function allows merging of RLum.Data.Curve objects in different ways
+}
+\details{
+This function allows merging of \code{\linkS4class{RLum.Data.Curve}}
+objects without touching the objects themselves. Merging is always applied to
+the 2nd column of the data matrix of the object.\cr
+
+\bold{Supported merge operations for
+\code{\linkS4class{RLum.Data.Curve}} objects}\cr
+
+\code{"sum"}\cr
+
+All count values will be summed up using the function \code{\link{rowSums}}.
+
+\code{"mean"}\cr
+
+The mean over the count values is calculated using the function
+\code{\link{rowMeans}}.
+
+\code{"median"}\cr
+
+The median over the count values is calculated using the function
+\code{\link[matrixStats]{rowMedians}}.
+
+\code{"sd"}\cr
+
+The standard deviation over the count values is calculated using the function
+\code{\link[matrixStats]{rowSds}}.
+
+\code{"var"}\cr
+
+The variance over the count values is calculated using the function
+\code{\link[matrixStats]{rowVars}}.
+
+\code{"min"}\cr
+
+The min values from the count values is chosen using the function
+\code{\link[matrixStats]{rowMins}}.
+
+\code{"max"}\cr
+
+The max values from the count values is chosen using the function
+\code{\link[matrixStats]{rowMins}}.
+
+\code{"-"}\cr
+
+The row sums of the last objects are subtracted from the first object.
+
+\code{"*"}\cr
+
+The row sums of the last objects are multiplied with the first object.
+
+\code{"/"}\cr
+
+Values of the first object are divided by row sums of the last objects.
+}
+\note{
+The information from the slot 'recordType' is taken from the first
+\code{\linkS4class{RLum.Data.Curve}} object in the input list. The slot
+'curveType' is filled with the name \code{merged}.
+}
+\section{S3-generic support}{
+
+
+This function is fully operational via S3-generics:
+\code{`+`}, \code{`-`}, \code{`/`}, \code{`*`}, \code{merge}
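+
+For illustration, a minimal sketch (assuming \code{c1} and \code{c2} are
+\code{RLum.Data.Curve} objects with identical channel layout; the mapping of
+\code{`+`} to \code{merge.method = "sum"} is an assumption of this sketch):
+
+\preformatted{
+## both calls are expected to yield the same merged curve
+c.sum1 <- c1 + c2
+c.sum2 <- merge_RLum.Data.Curve(list(c1, c2), merge.method = "sum")
+}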
+}
+
+\section{Function version}{
+ 0.2.0 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+##load example data
+data(ExampleData.XSYG, envir = environment())
+
+##get the first and the 3rd TL curves
+TL.curves  <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)")
+TL.curve.1 <- TL.curves[[1]]
+TL.curve.3 <- TL.curves[[3]]
+
+##plot single curves
+plot_RLum(TL.curve.1)
+plot_RLum(TL.curve.3)
+
+##divide the 3rd curve by the 1st and plot
+TL.curve.merged <- merge_RLum.Data.Curve(list(TL.curve.3, TL.curve.1), merge.method = "/")
+plot_RLum(TL.curve.merged)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link{merge_RLum}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{utilities}
+
diff --git a/man/merge_RLum.Rd b/man/merge_RLum.Rd
new file mode 100644
index 0000000..a425ee1
--- /dev/null
+++ b/man/merge_RLum.Rd
@@ -0,0 +1,74 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_RLum.R
+\name{merge_RLum}
+\alias{merge_RLum}
+\title{General merge function for RLum S4 class objects}
+\usage{
+merge_RLum(objects, ...)
+}
+\arguments{
+\item{objects}{\code{\link{list}} of \code{\linkS4class{RLum}}
+(\bold{required}): list of S4 object of class \code{RLum}}
+
+\item{\dots}{further arguments that one might want to pass to the specific
+merge function}
+}
+\value{
+The return value depends on the objects provided in the input list and the corresponding merge function.
+}
+\description{
+Function calls object-specific merge functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for merging specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding merge function will be selected.  Allowed arguments can be
+found in the documentations of each merge function. Empty list elements (\code{NULL}) are
+automatically removed from the input \code{list}.
+
+\tabular{lll}{
+\bold{object} \tab \tab \bold{corresponding merge function} \cr
+
+\code{\linkS4class{RLum.Data.Curve}} \tab : \tab \code{merge_RLum.Data.Curve} \cr
+\code{\linkS4class{RLum.Analysis}} \tab : \tab \code{merge_RLum.Analysis} \cr
+\code{\linkS4class{RLum.Results}} \tab : \tab \code{merge_RLum.Results}
+}
+}
+\note{
+So far not for every \code{RLum} object a merging function exists.
+}
+\section{Function version}{
+ 0.1.2 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##Example based on data from the calc_CentralDose() function
+
+##load example data
+data(ExampleData.DeValues, envir = environment())
+
+##apply the central dose model 1st time
+temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
+
+##apply the central dose model 2nd time
+temp2 <- calc_CentralDose(ExampleData.DeValues$CA1)
+
+##merge the results and store them in a new object
+temp.merged <- get_RLum(merge_RLum(objects = list(temp1, temp2)))
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/merge_RLum.Results.Rd b/man/merge_RLum.Results.Rd
new file mode 100644
index 0000000..0f6625b
--- /dev/null
+++ b/man/merge_RLum.Results.Rd
@@ -0,0 +1,27 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_RLum.Results.R
+\name{merge_RLum.Results}
+\alias{merge_RLum.Results}
+\title{Merge function for RLum.Results S4-class objects}
+\usage{
+merge_RLum.Results(objects)
+}
+\arguments{
+\item{objects}{\code{\link{list}} (required): a list of \code{\linkS4class{RLum.Results}} objects}
+}
+\description{
+Function merges objects of class \code{\linkS4class{RLum.Results}}. The slots in the objects
+are combined depending on the object type, e.g., for \code{\link{data.frame}} and \code{\link{matrix}}
+rows are appended.
+}
+\note{
+The originator is taken from the first element and not reset to \code{merge_RLum}
+}
+\section{Function version}{
+ 0.2.0 (2016-05-02 09:36:06)
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+
diff --git a/man/merge_Risoe.BINfileData.Rd b/man/merge_Risoe.BINfileData.Rd
new file mode 100644
index 0000000..51221e5
--- /dev/null
+++ b/man/merge_Risoe.BINfileData.Rd
@@ -0,0 +1,94 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_Risoe.BINfileData.R
+\name{merge_Risoe.BINfileData}
+\alias{merge_Risoe.BINfileData}
+\title{Merge Risoe.BINfileData objects or Risoe BIN-files}
+\usage{
+merge_Risoe.BINfileData(input.objects, output.file,
+  keep.position.number = FALSE, position.number.append.gap = 0)
+}
+\arguments{
+\item{input.objects}{\code{\link{character}} or
+\code{\linkS4class{Risoe.BINfileData}} (\bold{required}): character vector
+with path and file names (e.g. \code{input.objects = c("path/file1.bin",
+"path/file2.bin")}) or \code{\linkS4class{Risoe.BINfileData}} objects (e.g.
+\code{input.objects = c(object1, object2)})}
+
+\item{output.file}{\code{\link{character}} (optional): File output path and
+name. \cr If no value is given, a \code{\linkS4class{Risoe.BINfileData}} is
+returned instead of a file.}
+
+\item{keep.position.number}{\code{\link{logical}} (with default): Allows
+keeping the original position numbers of the input objects. Otherwise the
+position numbers are recalculated.}
+
+\item{position.number.append.gap}{\code{\link{integer}} (with default): Set
+the position number gap between merged BIN-file sets, if the option
+\code{keep.position.number = FALSE} is used. See details for further
+information.}
+}
+\value{
+Returns a \code{file} or a \code{\linkS4class{Risoe.BINfileData}}
+object.
+}
+\description{
+Function allows merging Risoe BIN/BINX files or Risoe.BINfileData objects.
+}
+\details{
+The function allows merging different measurements into one file or one
+object.\cr The record IDs are recalculated for the new object. Other values
+are kept for each object. The number of input objects is not limited. \cr
+
+\code{position.number.append.gap} option \cr
+
+If the option \code{keep.position.number = FALSE} is used, the position
+numbers of the new data set are recalculated by adding the highest position
+number of the previous data set to each position number of the next data
+set. For example: if the highest position number is 48, this number will
+be added to all position numbers of the next data set (e.g. 1 + 48 =
+49)\cr
+
+However, there might be cases where an additional summand is needed
+before the next position starts. Example: \cr
+
+Position number set (A): \code{1,3,5,7}\cr Position number set (B):
+\code{1,3,5,7} \cr
+
+With no additional summand the new position numbers would be:
+\code{1,3,5,7,8,10,12,14}. That might be unwanted. Using the argument
+\code{position.number.append.gap = 1} it will become:
+\code{1,3,5,7,9,11,13,15}.
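+
+A minimal sketch of the renumbering logic described above:
+
+\preformatted{
+posA <- c(1, 3, 5, 7)
+posB <- c(1, 3, 5, 7)
+gap  <- 1
+c(posA, posB + max(posA) + gap)   # 1 3 5 7 9 11 13 15
+}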
+}
+\note{
+The validity of the output objects is not further checked.
+}
+\section{Function version}{
+ 0.2.5 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+##merge two objects
+data(ExampleData.BINfileData, envir = environment())
+
+object1 <- CWOSL.SAR.Data
+object2 <- CWOSL.SAR.Data
+
+object.new <- merge_Risoe.BINfileData(c(object1, object2))
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Duller, G., 2007. Analyst.
+}
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}, \code{\link{read_BIN2R}},
+\code{\link{write_R2BIN}}
+}
+\keyword{IO}
+\keyword{manip}
+
diff --git a/man/methods_RLum.Rd b/man/methods_RLum.Rd
new file mode 100644
index 0000000..57c4b1c
--- /dev/null
+++ b/man/methods_RLum.Rd
@@ -0,0 +1,258 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/methods_RLum.R
+\name{methods_RLum}
+\alias{$.RLum.Analysis}
+\alias{$.RLum.Data.Curve}
+\alias{$.RLum.Results}
+\alias{*.RLum.Data.Curve}
+\alias{+.RLum.Data.Curve}
+\alias{-.RLum.Data.Curve}
+\alias{/.RLum.Data.Curve}
+\alias{[.RLum.Analysis}
+\alias{[.RLum.Data.Curve}
+\alias{[.RLum.Data.Image}
+\alias{[.RLum.Data.Spectrum}
+\alias{[.RLum.Results}
+\alias{[[.RLum.Analysis}
+\alias{[[.RLum.Results}
+\alias{as.data.frame.RLum.Data.Curve}
+\alias{as.data.frame.RLum.Data.Spectrum}
+\alias{as.list.RLum.Analysis}
+\alias{as.list.RLum.Data.Curve}
+\alias{as.list.RLum.Results}
+\alias{as.matrix.RLum.Data.Curve}
+\alias{as.matrix.RLum.Data.Spectrum}
+\alias{bin.RLum.Data.Curve}
+\alias{dim.RLum.Data.Curve}
+\alias{dim.RLum.Data.Spectrum}
+\alias{hist.RLum.Analysis}
+\alias{hist.RLum.Data.Curve}
+\alias{hist.RLum.Data.Image}
+\alias{hist.RLum.Results}
+\alias{is.RLum}
+\alias{is.RLum.Analysis}
+\alias{is.RLum.Data}
+\alias{is.RLum.Data.Curve}
+\alias{is.RLum.Data.Image}
+\alias{is.RLum.Data.Spectrum}
+\alias{is.RLum.Results}
+\alias{length.RLum.Analysis}
+\alias{length.RLum.Data.Curve}
+\alias{length.RLum.Results}
+\alias{length.Risoe.BINfileData}
+\alias{merge.RLum}
+\alias{methods_RLum}
+\alias{names.RLum.Analysis}
+\alias{names.RLum.Data.Curve}
+\alias{names.RLum.Data.Image}
+\alias{names.RLum.Data.Spectrum}
+\alias{names.RLum.Results}
+\alias{names.Risoe.BINfileData}
+\alias{plot.RLum.Analysis}
+\alias{plot.RLum.Data.Curve}
+\alias{plot.RLum.Data.Image}
+\alias{plot.RLum.Data.Spectrum}
+\alias{plot.RLum.Results}
+\alias{plot.Risoe.BINfileData}
+\alias{plot.list}
+\alias{rep.RLum}
+\alias{row.names.RLum.Data.Spectrum}
+\alias{subset.Risoe.BINfileData}
+\alias{summary.RLum.Analysis}
+\alias{summary.RLum.Data.Curve}
+\alias{summary.RLum.Data.Image}
+\alias{summary.RLum.Results}
+\alias{unlist.RLum.Analysis}
+\title{methods_RLum}
+\usage{
+\method{plot}{list}(x, y, ...)
+
+\method{plot}{RLum.Results}(x, y, ...)
+
+\method{plot}{RLum.Analysis}(x, y, ...)
+
+\method{plot}{RLum.Data.Curve}(x, y, ...)
+
+\method{plot}{RLum.Data.Spectrum}(x, y, ...)
+
+\method{plot}{RLum.Data.Image}(x, y, ...)
+
+\method{plot}{Risoe.BINfileData}(x, y, ...)
+
+\method{hist}{RLum.Results}(x, ...)
+
+\method{hist}{RLum.Data.Image}(x, ...)
+
+\method{hist}{RLum.Data.Curve}(x, ...)
+
+\method{hist}{RLum.Analysis}(x, ...)
+
+\method{summary}{RLum.Results}(object, ...)
+
+\method{summary}{RLum.Analysis}(object, ...)
+
+\method{summary}{RLum.Data.Image}(object, ...)
+
+\method{summary}{RLum.Data.Curve}(object, ...)
+
+\method{subset}{Risoe.BINfileData}(x, subset, records.rm = TRUE, ...)
+
+bin.RLum.Data.Curve(x, ...)
+
+\method{length}{RLum.Results}(x, ...)
+
+\method{length}{RLum.Analysis}(x, ...)
+
+\method{length}{RLum.Data.Curve}(x, ...)
+
+\method{length}{Risoe.BINfileData}(x, ...)
+
+\method{dim}{RLum.Data.Curve}(x)
+
+\method{dim}{RLum.Data.Spectrum}(x)
+
+\method{rep}{RLum}(x, ...)
+
+\method{names}{RLum.Data.Curve}(x, ...)
+
+\method{names}{RLum.Data.Spectrum}(x, ...)
+
+\method{names}{RLum.Data.Image}(x, ...)
+
+\method{names}{RLum.Analysis}(x, ...)
+
+\method{names}{RLum.Results}(x, ...)
+
+\method{names}{Risoe.BINfileData}(x)
+
+\method{row.names}{RLum.Data.Spectrum}(x, ...)
+
+\method{as.data.frame}{RLum.Data.Curve}(x, row.names = NULL,
+  optional = FALSE, ...)
+
+\method{as.data.frame}{RLum.Data.Spectrum}(x, row.names = NULL,
+  optional = FALSE, ...)
+
+\method{as.list}{RLum.Results}(x, ...)
+
+\method{as.list}{RLum.Data.Curve}(x, ...)
+
+\method{as.list}{RLum.Analysis}(x, ...)
+
+\method{as.matrix}{RLum.Data.Curve}(x, ...)
+
+\method{as.matrix}{RLum.Data.Spectrum}(x, ...)
+
+is.RLum(x, ...)
+
+is.RLum.Data(x, ...)
+
+is.RLum.Data.Curve(x, ...)
+
+is.RLum.Data.Spectrum(x, ...)
+
+is.RLum.Data.Image(x, ...)
+
+is.RLum.Analysis(x, ...)
+
+is.RLum.Results(x, ...)
+
+\method{merge}{RLum}(x, y, ...)
+
+\method{unlist}{RLum.Analysis}(x, recursive = TRUE, ...)
+
+\method{+}{RLum.Data.Curve}(x, y)
+
+\method{-}{RLum.Data.Curve}(x, y)
+
+\method{*}{RLum.Data.Curve}(x, y)
+
+\method{/}{RLum.Data.Curve}(x, y)
+
+\method{[}{RLum.Data.Curve}(x, y, z, drop = TRUE)
+
+\method{[}{RLum.Data.Spectrum}(x, y, z, drop = TRUE)
+
+\method{[}{RLum.Data.Image}(x, y, z, drop = TRUE)
+
+\method{[}{RLum.Analysis}(x, i, drop = FALSE)
+
+\method{[}{RLum.Results}(x, i, drop = TRUE)
+
+\method{[[}{RLum.Analysis}(x, i)
+
+\method{[[}{RLum.Results}(x, i)
+
+\method{$}{RLum.Data.Curve}(x, i)
+
+\method{$}{RLum.Analysis}(x, i)
+
+\method{$}{RLum.Results}(x, i)
+}
+\arguments{
+\item{x}{\code{\linkS4class{RLum}} or \code{\linkS4class{Risoe.BINfileData}} (\bold{required}): input object}
+
+\item{y}{\code{\link{integer}} (optional): the row index of the matrix, data.frame}
+
+\item{...}{further arguments that can be passed to the method}
+
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): input object}
+
+\item{subset}{\code{[subset]} \code{\link{expression}} (\bold{required}): logical expression indicating elements or rows to keep;
+this method works on \code{\linkS4class{Risoe.BINfileData}} objects like \code{\link{subset.data.frame}}, but takes care
+of the object structure}
+
+\item{records.rm}{[subset] \code{\link{logical}} (with default): remove records from the data set; this can
+be disabled to just set the column \code{SET} to \code{TRUE} or \code{FALSE}}
+
+\item{row.names}{\code{\link{logical}} (with default): enables or disables row names (\code{as.data.frame})}
+
+\item{optional}{\code{\link{logical}} (with default): if \code{TRUE}, setting row names and
+converting column names (to syntactic names: see \code{make.names}) is optional (see \code{\link[base]{as.data.frame}})}
+
+\item{recursive}{\code{\link{logical}} (with default): enables or disables further subsetting (\code{unlist})}
+
+\item{z}{\code{\link{integer}} (optional): the column index of the matrix or data.frame}
+
+\item{drop}{\code{\link{logical}} (with default): keep object structure or drop it}
+
+\item{i}{\code{\link{character}} (optional): name of the wanted record type or data object}
+}
+\description{
+Methods for S3-generics implemented for the package 'Luminescence'.
+This document summarises all implemented S3-generics. The name of the function
+is given before the first dot, after the dot the name of the object that is supported by this method
+is given, e.g. \code{plot.RLum.Data.Curve} can be called by \code{plot(object, ...)}, where
+\code{object} is the \code{RLum.Data.Curve} object.
+}
+\details{
+The term S3-generics sounds complicated, but it just means that something has been implemented
+in the package to increase its usability for users new to R who are not familiar with the
+underlying \code{RLum}-object structure of the package. The practical outcome is that
+operations and functions presented in standard books on R can be used without knowing the specifics
+of the R package 'Luminescence'. For examples see the example section.
+}
+\note{
+\code{methods_RLum} are not really new functions; everything given here is mostly just a
+surrogate for existing functions in the package.
+}
+\examples{
+
+##load example data
+data(ExampleData.RLum.Analysis, envir = environment())
+
+
+##combine curves in various ways
+curve1 <- IRSAR.RF.Data[[1]]
+curve2 <- IRSAR.RF.Data[[1]]
+curve1 + curve2
+curve1 - curve2
+curve1 / curve2
+curve1 * curve2
+
+
+##`$` access curves
+IRSAR.RF.Data$RF
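+
+##apply further S3-generics; a minimal sketch, any RLum.Data.Curve
+##or RLum.Analysis object from the example data works here
+df <- as.data.frame(curve1)
+head(df)
+length(IRSAR.RF.Data)
+
+##subset a Risoe.BINfileData object via its metadata (assumes the
+##example data set ExampleData.BINfileData and its POSITION column)
+data(ExampleData.BINfileData, envir = environment())
+subset(CWOSL.SAR.Data, POSITION == 1)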
+
+}
+
diff --git a/man/model_LuminescenceSignals.Rd b/man/model_LuminescenceSignals.Rd
new file mode 100644
index 0000000..9facba8
--- /dev/null
+++ b/man/model_LuminescenceSignals.Rd
@@ -0,0 +1,46 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model_LuminescenceSignals.R
+\name{model_LuminescenceSignals}
+\alias{model_LuminescenceSignals}
+\title{Model Luminescence Signals (wrapper)}
+\usage{
+model_LuminescenceSignals(model, sequence, lab.dose_rate = 1,
+  simulate_sample_history = FALSE, plot = TRUE, verbose = TRUE,
+  show.structure = FALSE, ...)
+}
+\arguments{
+\item{model}{\code{\link{character}} (\bold{required}): set model to be used. Available models are:
+"Bailey2001", "Bailey2002", "Bailey2004", "Pagonis2007", "Pagonis2008"}
+
+\item{sequence}{\code{\link{list}} (\bold{required}): set sequence to model as \code{\link{list}} or as a *.seq file from the
+Riso sequence editor. To simulate SAR measurements there is an extra option to set the sequence list (cf. details).}
+
+\item{lab.dose_rate}{\code{\link{numeric}} (with default): laboratory dose rate in
+Gy/s for converting seconds into Gray in the *.seq file.}
+
+\item{simulate_sample_history}{\code{\link{logical}} (with default): \code{FALSE} (the default): the simulation begins at laboratory conditions; \code{TRUE}: the simulation begins at the crystallization
+process (all levels 0)}
+
+\item{plot}{\code{\link{logical}} (with default): Enables or disables plot output}
+
+\item{verbose}{\code{\link{logical}} (with default): Verbose mode on/off}
+
+\item{show.structure}{\code{\link{logical}} (with default): shows the structure of the result.
+Recommended for showing the \code{record.id} needed to analyse concentrations.}
+
+\item{...}{further arguments and graphical parameters passed to
+\code{\link{plot.default}}. See details for further information.}
+}
+\description{
+Wrapper for the function \code{\link[RLumModel]{model_LuminescenceSignals}} from the package
+\link[RLumModel]{RLumModel-package}. For further details and examples please
+see the manual of that package.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:36:06)
+}
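+\examples{
+
+## a minimal sketch following the sequence-list format of the
+## 'RLumModel' package; see the manual of that package for
+## authoritative examples (the values below are illustrative only)
+\dontrun{
+sequence <- list(
+  IRR = c(20, 10, 1),  ## temperature (deg. C), dose (Gy), dose rate (Gy/s)
+  TL = c(20, 400, 5)   ## begin/end temperature (deg. C), heating rate (deg. C/s)
+)
+
+model_LuminescenceSignals(
+  model = "Bailey2001",
+  sequence = sequence
+)
+}
+
+}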
+\author{
+Johannes Friedrich, University of Bayreuth (Germany),\cr
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), \cr
+\cr R Luminescence Package Team}
+
diff --git a/man/names_RLum.Rd b/man/names_RLum.Rd
new file mode 100644
index 0000000..cc63562
--- /dev/null
+++ b/man/names_RLum.Rd
@@ -0,0 +1,40 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/names_RLum.R
+\name{names_RLum}
+\alias{names_RLum}
+\title{S4-names function for RLum S4 class objects}
+\usage{
+names_RLum(object)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum}}
+}
+\value{
+Returns a \code{\link{character}}
+}
+\description{
+Function calls object-specific names functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding 'names' function will be selected. Allowed arguments can be found
+in the documentations of the corresponding \code{\linkS4class{RLum}} class.
+}
+\section{Function version}{
+ 0.1.0 (2015-11-29 17:27:48)
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/plot_AbanicoPlot.Rd b/man/plot_AbanicoPlot.Rd
new file mode 100644
index 0000000..b9a17f0
--- /dev/null
+++ b/man/plot_AbanicoPlot.Rd
@@ -0,0 +1,425 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_AbanicoPlot.R
+\name{plot_AbanicoPlot}
+\alias{plot_AbanicoPlot}
+\title{Function to create an Abanico Plot.}
+\usage{
+plot_AbanicoPlot(data, na.rm = TRUE, log.z = TRUE, z.0 = "mean.weighted",
+  dispersion = "qr", plot.ratio = 0.75, rotate = FALSE, mtext, summary,
+  summary.pos, summary.method = "MCM", legend, legend.pos, stats,
+  rug = FALSE, kde = TRUE, hist = FALSE, dots = FALSE,
+  boxplot = FALSE, y.axis = TRUE, error.bars = FALSE, bar, bar.col,
+  polygon.col, line, line.col, line.lty, line.label, grid.col, frame = 1,
+  bw = "SJ", output = FALSE, interactive = FALSE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+object (required): for \code{data.frame} two columns: De (\code{data[,1]})
+and De error (\code{data[,2]}). To plot several data sets in one plot the
+data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.}
+
+\item{na.rm}{\code{\link{logical}} (with default): exclude NA values
+from the data set prior to any further operations.}
+
+\item{log.z}{\code{\link{logical}} (with default): Option to display the
+z-axis in logarithmic scale. Default is \code{TRUE}.}
+
+\item{z.0}{\code{\link{character}} or \code{\link{numeric}}: User-defined
+central value, used for centering of data. One out of \code{"mean"},
+\code{"mean.weighted"} and \code{"median"} or a numeric value (not its
+logarithm). Default is \code{"mean.weighted"}.}
+
+\item{dispersion}{\code{\link{character}} (with default): measure of
+dispersion, used for drawing the scatter polygon. One out of \code{"qr"}
+(quartile range), \code{"pnn"} (symmetric percentile range with nn the lower
+percentile, e.g. \code{"p05"} depicting the range between 5 and 95 %),
+\code{"sd"} (standard deviation) and \code{"2sd"} (2 standard deviations),
+default is \code{"qr"}. Note that \code{"sd"} and \code{"2sd"} are only
+meaningful in combination with \code{"z.0 = 'mean'"} because the unweighted
+mean is used to center the polygon.}
+
+\item{plot.ratio}{\code{\link{numeric}}: relative space given to the radial
+versus the cartesian plot part, default is \code{0.75}.}
+
+\item{rotate}{\code{\link{logical}}: Option to turn the plot by 90 degrees.}
+
+\item{mtext}{\code{\link{character}}: additional text below the plot title.}
+
+\item{summary}{\code{\link{character}} (optional): add statistic measures of
+centrality and dispersion to the plot. Can be one or more of several
+keywords. See details for available keywords. Results differ depending on
+the log-option for the z-scale (see details).}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used.}
+
+\item{summary.method}{\code{\link{character}} (with default): keyword
+indicating the method used to calculate the statistic summary. One out of
+\code{"unweighted"}, \code{"weighted"} and \code{"MCM"}. See
+\code{\link{calc_Statistics}} for details.}
+
+\item{legend}{\code{\link{character}} vector (optional): legend content to
+be added to the plot.}
+
+\item{legend.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the legend to be plotted.}
+
+\item{stats}{\code{\link{character}}: additional labels of statistically
+important values in the plot. One or more out of the following:
+\code{"min"}, \code{"max"}, \code{"median"}.}
+
+\item{rug}{\code{\link{logical}}: Option to add a rug to the KDE part, to
+indicate the location of individual values.}
+
+\item{kde}{\code{\link{logical}}: Option to add a KDE plot to the dispersion
+part, default is \code{TRUE}.}
+
+\item{hist}{\code{\link{logical}}: Option to add a histogram to the
+dispersion part. Only meaningful when not more than one data set is plotted.}
+
+\item{dots}{\code{\link{logical}}: Option to add a dot plot to the
+dispersion part. If number of dots exceeds space in the dispersion part, a
+square indicates this.}
+
+\item{boxplot}{\code{\link{logical}}: Option to add a boxplot to the
+dispersion part, default is \code{FALSE}.}
+
+\item{y.axis}{\code{\link{logical}}: Option to hide y-axis labels. Useful
+for data with small scatter.}
+
+\item{error.bars}{\code{\link{logical}}: Option to show De-errors as error
+bars on De-points. Useful in combination with \code{y.axis = FALSE, bar.col
+= "none"}.}
+
+\item{bar}{\code{\link{numeric}} (with default): option to add one or more
+dispersion bars (i.e., bar showing the 2-sigma range) centered at the
+defined values. By default a bar is drawn according to \code{"z.0"}. To omit
+the bar set \code{"bar = FALSE"}.}
+
+\item{bar.col}{\code{\link{character}} or \code{\link{numeric}} (with
+default): colour of the dispersion bar. Default is \code{"grey60"}.}
+
+\item{polygon.col}{\code{\link{character}} or \code{\link{numeric}} (with
+default): colour of the polygon showing the data scatter. Sometimes this
+polygon may be omitted for clarity. To disable it use \code{FALSE} or
+\code{polygon = FALSE}. Default is \code{"grey80"}.}
+
+\item{line}{\code{\link{numeric}}: numeric values of the additional lines to
+be added.}
+
+\item{line.col}{\code{\link{character}} or \code{\link{numeric}}: colour of
+the additional lines.}
+
+\item{line.lty}{\code{\link{integer}}: line type of additional lines}
+
+\item{line.label}{\code{\link{character}}: labels for the additional lines.}
+
+\item{grid.col}{\code{\link{character}} or \code{\link{numeric}} (with
+default): colour of the grid lines (originating at [0,0] and stretching to
+the z-scale). To disable grid lines use \code{FALSE}. Default is
+\code{"grey"}.}
+
+\item{frame}{\code{\link{numeric}} (with default): option to modify the
+plot frame type. Can be one out of \code{0} (no frame), \code{1} (frame
+originates at 0,0 and runs along min/max isochrons), \code{2} (frame
+embraces the 2-sigma bar), \code{3} (frame embraces the entire plot as a
+rectangle). Default is \code{1}.}
+
+\item{bw}{\code{\link{character}} (with default): bin-width for KDE, choose
+a numeric value for manual setting.}
+
+\item{output}{\code{\link{logical}}: Optional output of numerical plot
+parameters. These can be useful to reproduce similar plots. Default is
+\code{FALSE}.}
+
+\item{interactive}{\code{\link{logical}} (with default): create an interactive
+abanico plot (requires the 'plotly' package)}
+
+\item{\dots}{Further plot arguments to pass. \code{xlab} must be a vector of
+length 2, specifying the upper and lower x-axes labels.}
+}
+\value{
+returns a plot object and, optionally, a list with plot calculation
+data.
+}
+\description{
+A plot is produced which allows comprehensive presentation of data precision
+and its dispersion around a central value as well as illustration of a
+kernel density estimate, histogram and/or dot plot of the dose values.
+}
+\details{
+The Abanico Plot is a combination of the classic Radial Plot
+(\code{plot_RadialPlot}) and a kernel density estimate plot (e.g.
+\code{plot_KDE}). It allows straightforward visualisation of data precision,
+error scatter around a user-defined central value and the combined
+distribution of the values, on the actual scale of the measured data (e.g.
+seconds, equivalent dose, years). The principle of the plot is shown in
+Galbraith & Green (1990). The function authors are thankful for the
+thought-provoking figure in this article. \cr The semi-circle (z-axis) of the
+classic Radial Plot is bent to a straight line here, which actually is the
+basis for combining this polar (radial) part of the plot with any other
+cartesian visualisation method (KDE, histogram, PDF and so on). Note that
+the plot allows displaying two measures of distribution. One is the 2-sigma
+bar, which illustrates the spread in value errors, and the other is the
+polygon, which stretches over both parts of the Abanico Plot (polar and
+cartesian) and illustrates the actual spread in the values themselves. \cr
+Since the 2-sigma-bar is a polygon, it can be (and is) filled with shaded
+lines. To change density (lines per inch, default is 15) and angle (default
+is 45 degrees) of the shading lines, specify these parameters. See
+\code{?polygon()} for further help. \cr The Abanico Plot supports other than
+the weighted mean as measure of centrality. When it is obvious that the data
+is not (log-)normally distributed, the mean (weighted or not) cannot be a
+valid measure of centrality and hence central dose. Accordingly, the median
+and the weighted median can be chosen as well to represent a proper measure
+of centrality (e.g. \code{z.0 = "median"}). Also
+user-defined numeric values (e.g. from the central age model) can be used if
+this appears appropriate. \cr The proportion of the polar part and the
+cartesian part of the Abanico Plot can be modified for display reasons
+(\code{plot.ratio = 0.75}). By default, the polar part spreads over 75 \%
+and leaves 25 \% for the part that shows the KDE graph.\cr\cr
+A statistic summary, i.e. a collection of statistic measures of
+centrality and dispersion (and further measures) can be added by specifying
+one or more of the following keywords:
+
+\itemize{
+\item \code{"n"} (number of samples)
+\item \code{"mean"} (mean De value)
+\item \code{"median"} (median of the De values)
+\item \code{"sd.rel"} (relative standard deviation in percent)
+\item \code{"sd.abs"} (absolute standard deviation)
+\item \code{"se.rel"} (relative standard error)
+\item \code{"se.abs"} (absolute standard error)
+\item \code{"in.2s"} (percent of samples in 2-sigma range)
+\item \code{"kurtosis"} (kurtosis)
+\item \code{"skewness"} (skewness)
+}
+
+Note that the input data for the statistic summary is sent to the function
+\code{calc_Statistics()} depending on the log-option for the z-scale. If
+\code{"log.z = TRUE"}, the summary is based on the logarithms of the input
+data. If \code{"log.z = FALSE"} the linearly scaled data is used. \cr
+Note as well that \code{calc_Statistics()} calculates these statistic
+measures in three different ways: \code{unweighted}, \code{weighted} and
+\code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the
+MCM-based version is used. If you wish to use another method, indicate this
+with the appropriate keyword using the argument \code{summary.method}.\cr\cr
+
+The optional parameter \code{layout} allows more sophisticated modification of
+the entire plot. Each element of the plot can be addressed and its properties
+can be defined. This includes font type, size and decoration, colours and
+sizes of all plot items. To infer the definition of a specific layout style
+cf. \code{get_Layout()} or type, e.g. for the layout type \code{"journal"},
+\code{get_Layout("journal")}. A layout type can be modified by the user by
+assigning new values to the list object.\cr\cr For the z-scale it is
+possible to specify where ticks are to be drawn by using the parameter
+\code{at}, e.g. \code{at = seq(80, 200, 20)}, cf. function documentation of
+\code{axis}. Specifying tick positions manually overrides a
+\code{zlim}-definition.
+}
+\section{Function version}{
+ 0.1.10 (2016-09-09 10:32:17)
+}
+\examples{
+
+## load example data and recalculate to Gray
+data(ExampleData.DeValues, envir = environment())
+ExampleData.DeValues <- ExampleData.DeValues$CA1
+
+## plot the example data straightforward
+plot_AbanicoPlot(data = ExampleData.DeValues)
+
+## now with linear z-scale
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 log.z = FALSE)
+
+## now with output of the plot parameters
+plot1 <- plot_AbanicoPlot(data = ExampleData.DeValues,
+                          output = TRUE)
+str(plot1)
+plot1$zlim
+
+## now with adjusted z-scale limits
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 zlim = c(10, 200))
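+
+## now with manually set z-scale tick positions (cf. details;
+## this overrides a zlim definition)
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 at = seq(80, 200, 20))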
+
+## now with adjusted x-scale limits
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 xlim = c(0, 20))
+
+## now with rug to indicate individual values in KDE part
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 rug = TRUE)
+
+## now with a smaller bandwidth for the KDE plot
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 bw = 0.04)
+
+## now with a histogram instead of the KDE plot
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 hist = TRUE,
+                 kde = FALSE)
+
+## now with a KDE plot and histogram with manual number of bins
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 hist = TRUE,
+                 breaks = 20)
+
+## now with a KDE plot and a dot plot
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 dots = TRUE)
+
+## now with user-defined plot ratio
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 plot.ratio = 0.5)
+
+## now with user-defined central value
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 z.0 = 70)
+
+## now with median as central value
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 z.0 = "median")
+
+## now with the 17-83 percentile range as definition of scatter
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 z.0 = "median",
+                 dispersion = "p17")
+
+## now with a user-defined green line for the central age model (CAM)
+CAM <- calc_CentralDose(ExampleData.DeValues,
+                        plot = FALSE)
+
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 line = CAM,
+                 line.col = "darkgreen",
+                 line.label = "CAM")
+
+## now create plot with legend, colour, different points and smaller scale
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 legend = "Sample 1",
+                 col = "tomato4",
+                 bar.col = "peachpuff",
+                 pch = "R",
+                 cex = 0.8)
+
+## now without 2-sigma bar, polygon, grid lines and central value line
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 bar.col = FALSE,
+                 polygon.col = FALSE,
+                 grid.col = FALSE,
+                 y.axis = FALSE,
+                 lwd = 0)
+
+## now with direct display of De errors, without 2-sigma bar
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 bar.col = FALSE,
+                 ylab = "",
+                 y.axis = FALSE,
+                 error.bars = TRUE)
+
+## now with user-defined axes labels
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 xlab = c("Data error (\%)",
+                          "Data precision"),
+                 ylab = "Scatter",
+                 zlab = "Equivalent dose [Gy]")
+
+## now with minimum, maximum and median value indicated
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 stats = c("min", "max", "median"))
+
+## now with a brief statistical summary as subheader
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 summary = c("n", "in.2s"))
+
+## now with another statistical summary
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 summary = c("mean.weighted", "median"),
+                 summary.pos = "topleft")
+
+## now a plot with two 2-sigma bars for one data set
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 bar = c(30, 100))
+
+## now the data set is split into sub-groups, one is manipulated
+data.1 <- ExampleData.DeValues[1:30,]
+data.2 <- ExampleData.DeValues[31:62,] * 1.3
+
+## now a common dataset is created from the two subgroups
+data.3 <- list(data.1, data.2)
+
+## now the two data sets are plotted in one plot
+plot_AbanicoPlot(data = data.3)
+
+## now with some graphical modification
+plot_AbanicoPlot(data = data.3,
+                 z.0 = "median",
+                 col = c("steelblue4", "orange4"),
+                 bar.col = c("steelblue3", "orange3"),
+                 polygon.col = c("steelblue1", "orange1"),
+                 pch = c(2, 6),
+                 angle = c(30, 50),
+                 summary = c("n", "in.2s", "median"))
+
+## create Abanico plot with predefined layout definition
+plot_AbanicoPlot(data = ExampleData.DeValues,
+                 layout = "journal")
+
+## now with predefined layout definition and further modifications
+plot_AbanicoPlot(data = data.3,
+                 z.0 = "median",
+                 layout = "journal",
+                 col = c("steelblue4", "orange4"),
+                 bar.col = adjustcolor(c("steelblue3", "orange3"),
+                                       alpha.f = 0.5),
+                 polygon.col = c("steelblue3", "orange3"))
+
+## for further information on layout definitions see documentation
+## of function get_Layout()
+
+## now with manually added plot content
+## create empty plot with numeric output
+AP <- plot_AbanicoPlot(data = ExampleData.DeValues,
+                       pch = NA,
+                       output = TRUE)
+
+## identify data in 2 sigma range
+in_2sigma <- AP$data[[1]]$data.in.2s
+
+## restore function-internal plot parameters
+par(AP$par)
+
+## add points inside 2-sigma range
+points(x = AP$data[[1]]$precision[in_2sigma],
+       y = AP$data[[1]]$std.estimate.plot[in_2sigma],
+       pch = 16)
+
+## add points outside 2-sigma range
+points(x = AP$data[[1]]$precision[!in_2sigma],
+       y = AP$data[[1]]$std.estimate.plot[!in_2sigma],
+       pch = 1)
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Inspired by a plot
+introduced by Galbraith & Green (1990)
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R. & Green, P., 1990. Estimating the component ages
+in a finite mixture. International Journal of Radiation Applications and
+Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3),
+197-206.
+
+Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2015.
+The abanico plot: visualising chronometric data with individual standard errors.
+Quaternary Geochronology. doi:10.1016/j.quageo.2015.09.003
+}
+\seealso{
+\code{\link{plot_RadialPlot}}, \code{\link{plot_KDE}},
+\code{\link{plot_Histogram}}
+}
+
diff --git a/man/plot_DRTResults.Rd b/man/plot_DRTResults.Rd
new file mode 100644
index 0000000..41ade5b
--- /dev/null
+++ b/man/plot_DRTResults.Rd
@@ -0,0 +1,173 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_DRTResults.R
+\name{plot_DRTResults}
+\alias{plot_DRTResults}
+\title{Visualise dose recovery test results}
+\usage{
+plot_DRTResults(values, given.dose = NULL, error.range = 10, preheat,
+  boxplot = FALSE, mtext, summary, summary.pos, legend, legend.pos,
+  par.local = TRUE, na.rm = FALSE, ...)
+}
+\arguments{
+\item{values}{\code{\linkS4class{RLum.Results}} or \code{\link{data.frame}},
+(\bold{required}): input values containing at least De and De error. To plot
+more than one data set in one figure, a \code{list} of the individual data
+sets must be provided (e.g. \code{list(dataset.1, dataset.2)}).}
+
+\item{given.dose}{\code{\link{numeric}} (optional): given dose used for the
+dose recovery test to normalise data. If only one given dose is provided
+this given dose is valid for all input data sets (i.e., \code{values} is a
+list).  Otherwise a given dose for each input data set has to be provided
+(e.g., \code{given.dose = c(100,200)}). If no \code{given.dose} is provided,
+values are plotted without normalisation (might be useful for preheat plateau tests).
+Note: the unit has to be the same as that of the input values (e.g., seconds or
+Gray).}
+
+\item{error.range}{\code{\link{numeric}}: symmetric error range in percent that
+will be shown as dashed lines in the plot. Set \code{error.range} to 0 to
+avoid plotting of error ranges.}
+
+\item{preheat}{\code{\link{numeric}}: optional vector of preheat
+temperatures to be used for grouping the De values. If specified, the
+temperatures are assigned to the x-axis.}
+
+\item{boxplot}{\code{\link{logical}}: optionally plot values that are
+grouped by preheat temperature as boxplots. Only possible when
+\code{preheat} vector is specified.}
+
+\item{mtext}{\code{\link{character}}: additional text below the plot title.}
+
+\item{summary}{\code{\link{character}} (optional): adds numerical output to
+the plot.  Can be one or more out of: \code{"n"} (number of samples),
+\code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+\code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+deviation in percent), \code{"sdabs"} (absolute standard deviation),
+\code{"serel"} (relative standard error) and \code{"seabs"} (absolute
+standard error).}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used.}
+
+\item{legend}{\code{\link{character}} vector (optional): legend content to
+be added to the plot.}
+
+\item{legend.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the legend to be plotted.}
+
+\item{par.local}{\code{\link{logical}} (with default): use local graphical
+parameters for plotting, e.g. the plot is shown in one column and one row.
+If \code{par.local = FALSE}, global parameters are inherited, i.e. parameters
+provided via \code{par()} work}
+
+\item{na.rm}{\code{\link{logical}}: indicating whether \code{NA} values are
+removed from the input data set before plotting}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot}}.}
+}
+\value{
+A plot is returned.
+}
+\description{
+The function provides a standardised plot output for dose recovery test
+measurements.
+}
+\details{
+Procedure to test the accuracy of a measurement protocol to reliably
+determine the dose of a specific sample. Here, the natural signal is erased
+and a known laboratory dose is administered, which is treated as unknown. Then
+the De measurement is carried out and the degree of congruence between
+administered and recovered dose is a measure of the protocol's accuracy for
+this sample.\cr In the plot the normalised De is shown on the y-axis, i.e.
+obtained De/Given Dose.
+}
+\note{
+Further data and plot arguments can be added by using the appropriate R
+commands.
+}
+\section{Function version}{
+ 0.1.10 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+## read example data set and misapply them for this plot type
+data(ExampleData.DeValues, envir = environment())
+
+## plot values
+plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+given.dose = 2800, mtext = "Example data")
+
+## plot values with legend
+plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+                given.dose = 2800,
+                legend = "Test data set")
+
+## create and plot two subsets with randomised values
+x.1 <- ExampleData.DeValues$BT998[7:11,]
+x.2 <- ExampleData.DeValues$BT998[7:11,] * c(runif(5, 0.9, 1.1), 1)
+
+plot_DRTResults(values = list(x.1, x.2),
+                given.dose = 2800)
+
+## some more user-defined plot parameters
+plot_DRTResults(values = list(x.1, x.2),
+                given.dose = 2800,
+                pch = c(2, 5),
+                col = c("orange", "blue"),
+                xlim = c(0, 8),
+                ylim = c(0.85, 1.15),
+                xlab = "Sample aliquot")
+
+## plot the data with user-defined statistical measures as legend
+plot_DRTResults(values = list(x.1, x.2),
+                given.dose = 2800,
+                summary = c("n", "mean.weighted", "sd"))
+
+## plot the data with user-defined statistical measures as sub-header
+plot_DRTResults(values = list(x.1, x.2),
+                given.dose = 2800,
+                summary = c("n", "mean.weighted", "sd"),
+                summary.pos = "sub")
+
+## plot the data grouped by preheat temperatures
+plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+                given.dose = 2800,
+                preheat = c(200, 200, 200, 240, 240))
+
+## plot two data sets grouped by preheat temperatures
+plot_DRTResults(values = list(x.1, x.2),
+                given.dose = 2800,
+                preheat = c(200, 200, 200, 240, 240))
+
+## plot the data grouped by preheat temperatures as boxplots
+plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+                given.dose = 2800,
+                preheat = c(200, 200, 200, 240, 240),
+                boxplot = TRUE)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+\references{
+Wintle, A.G., Murray, A.S., 2006. A review of quartz optically
+stimulated luminescence characteristics and their relevance in
+single-aliquot regeneration dating protocols. Radiation Measurements, 41,
+369-391.
+}
+\seealso{
+\code{\link{plot}}
+}
+\keyword{dplot}
+
diff --git a/man/plot_DetPlot.Rd b/man/plot_DetPlot.Rd
new file mode 100644
index 0000000..91712eb
--- /dev/null
+++ b/man/plot_DetPlot.Rd
@@ -0,0 +1,135 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_DetPlot.R
+\name{plot_DetPlot}
+\alias{plot_DetPlot}
+\title{Create De(t) plot}
+\usage{
+plot_DetPlot(object, signal.integral.min, signal.integral.max,
+  background.integral.min, background.integral.max, method = "shift",
+  signal_integral.seq = NULL, analyse_function = "analyse_SAR.CWOSL",
+  analyse_function.control = list(), n.channels = NULL,
+  show_ShineDownCurve = TRUE, respect_RC.Status = FALSE, verbose = TRUE,
+  ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}): input
+object containing data for analysis}
+
+\item{signal.integral.min}{\code{\link{integer}} (\bold{required}): lower
+bound of the signal integral.}
+
+\item{signal.integral.max}{\code{\link{integer}} (\bold{required}): upper
+bound of the signal integral.}
+
+\item{background.integral.min}{\code{\link{integer}} (\bold{required}):
+lower bound of the background integral.}
+
+\item{background.integral.max}{\code{\link{integer}} (\bold{required}):
+upper bound of the background integral.}
+
+\item{method}{\code{\link{character}} (with default): method applied for constructing the De(t) plot.
+\code{shift} (the default): the chosen signal integral is shifted along the shine-down curve,
+\code{expansion}: the chosen signal integral is expanded each time by its length}
+
+\item{signal_integral.seq}{\code{\link{numeric}} (optional): argument to provide a custom
+signal integral sequence for constructing the De(t) plot}
+
+\item{analyse_function}{\code{\link{character}} (with default): name of the analyse function
+to be called. Supported functions are: \code{'analyse_SAR.CWOSL'}, \code{'analyse_pIRIRSequence'}}
+
+\item{analyse_function.control}{\code{\link{list}} (optional): arguments to be passed to the
+supported analyse functions (\code{'analyse_SAR.CWOSL'}, \code{'analyse_pIRIRSequence'})}
+
+\item{n.channels}{\code{\link{integer}} (optional): number of channels used for the De(t) plot.
+If nothing is provided all De-values are calculated and plotted until the start of the background
+integral.}
+
+\item{show_ShineDownCurve}{\code{\link{logical}} (with default): enables or disables shine down
+curve in the plot output}
+
+\item{respect_RC.Status}{\code{\link{logical}} (with default): remove De-values with 'FAILED' RC.Status
+from the plot (cf. \code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}})}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot.default}}, \code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}}.
+See details for further information.}
+}
+\value{
+A plot and an \code{\linkS4class{RLum.Results}} object with the produced De values
+
+\code{@data}:
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Description}\cr
+De.values \tab \code{data.frame} \tab table with De values \cr
+signal_integral.seq \tab \code{numeric} \tab integral sequence used for the calculation
+}
+
+\code{@info}:
+
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Description}\cr
+call \tab \code{call} \tab the original function call
+}
+}
+\description{
+Plots the equivalent dose (De) as a function of the chosen signal integral (cf. Bailey et al., 2003).
+The function simply passes several arguments to the function \code{\link{plot}} and to the
+chosen analysis function, which is run in a loop. Example: \code{legend.pos} for legend position,
+\code{legend} for legend text.\cr
+}
+\details{
+\bold{method}\cr
+
+The original method presented by Bailey et al., 2003 shifted the signal integrals and slightly
+extended them to account for changes in the counting statistics. Example: \code{c(1:3, 3:5, 5:7)}.
+However, another method is provided here as well, which expands the signal integral
+consecutively by its chosen length. Example: \code{c(1:3, 1:5, 1:7)}
+
+Note that in both cases the integral limits overlap. The finally applied limits are part
+of the function output.\cr
+}
+\note{
+The entire analysis is based on the used analysis functions, namely
+\code{\link{analyse_SAR.CWOSL}} and \code{\link{analyse_pIRIRSequence}}. However, the integrity
+checks of this function are not as thorough as in those functions themselves. This means that
+every sequence should be checked carefully before running long calculations using several
+hundred channels.
+}
+\section{Function version}{
+ 0.1.0 (2016-05-19 23:48:19)
+}
+\examples{
+
+\dontrun{
+##load data
+##ExampleData.BINfileData contains two BINfileData objects
+##CWOSL.SAR.Data and TL.SAR.Data
+data(ExampleData.BINfileData, envir = environment())
+
+##transform the values from the first position in a RLum.Analysis object
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+
+plot_DetPlot(object,
+             signal.integral.min = 1,
+             signal.integral.max = 3,
+             background.integral.min = 900,
+             background.integral.max = 1000,
+             n.channels = 5)
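+
+##the same with the 'expansion' method (a sketch, cf. details on
+##the difference between both methods)
+plot_DetPlot(object,
+             signal.integral.min = 1,
+             signal.integral.max = 3,
+             background.integral.min = 900,
+             background.integral.max = 1000,
+             method = "expansion",
+             n.channels = 5)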
+}
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Bailey, R.M., Singarayer, J.S., Ward, S., Stokes, S., 2003. Identification of partial resetting
+using De as a function of illumination time. Radiation Measurements 37, 511-518.
+doi:10.1016/S1350-4487(03)00063-5
+}
+\seealso{
+\code{\link{plot}}, \code{\link{analyse_SAR.CWOSL}}, \code{\link{analyse_pIRIRSequence}}
+}
+
diff --git a/man/plot_FilterCombinations.Rd b/man/plot_FilterCombinations.Rd
new file mode 100644
index 0000000..761f4d8
--- /dev/null
+++ b/man/plot_FilterCombinations.Rd
@@ -0,0 +1,127 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_FilterCombinations.R
+\name{plot_FilterCombinations}
+\alias{plot_FilterCombinations}
+\title{Plot filter combinations along with the (optional) net transmission window}
+\usage{
+plot_FilterCombinations(filters, wavelength_range = 200:1000,
+  show_net_transmission = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{filters}{\code{\link{list}} (\bold{required}): a named list of filter data for each filter to be shown.
+The filter data itself should be either provided as \code{\link{data.frame}} or \code{\link{matrix}}.
+(for more options see details)}
+
+\item{wavelength_range}{\code{\link{numeric}} (with default): wavelength range used for the interpolation}
+
+\item{show_net_transmission}{\code{\link{logical}} (with default): show net transmission window
+as polygon.}
+
+\item{plot}{\code{\link{logical}} (with default): enables or disables the plot output}
+
+\item{\dots}{further arguments that can be passed to control the plot output. Supported are \code{main},
+\code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{type}, \code{lty}, \code{lwd}.
+For non-common plotting parameters see the details section.}
+}
+\value{
+Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
+
+\bold{@data}
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+ net_transmission_window \tab \code{matrix} \tab the resulting net transmission window \cr
+ filter_matrix \tab \code{matrix} \tab the filter matrix used for plotting
+
+}
+
+\bold{@info}
+\tabular{lll}{
+\bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
+call \tab \code{call} \tab the original function call
+
+}
+}
+\description{
+The function allows plotting transmission windows for different filters. Missing data for specific
+wavelengths are automatically interpolated for the given filter data using the function \code{\link{approx}}.
+With that a standardised output is achieved and a net transmission window can be shown.\cr
+}
+\details{
+\bold{How to provide input data?}\cr
+
+CASE 1\cr
+
+The function expects that all filter values are either of type \code{matrix} or \code{data.frame}
+with two columns. The first column contains the wavelength, the second the relative transmission
+(but not in percentage, i.e. the maximum transmission can only become 1).
+
+In this case only the transmission window is shown as provided. Changes in filter thickness and
+reflection factor are not considered. \cr
+
+CASE 2\cr
+
+The filter data itself are provided as list element containing a \code{matrix} or \code{data.frame}
+and additional information on the thickness of the filter, e.g., \code{list(filter1 = list(filter_matrix, d = 2))}.
+The given filter data are always considered as standard input and the filter thickness value
+is taken into account by
+
+\deqn{Transmission = Transmission^{d}}
+
+with d given in the same dimension as the original filter data.\cr
+
+CASE 3\cr
+
+Same as CASE 2 but additionally a reflection factor P is provided, e.g.,
+\code{list(filter1 = list(filter_matrix, d = 2, P = 0.9))}. The final transmission
+becomes:
+
+\deqn{Transmission = Transmission^{d} * P}\cr
+
+\bold{Advanced plotting parameters}\cr
+
+The following further non-common plotting parameters can be passed to the function:\cr
+
+\tabular{lll}{
+\bold{Argument} \tab \bold{Datatype} \tab \bold{Description}\cr
+\code{legend} \tab \code{logical} \tab enable/disable legend \cr
+\code{legend.pos} \tab \code{character} \tab change legend position (\code{\link[graphics]{legend}}) \cr
+\code{legend.text} \tab \code{character} \tab same as the argument \code{legend} in (\code{\link[graphics]{legend}}) \cr
+\code{net_transmission.col} \tab \code{col} \tab colour of net transmission window polygon \cr
+\code{grid} \tab \code{list} \tab full list of arguments that can be passed to the function \code{\link[graphics]{grid}}
+}
+
+For further modifications standard additional R plot functions are recommended, e.g., the legend
+can be fully customised by disabling the standard legend and using the function \code{\link[graphics]{legend}}
+instead.
+}
+\section{Function version}{
+ 0.1.0 (2016-08-26 10:45:14)
+}
+\examples{
+
+## (For legal reasons no real filter data are provided)
+
+## Create filter sets
+filter1 <- density(rnorm(100, mean = 450, sd = 20))
+filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2)
+filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2)
+
+## Example 1 (standard)
+plot_FilterCombinations(filters = list(filter1, filter2))
+
+## Example 2 (with d and P value and name for filter 2)
+results <- plot_FilterCombinations(
+filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6)))
+results
+
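+## Example 3 (a sketch using the advanced plot parameters from the
+## details section; the colour value is an arbitrary choice)
+plot_FilterCombinations(
+  filters = list(filter_1 = filter1, filter_2 = filter2),
+  net_transmission.col = "violet",
+  legend.pos = "topright")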
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Results}}, \code{\link{approx}}
+}
+\keyword{aplot}
+\keyword{datagen}
+
diff --git a/man/plot_GrowthCurve.Rd b/man/plot_GrowthCurve.Rd
new file mode 100644
index 0000000..4db3306
--- /dev/null
+++ b/man/plot_GrowthCurve.Rd
@@ -0,0 +1,194 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_GrowthCurve.R
+\name{plot_GrowthCurve}
+\alias{plot_GrowthCurve}
+\title{Fit and plot a growth curve for luminescence data (Lx/Tx against dose)}
+\usage{
+plot_GrowthCurve(sample, na.rm = TRUE, fit.method = "EXP",
+  fit.force_through_origin = FALSE, fit.weights = TRUE,
+  fit.includingRepeatedRegPoints = TRUE, fit.NumberRegPoints = NULL,
+  fit.NumberRegPointsReal = NULL, fit.bounds = TRUE,
+  NumberIterations.MC = 100, output.plot = TRUE,
+  output.plotExtended = TRUE, output.plotExtended.single = FALSE,
+  cex.global = 1, txtProgressBar = TRUE, verbose = TRUE, ...)
+}
+\arguments{
+\item{sample}{\code{\link{data.frame}} (\bold{required}): data frame with
+three columns (x = Dose, y = LxTx, z = LxTx.Error) and an optional fourth column (y1 = TnTx). The column for the
+test dose response is optional, but requires 'TnTx' as column name if used. For exponential
+fits at least three dose points (including the natural) should be provided.}
+
+\item{na.rm}{\code{\link{logical}} (with default): excludes \code{NA} values
+from the data set prior to any further operations.}
+
+\item{fit.method}{\code{\link{character}} (with default): function used for
+fitting. Possible options are: \code{LIN}, \code{QDR}, \code{EXP}, \code{EXP OR LIN},
+\code{EXP+LIN} or \code{EXP+EXP}. See details.}
+
+\item{fit.force_through_origin}{\code{\link{logical}} (with default): allows forcing
+the fitted function through the origin. For \code{method = "EXP+EXP"} the function will
+go to the origin in either case, so this option will have no effect.}
+
+\item{fit.weights}{\code{\link{logical}} (with default): option whether the
+fitting is done with or without weights. See details.}
+
+\item{fit.includingRepeatedRegPoints}{\code{\link{logical}} (with default):
+includes repeated points for fitting (\code{TRUE}/\code{FALSE}).}
+
+\item{fit.NumberRegPoints}{\code{\link{integer}} (optional): set number of
+regeneration points manually. By default the number of all (!) regeneration
+points is used automatically.}
+
+\item{fit.NumberRegPointsReal}{\code{\link{integer}} (optional): if the
+number of regeneration points is provided manually, the value of the real,
+regeneration points = all points (repeated points) including reg 0, has to
+be inserted.}
+
+\item{fit.bounds}{\code{\link{logical}} (with default): set lower fit bounds
+for all fitting parameters to 0. Limited for the use with the fit methods
+\code{EXP}, \code{EXP+LIN} and \code{EXP OR LIN}. Argument to be inserted
+for experimental application only!}
+
+\item{NumberIterations.MC}{\code{\link{integer}} (with default): number of
+Monte Carlo simulations for error estimation. See details.}
+
+\item{output.plot}{\code{\link{logical}} (with default): plot output
+(\code{TRUE/FALSE}).}
+
+\item{output.plotExtended}{\code{\link{logical}} (with default): If
+\code{TRUE}, 3 plots on one plot area are provided: (1) growth curve, (2)
+histogram from Monte Carlo error simulation and (3) a test dose response
+plot. If \code{FALSE}, just the growth curve will be plotted.
+\bold{Requires:} \code{output.plot = TRUE}.}
+
+\item{output.plotExtended.single}{\code{\link{logical}} (with default):
+single plot output (\code{TRUE/FALSE}) to allow for plotting the results in
+single plot windows. Requires \code{output.plot = TRUE} and
+\code{output.plotExtended = TRUE}.}
+
+\item{cex.global}{\code{\link{numeric}} (with default): global scaling
+factor.}
+
+\item{txtProgressBar}{\code{\link{logical}} (with default): enables or disables the txtProgressBar.
+If \code{verbose = FALSE}, no txtProgressBar is shown either.}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback.}
+
+\item{\dots}{Further arguments and graphical parameters to be passed. Note:
+Standard arguments will only be passed to the growth curve plot. Supported:
+\code{xlim}, \code{ylim}, \code{main}, \code{xlab}, \code{ylab}}
+}
+\value{
+Along with a plot (if wanted), an \code{RLum.Results} object is returned;
+the slot \code{data} contains the following elements:\cr
+
+\tabular{lll}{
+\bold{DATA.OBJECT} \tab \bold{TYPE} \tab \bold{DESCRIPTION} \cr
+\code{..$De} : \tab  \code{data.frame} \tab Table with De values \cr
+\code{..$De.MC} : \tab \code{numeric} \tab Table with De values from MC runs \cr
+\code{..$Fit} : \tab \code{\link{nls}} or \code{\link{lm}} \tab object from the fitting for \code{EXP},
+\code{EXP+LIN} and \code{EXP+EXP}; an \code{\link{lm}} object in case of a resulting linear fit when using \code{LIN}, \code{QDR} or
+\code{EXP OR LIN} \cr
+\code{..$Formula} : \tab \code{\link{expression}} \tab Fitting formula as R expression \cr
+\code{..$call} : \tab \code{call} \tab The original function call\cr
+}
+}
+\description{
+A dose response curve is produced for luminescence measurements using a
+regenerative protocol.
+}
+\details{
+\bold{Fitting methods} \cr\cr For all options (except for \code{LIN}, \code{QDR} and
+\code{EXP OR LIN}), the \code{\link[minpack.lm]{nlsLM}} function with the
+\code{LM} (Levenberg-Marquardt) algorithm is used. Note: for historical reasons,
+the Monte Carlo simulations partly use the function \code{\link{nls}} with the \code{port} algorithm.
+
+The solution is found by transforming the function or using \code{\link{uniroot}}. \cr
+
+\code{LIN}: fits a linear function to the data using
+\link{lm}: \deqn{y = m*x+n}
+
+\code{QDR}: fits a linear function to the data using
+\link{lm}: \deqn{y = a + b * x + c * x^2}
+
+\code{EXP}: try to fit a function of the form
+\deqn{y = a*(1-exp(-(x+c)/b))} Parameters b and c are approximated by a
+linear fit using \link{lm}. Note: b = D0\cr
+
+\code{EXP OR LIN}: works for some cases where an \code{EXP} fit fails. If
+the \code{EXP} fit fails, a \code{LIN} fit is done instead. \cr
+
+\code{EXP+LIN}: tries to fit an exponential plus linear function of the
+form: \deqn{y = a*(1-exp(-(x+c)/b)+(g*x))} The De is calculated by
+iteration.\cr \bold{Note:} In the context of luminescence dating, this
+function has no physical meaning. Therefore, no D0 value is returned.\cr
+
+\code{EXP+EXP}: tries to fit a double exponential function of the form
+\deqn{y = (a1*(1-exp(-(x)/b1)))+(a2*(1-exp(-(x)/b2)))} This fitting
+procedure is not robust against wrong start parameters and should be further
+improved.\cr\cr
+
+\bold{Fit weighting}\cr
+
+If the option \code{fit.weights =  TRUE} is chosen, weights are calculated using
+provided signal errors (Lx/Tx error): \deqn{fit.weights = 1/error/(sum(1/error))}\cr
+
+\bold{Error estimation using Monte Carlo simulation}\cr
+
+Error estimation is done using a Monte Carlo (MC) simulation approach. A set of Lx/Tx values is
+constructed by randomly sampling curve data from normal
+distributions. The normal distribution is defined by the input values (mean
+= value, sd = value.error). Then, a growth curve fit is attempted for each
+dataset resulting in a new distribution of single De values. The \link{sd}
+of this distribution then becomes the error of the De. With increasing
+iterations, the error value becomes more stable. \bold{Note:} It may take
+some calculation time with increasing MC runs, especially for the composed
+functions (\code{EXP+LIN} and \code{EXP+EXP}).\cr Each error estimation is
+done with the function of the chosen fitting method. \cr
+
+\bold{Subtitle information}\cr
+
+To avoid plotting the subtitle information, provide an empty user mtext, i.e. \code{mtext = ""}.
+To plot any other subtitle text, use \code{mtext}.
+}
+\section{Function version}{
+ 1.8.16 (2016-09-09 10:32:17)
+}
+\examples{
+
+##(1) plot growth curve for a dummy data.set and show De value
+data(ExampleData.LxTxData, envir = environment())
+temp <- plot_GrowthCurve(LxTxData)
+get_RLum(temp)
+
+##(1a) to access the fitting value try
+get_RLum(temp, data.object = "Fit")
+
+##(2) plot the growth curve only - uncomment to use
+##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special")
+plot_GrowthCurve(LxTxData)
+##dev.off()
+
+##(3) plot growth curve with pdf output - uncomment to use, single output
+##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special")
+plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE)
+##dev.off()
+
+##(4) plot resulting function for given interval x
+x <- seq(1,10000, by = 100)
+plot(
+ x = x,
+ y = eval(temp$Formula),
+ type = "l"
+)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), \cr Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link{nls}}, \code{\linkS4class{RLum.Results}},
+\code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link{lm}}, \code{\link{uniroot}}
+}
+
diff --git a/man/plot_Histogram.Rd b/man/plot_Histogram.Rd
new file mode 100644
index 0000000..b01ad38
--- /dev/null
+++ b/man/plot_Histogram.Rd
@@ -0,0 +1,123 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_Histogram.R
+\name{plot_Histogram}
+\alias{plot_Histogram}
+\title{Plot a histogram with separate error plot}
+\usage{
+plot_Histogram(data, na.rm = TRUE, mtext, cex.global, se, rug, normal_curve,
+  summary, summary.pos, colour, interactive = FALSE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+object (required): for \code{data.frame}: two columns: De (\code{data[,1]})
+and De error (\code{data[,2]})}
+
+\item{na.rm}{\code{\link{logical}} (with default): excludes \code{NA}
+values from the data set prior to any further operations.}
+
+\item{mtext}{\code{\link{character}} (optional): further sample information
+(\link{mtext}).}
+
+\item{cex.global}{\code{\link{numeric}} (with default): global scaling
+factor.}
+
+\item{se}{\code{\link{logical}} (optional): plots standard error points over
+the histogram, default is \code{FALSE}.}
+
+\item{rug}{\code{\link{logical}} (optional): adds rugs to the histogram,
+default is \code{TRUE}.}
+
+\item{normal_curve}{\code{\link{logical}} (with default): adds a normal
+curve to the histogram. Mean and sd are calculated from the input data. For
+more information see the details section.}
+
+\item{summary}{\code{\link{character}} (optional): add statistic measures of
+centrality and dispersion to the plot. Can be one or more of several
+keywords. See details for available keywords.}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used. In case of coordinate
+specification, y-coordinate refers to the right y-axis.}
+
+\item{colour}{\code{\link{numeric}} or \link{character} (with default):
+optional vector of length 4 which specifies the colours of the following
+plot items in exactly this order: histogram bars, rug lines, normal
+distribution curve and standard error points\cr (e.g., \code{c("grey",
+"black", "red", "grey")}).}
+
+\item{interactive}{\code{\link{logical}} (with default): create an interactive
+histogram plot (requires the 'plotly' package)}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot}} or \code{\link{hist}}. If y-axis labels are provided,
+these must be specified as a vector of length 2 since the plot features two
+axes (e.g. \code{ylab = c("axis label 1", "axis label 2")}). Y-axes limits
+(\code{ylim}) must be provided as vector of length four, with the first two
+elements specifying the left axes limits and the latter two elements giving
+the right axis limits.}
+}
+\description{
+Function plots a predefined histogram with an accompanying error plot as
+suggested by Rex Galbraith at the UK LED in Oxford 2010.
+}
+\details{
+If the normal curve is added, the y-axis in the histogram will show the
+probability density.\cr\cr
+A statistic summary, i.e. a collection of statistic measures of
+centrality and dispersion (and further measures) can be added by specifying
+one or more of the following keywords: \code{"n"} (number of samples),
+\code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+\code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+deviation in percent), \code{"sdrel.weighted"} (error-weighted relative
+standard deviation in percent), \code{"sdabs"} (absolute standard deviation),
+\code{"sdabs.weighted"} (error-weighted absolute standard deviation),
+\code{"serel"} (relative standard error), \code{"serel.weighted"} (
+error-weighted relative standard error), \code{"seabs"} (absolute standard
+error), \code{"seabs.weighted"} (error-weighted absolute standard error),
+\code{"kurtosis"} (kurtosis) and \code{"skewness"} (skewness).
+}
+\note{
+The input data is not restricted to a special type.
+}
+\section{Function version}{
+ 0.4.4 (2016-07-16 11:28:11)
+}
+\examples{
+
+## load data
+data(ExampleData.DeValues, envir = environment())
+ExampleData.DeValues <-
+  Second2Gray(ExampleData.DeValues$BT998, dose.rate = c(0.0438,0.0019))
+
+## plot histogram the easiest way
+plot_Histogram(ExampleData.DeValues)
+
+## plot histogram with some more modifications
+plot_Histogram(ExampleData.DeValues,
+               rug = TRUE,
+               normal_curve = TRUE,
+               cex.global = 0.9,
+               pch = 2,
+               colour = c("grey", "black", "blue", "green"),
+               summary = c("n", "mean", "sdrel"),
+               summary.pos = "topleft",
+               main = "Histogram of De-values",
+               mtext = "Example data set",
+               ylab = c(expression(paste(D[e], " distribution")),
+                        "Standard error"),
+               xlim = c(100, 250),
+               ylim = c(0, 0.1, 5, 20))
+
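+## create an interactive histogram plot (requires the 'plotly'
+## package; uncomment to use)
+## plot_Histogram(ExampleData.DeValues, interactive = TRUE)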
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany), \cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link{hist}}, \code{\link{plot}}
+}
+
diff --git a/man/plot_KDE.Rd b/man/plot_KDE.Rd
new file mode 100644
index 0000000..5dabf9f
--- /dev/null
+++ b/man/plot_KDE.Rd
@@ -0,0 +1,169 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_KDE.R
+\name{plot_KDE}
+\alias{plot_KDE}
+\title{Plot kernel density estimate with statistics}
+\usage{
+plot_KDE(data, na.rm = TRUE, values.cumulative = TRUE, order = TRUE,
+  boxplot = TRUE, rug = TRUE, summary, summary.pos,
+  summary.method = "MCM", bw = "nrd0", output = FALSE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+object (required): for \code{data.frame}: two columns: De
+(\code{data[,1]}) and De error (\code{data[,2]}). For plotting multiple
+data sets, these must be provided as \code{list} (e.g. \code{list(dataset1,
+dataset2)}).}
+
+\item{na.rm}{\code{\link{logical}} (with default): exclude NA values
+from the data set prior to any further operations.}
+
+\item{values.cumulative}{\code{\link{logical}} (with default): show
+cumulative individual data.}
+
+\item{order}{\code{\link{logical}}: sort the data in ascending order.}
+
+\item{boxplot}{\code{\link{logical}} (with default): optionally show a
+boxplot (depicting median as thick central line, first and third quartile
+as box limits, whiskers denoting +/- 1.5 interquartile ranges and dots
+marking further outliers).}
+
+\item{rug}{\code{\link{logical}} (with default): optionally add rug.}
+
+\item{summary}{\code{\link{character}} (optional): add statistic measures of
+centrality and dispersion to the plot. Can be one or more of several
+keywords. See details for available keywords.}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used. In case of coordinate
+specification, the y-coordinate refers to the right y-axis.}
+
+\item{summary.method}{\code{\link{character}} (with default): keyword
+indicating the method used to calculate the statistic summary. One out of
+\code{"unweighted"}, \code{"weighted"} and \code{"MCM"}. See
+\code{\link{calc_Statistics}} for details.}
+
+\item{bw}{\code{\link{character}} (with default): smoothing bandwidth; choose
+a numeric value for manual setting.}
+
+\item{output}{\code{\link{logical}}: Optional output of numerical plot
+parameters. These can be useful to reproduce similar plots. Default is
+\code{FALSE}.}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot}}.}
+}
+\description{
+Plot a kernel density estimate of measurement values in combination with the
+actual values and associated error bars in ascending order. If enabled, the
+boxplot will show the usual distribution parameters (median as
+bold line, box delimited by the first and third quartile, whiskers defined
+by the extremes and outliers shown as points) and also the mean and
+standard deviation as pale bold line and pale polygon, respectively.
+}
+\details{
+The function allows passing several plot arguments, such as \code{main},
+\code{xlab}, \code{cex}. However, as the figure is an overlay of two
+separate plots, \code{ylim} must be specified in the order: c(ymin_axis1,
+ymax_axis1, ymin_axis2, ymax_axis2) when using the cumulative values plot
+option. See examples for some further explanations. For details on the
+calculation of the bandwidth (parameter \code{bw}) see
+\code{\link{density}}.\cr\cr
+A statistic summary, i.e. a collection of statistic measures of
+centrality and dispersion (and further measures) can be added by specifying
+one or more of the following keywords:
+\itemize{
+\item \code{"n"} (number of samples)
+\item \code{"mean"} (mean De value)
+\item \code{"median"} (median of the De values)
+\item \code{"sd.rel"} (relative standard deviation in percent)
+\item \code{"sd.abs"} (absolute standard deviation)
+\item \code{"se.rel"} (relative standard error)
+\item \code{"se.abs"} (absolute standard error)
+\item \code{"in.2s"} (percent of samples in 2-sigma range)
+\item \code{"kurtosis"} (kurtosis)
+\item \code{"skewness"} (skewness)
+}
+Note that the input data for the statistic summary is sent to the function
+\code{calc_Statistics()} depending on the log-option for the z-scale. If
+\code{log.z = TRUE}, the summary is based on the logarithms of the input
+data. If \code{log.z = FALSE}, the linearly scaled data is used.\cr
+Note as well that \code{calc_Statistics()} calculates these statistic
+measures in three different ways: \code{unweighted}, \code{weighted} and
+\code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the
+MCM-based version is used. If you wish to use another method, indicate this
+with the appropriate keyword using the argument \code{summary.method}.\cr\cr
+}
+\note{
+The plot output is not a 'probability density' plot (cf. the discussion
+of Berger and Galbraith in Ancient TL; see references)!
+}
+\section{Function version}{
+ 3.5.3 (2016-09-09 10:32:17)
+}
+\examples{
+
+## read example data set
+data(ExampleData.DeValues, envir = environment())
+ExampleData.DeValues <-
+  Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+
+## create plot straightforward
+plot_KDE(data = ExampleData.DeValues)
+
+## create plot with logarithmic x-axis
+plot_KDE(data = ExampleData.DeValues,
+         log = "x")
+
+## create plot with user-defined labels and axes limits
+plot_KDE(data = ExampleData.DeValues,
+         main = "Dose distribution",
+         xlab = "Dose (s)",
+         ylab = c("KDE estimate", "Cumulative dose value"),
+         xlim = c(100, 250),
+         ylim = c(0, 0.08, 0, 30))
+
+## create plot with boxplot option
+plot_KDE(data = ExampleData.DeValues,
+         boxplot = TRUE)
+
+## create plot with statistical summary below header
+plot_KDE(data = ExampleData.DeValues,
+         summary = c("n", "median", "skewness", "in.2s"))
+
+## create plot with statistical summary as legend
+plot_KDE(data = ExampleData.DeValues,
+         summary = c("n", "mean", "sd.rel", "se.abs"),
+         summary.pos = "topleft")
+
+## split data set into sub-groups, one is manipulated, and merge again
+data.1 <- ExampleData.DeValues[1:15,]
+data.2 <- ExampleData.DeValues[16:25,] * 1.3
+data.3 <- list(data.1, data.2)
+
+## create plot with two subsets straightforward
+plot_KDE(data = data.3)
+
+## create plot with two subsets and summary legend at user coordinates
+plot_KDE(data = data.3,
+         summary = c("n", "median", "skewness"),
+         summary.pos = c(110, 0.07),
+         col = c("blue", "orange"))
+
+## example of how to use the numerical output of the function
+## return plot output to draw a thicker KDE line
+KDE_out <- plot_KDE(data = ExampleData.DeValues,
+output = TRUE)
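+
+## a sketch (not part of the original examples): request the unweighted
+## statistic summary and set the bandwidth manually
+plot_KDE(data = ExampleData.DeValues,
+         summary = c("n", "mean", "sd.abs"),
+         summary.method = "unweighted",
+         bw = 5)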
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link{density}}, \code{\link{plot}}
+}
+
diff --git a/man/plot_NRt.Rd b/man/plot_NRt.Rd
new file mode 100644
index 0000000..9ccdade
--- /dev/null
+++ b/man/plot_NRt.Rd
@@ -0,0 +1,135 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_NRt.R
+\name{plot_NRt}
+\alias{plot_NRt}
+\title{Visualise natural/regenerated signal ratios}
+\usage{
+plot_NRt(data, log = FALSE, smooth = c("none", "spline", "rmean"), k = 3,
+  legend = TRUE, legend.pos = "topright", ...)
+}
+\arguments{
+\item{data}{a \code{\link{list}}, \code{\link{data.frame}}, \code{\link{matrix}} or
+\code{\linkS4class{RLum.Analysis}} object (\bold{required}). X,Y data of measured values
+(time and counts). See details on individual data structure.}
+
+\item{log}{\code{\link{character}} (optional): logarithmic axes
+(\code{c("x", "y", "xy")}).}
+
+\item{smooth}{\code{\link{character}} (optional): apply data smoothing. Use
+\code{"rmean"} to calculate the rolling mean, where \code{k} determines the
+width of the rolling window (see \code{\link{rollmean}}).
+\code{"spline"} applies a smoothing spline to each curve
+(see \code{\link{smooth.spline}})}
+
+\item{k}{\code{\link{integer}} (with default): integer width of the rolling
+window.}
+
+\item{legend}{\code{\link{logical}} (with default): show or hide the plot legend.}
+
+\item{legend.pos}{\code{\link{character}} (with default): keyword specifying
+the position of the legend (see \code{\link{legend}}).}
+
+\item{...}{further parameters passed to \code{\link{plot}} (also see \code{\link{par}}).}
+}
+\value{
+Returns a plot and \code{\linkS4class{RLum.Analysis}} object.
+}
+\description{
+This function creates a Natural/Regenerated signal vs. time (NR(t)) plot
+as shown in Steffen et al. 2009.
+}
+\details{
+This function accepts the individual curve data in many different formats. If
+\code{data} is a \code{list}, each element of the list must contain a two
+column \code{data.frame} or \code{matrix} containing the XY data of the curves
+(time and counts). Alternatively, the elements can be objects of class
+\code{\linkS4class{RLum.Data.Curve}}.
+Input values can also be provided as a \code{data.frame} or \code{matrix} where
+the first column contains the time values and each following column contains
+the counts of each curve.
+}
+\examples{
+
+## load example data
+data("ExampleData.BINfileData", envir = environment())
+
+## EXAMPLE 1
+
+## convert Risoe.BINfileData object to RLum.Analysis object
+data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL")
+
+## extract all OSL curves
+allCurves <- get_RLum(data)
+
+## keep only the natural and regenerated signal curves
+pos <- seq(1, 9, 2)
+curves <- allCurves[pos]
+
+## plot a standard NR(t) plot
+plot_NRt(curves)
+
+## re-plot with rolling mean data smoothing
+plot_NRt(curves, smooth = "rmean", k = 10)
+
+## re-plot with a logarithmic x-axis
+plot_NRt(curves, log = "x", smooth = "rmean", k = 5)
+
+## re-plot with custom axes ranges
+plot_NRt(curves, smooth = "rmean", k = 5,
+         xlim = c(0.1, 5), ylim = c(0.4, 1.6),
+         legend.pos = "bottomleft")
+
+## re-plot with smoothing spline on log scale
+plot_NRt(curves, smooth = "spline", log = "x",
+         legend.pos = "top")
+
+## EXAMPLE 2
+
+# you may also use this function to check whether all
+# TD curves follow the same shape (making it a TnTx(t) plot).
+posTD <- seq(2, 14, 2)
+curves <- allCurves[posTD]
+
+plot_NRt(curves, main = "TnTx(t) Plot",
+         smooth = "rmean", k = 20,
+         ylab = "TD natural / TD regenerated",
+         xlim = c(0, 20), legend = FALSE)
+
+## EXAMPLE 3
+
+# extract data from all positions
+data <- lapply(1:24, FUN = function(pos) {
+   Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = pos, ltype = "OSL")
+})
+
+# get individual curve data from each aliquot
+aliquot <- lapply(data, get_RLum)
+
+# set graphical parameters
+par(mfrow = c(2, 2))
+
+# create NR(t) plots for all aliquots
+for (i in 1:length(aliquot)) {
+   plot_NRt(aliquot[[i]][pos],
+            main = paste0("Aliquot #", i),
+            smooth = "rmean", k = 20,
+            xlim = c(0, 10),
+            cex = 0.6, legend.pos = "bottomleft")
+}
+
+# reset graphical parameters
+par(mfrow = c(1, 1))
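+
+## EXAMPLE 4
+
+# a minimal sketch with synthetic data (not part of the original
+# examples): matrix input, time in the first column, each further
+# column holding the counts of one curve (the first curve is assumed
+# to be treated as the natural signal)
+tm <- seq(0.1, 10, 0.1)
+m <- cbind(tm,
+           1000 * exp(-0.5 * tm) + 100,
+           900 * exp(-0.4 * tm) + 100)
+plot_NRt(m)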
+
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+}
+\references{
+Steffen, D., Preusser, F., Schlunegger, F., 2009. OSL quartz underestimation due to
+unstable signal components. Quaternary Geochronology, 4, 353-362.
+}
+\seealso{
+\code{\link{plot}}
+}
+
diff --git a/man/plot_RLum.Analysis.Rd b/man/plot_RLum.Analysis.Rd
new file mode 100644
index 0000000..b51fa6c
--- /dev/null
+++ b/man/plot_RLum.Analysis.Rd
@@ -0,0 +1,110 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.Analysis.R
+\name{plot_RLum.Analysis}
+\alias{plot_RLum.Analysis}
+\title{Plot function for an RLum.Analysis S4 class object}
+\usage{
+plot_RLum.Analysis(object, subset, nrows, ncols, abline = NULL,
+  combine = FALSE, curve.transformation, plot.single = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}): S4
+object of class \code{RLum.Analysis}}
+
+\item{subset}{named \code{\link{list}} (optional): subsets elements for plotting. The
+arguments in the named \code{\link{list}} will be directly passed to the function \code{\link{get_RLum}}
+(e.g., \code{subset = list(curveType = "measured")})}
+
+\item{nrows}{\code{\link{integer}} (optional): sets the number of rows for
+the plot output; if nothing is set, the function tries to find a value.}
+
+\item{ncols}{\code{\link{integer}} (optional): sets the number of columns
+for the plot output; if nothing is set, the function tries to find a value.}
+
+\item{abline}{\code{\link{list}} (optional): allows adding ablines to the plot. Arguments
+are provided in a list and will be forwarded to the function \code{\link{abline}}, e.g.,
+\code{list(v = c(10, 100))} adds two vertical lines at 10 and 100 to all plots. In contrast,
+\code{list(v = c(10), v = c(100))} adds a vertical line at 10 to the first and a vertical
+line at 100 to the second plot.}
+
+\item{combine}{\code{\link{logical}} (with default): allows to combine all
+\code{\linkS4class{RLum.Data.Curve}} objects in one single plot.}
+
+\item{curve.transformation}{\code{\link{character}} (optional): allows
+transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via
+transformation functions. Allowed values are: \code{CW2pLM}, \code{CW2pLMi},
+\code{CW2pHMi} and \code{CW2pPMi}. See details.}
+
+\item{plot.single}{\code{\link{logical}} (with default): global par settings are
+considered, normally this should result in one plot per page}
+
+\item{\dots}{further arguments and graphical parameters will be passed to
+the \code{plot} function. Supported arguments: \code{main}, \code{mtext},
+\code{log}, \code{lwd}, \code{lty}, \code{type}, \code{pch}, \code{col},
+\code{norm}, \code{xlim}, \code{ylim}, \code{xlab}, \code{ylab}... and for \code{combine = TRUE}
+also: \code{sub}, \code{legend}, \code{legend.text}, \code{legend.pos} (typical keywords plus 'outside'), \code{legend.col}, \code{smooth}.
+All arguments can be provided as \code{vector} or \code{list} to gain full control
+of all plot settings.}
+}
+\value{
+Returns multiple plots.
+}
+\description{
+The function provides a standardised plot output for curve data of an
+RLum.Analysis S4 class object
+}
+\details{
+The function produces a multiple plot output. A file output is recommended
+(e.g., \code{\link{pdf}}).
+
+\bold{curve.transformation}\cr
+
+This argument allows transforming continuous wave (CW) curves to pseudo
+(linear) modulated curves. For the transformation, the functions of the
+package are used. Currently, it is not possible to pass further arguments to
+the transformation functions. The argument works only for \code{ltype}
+\code{OSL} and \code{IRSL}.\cr
+
+Please note: The curve transformation within this function works roughly,
+i.e. every IRSL or OSL curve is transformed, without considering whether it
+is measured with the PMT or not! However, for a fast look it might be
+helpful.\cr
+}
+\note{
+Not all arguments available for \code{\link{plot}} will be passed!
+Only plotting of \code{RLum.Data.Curve} and \code{RLum.Data.Spectrum}
+objects is currently supported.\cr
+}
+\section{Function version}{
+ 0.3.6 (2016-09-09 10:32:17)
+}
+\examples{
+
+##load data
+data(ExampleData.BINfileData, envir = environment())
+
+##convert values for position 1
+temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+
+##plot (combine) TL curves in one plot
+plot_RLum.Analysis(
+temp,
+subset = list(recordType = "TL"),
+combine = TRUE,
+norm = TRUE,
+abline = list(v = c(110))
+)
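+
+##a sketch (not part of the original examples): combined OSL curves
+##with the legend placed outside the plot area
+plot_RLum.Analysis(
+temp,
+subset = list(recordType = "OSL"),
+combine = TRUE,
+legend.pos = "outside"
+)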
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+#
+}
+\seealso{
+\code{\link{plot}}, \code{\link{plot_RLum}},
+\code{\link{plot_RLum.Data.Curve}}
+}
+\keyword{aplot}
+
diff --git a/man/plot_RLum.Data.Curve.Rd b/man/plot_RLum.Data.Curve.Rd
new file mode 100644
index 0000000..3a24486
--- /dev/null
+++ b/man/plot_RLum.Data.Curve.Rd
@@ -0,0 +1,71 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.Data.Curve.R
+\name{plot_RLum.Data.Curve}
+\alias{plot_RLum.Data.Curve}
+\title{Plot function for an RLum.Data.Curve S4 class object}
+\usage{
+plot_RLum.Data.Curve(object, par.local = TRUE, norm = FALSE,
+  smooth = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data.Curve}} (\bold{required}): S4
+object of class \code{RLum.Data.Curve}}
+
+\item{par.local}{\code{\link{logical}} (with default): use local graphical
+parameters for plotting, i.e. the plot is shown in one column and one row.
+If \code{par.local = FALSE}, global parameters are inherited.}
+
+\item{norm}{\code{\link{logical}} (with default): allows curve normalisation
+to the highest count value}
+
+\item{smooth}{\code{\link{logical}} (with default): provides an automatic curve smoothing
+based on \code{\link[zoo]{rollmean}}}
+
+\item{\dots}{further arguments and graphical parameters that will be passed
+to the \code{plot} function}
+}
+\value{
+Returns a plot.
+}
+\description{
+The function provides a standardised plot output for curve data of an
+RLum.Data.Curve S4 class object
+}
+\details{
+Only single curve data can be plotted with this function. Supported
+arguments correspond to those of \code{\link{plot}}.
+}
+\note{
+Not all arguments of \code{\link{plot}} will be passed!
+}
+\section{Function version}{
+ 0.2.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+
+##plot curve data
+
+#load Example data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+#transform data.frame to RLum.Data.Curve object
+temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
+
+#plot RLum.Data.Curve object
+plot_RLum.Data.Curve(temp)
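+
+#a sketch (not part of the original examples): normalised and
+#smoothed curve output
+plot_RLum.Data.Curve(temp, norm = TRUE, smooth = TRUE)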
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+#
+}
+\seealso{
+\code{\link{plot}}, \code{\link{plot_RLum}}
+}
+\keyword{aplot}
+
diff --git a/man/plot_RLum.Data.Image.Rd b/man/plot_RLum.Data.Image.Rd
new file mode 100644
index 0000000..e44a99f
--- /dev/null
+++ b/man/plot_RLum.Data.Image.Rd
@@ -0,0 +1,103 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.Data.Image.R
+\name{plot_RLum.Data.Image}
+\alias{plot_RLum.Data.Image}
+\title{Plot function for an \code{RLum.Data.Image} S4 class object}
+\usage{
+plot_RLum.Data.Image(object, par.local = TRUE, plot.type = "plot.raster",
+  ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data.Image}} (\bold{required}): S4
+object of class \code{RLum.Data.Image}}
+
+\item{par.local}{\code{\link{logical}} (with default): use local graphical
+parameters for plotting, i.e. the plot is shown in one column and one row.
+If \code{par.local = FALSE}, global parameters are inherited.}
+
+\item{plot.type}{\code{\link{character}} (with default): plot types.
+Supported types are \code{plot.raster}, \code{plotRGB} or \code{contour}}
+
+\item{\dots}{further arguments and graphical parameters that will be passed
+to the specific plot functions.}
+}
+\value{
+Returns a plot.
+}
+\description{
+The function provides a standardised plot output for image data of an
+\code{RLum.Data.Image} S4 class object, mainly using the plot functions
+provided by the \code{\link{raster}} package.
+}
+\details{
+\bold{Details on the plot functions} \cr
+
+The image is visualised as a 2D plot using generic plot types provided by other
+packages.
+
+Supported plot types: \cr
+
+\bold{\code{plot.type = "plot.raster"}}\cr
+
+Uses the standard plot function for raster data from the package
+\code{\link[raster]{raster}}: \code{\link[raster]{plot}}. For each raster layer in a
+raster brick one plot is produced.
+
+Arguments that are passed through the function call:\cr
+
+\code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim},
+\code{col}
+
+\bold{\code{plot.type = "plotRGB"}}\cr
+
+Uses the function \code{\link[raster]{plotRGB}} from the
+\code{\link[raster]{raster}} package. Only one image plot is produced as all layers
+in a brick are combined. This plot type is useful to see whether any signal
+is recorded by the camera.\cr Arguments that are passed through the function
+call:\cr
+
+\code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{ext},
+\code{interpolate}, \code{maxpixels}, \code{alpha}, \code{colNA},
+\code{stretch}\cr
+
+\bold{\code{plot.type = "contour"}}\cr
+
+Uses the contour plot function from the \code{\link{raster}}
+package (\code{\link[raster]{contour}}). For each raster layer one contour
+plot is produced. Arguments that are passed through the function call:\cr
+
+\code{main},\code{axes}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim},
+\code{col}
+}
+\note{
+This function has been created to facilitate the plotting of image data
+imported by the function \code{\link{read_SPE2R}}. However, so far the
+function is not optimized to handle image data > ca. 200 MByte and thus
+plotting of such data is extremely slow.
+}
+\section{Function version}{
+ 0.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##load data
+data(ExampleData.RLum.Data.Image, envir = environment())
+
+##plot data
+plot_RLum.Data.Image(ExampleData.RLum.Data.Image)
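+
+##a sketch (not part of the original examples): contour plot instead
+##of the default raster plot
+plot_RLum.Data.Image(ExampleData.RLum.Data.Image, plot.type = "contour")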
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+-
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Image}}, \code{\link{plot}},
+\code{\link{plot_RLum}}, \code{\link[raster]{raster}},
+}
+\keyword{aplot}
+
diff --git a/man/plot_RLum.Data.Spectrum.Rd b/man/plot_RLum.Data.Spectrum.Rd
new file mode 100644
index 0000000..8e296ff
--- /dev/null
+++ b/man/plot_RLum.Data.Spectrum.Rd
@@ -0,0 +1,213 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.Data.Spectrum.R
+\name{plot_RLum.Data.Spectrum}
+\alias{plot_RLum.Data.Spectrum}
+\title{Plot function for an RLum.Data.Spectrum S4 class object}
+\usage{
+plot_RLum.Data.Spectrum(object, par.local = TRUE, plot.type = "contour",
+  optical.wavelength.colours = TRUE, bg.channels, bin.rows = 1,
+  bin.cols = 1, rug = TRUE, limit_counts = NULL, xaxis.energy = FALSE,
+  legend.text, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Data.Spectrum}} or \code{\link{matrix}} (\bold{required}): S4
+object of class \code{RLum.Data.Spectrum} or a \code{matrix} containing count values of the spectrum.\cr
+Please note that in case of a matrix rownames and colnames are set automatically if not provided.}
+
+\item{par.local}{\code{\link{logical}} (with default): use local graphical
+parameters for plotting, i.e. the plot is shown in one column and one row.
+If \code{par.local = FALSE}, global parameters are inherited.}
+
+\item{plot.type}{\code{\link{character}} (with default): plot type, for
+3D-plot use \code{persp}, or \code{interactive}, for a 2D-plot \code{contour},
+\code{single} or \code{multiple.lines} (along the time or temperature axis)
+or \code{transect} (along the wavelength axis) \cr}
+
+\item{optical.wavelength.colours}{\code{\link{logical}} (with default): use
+optical wavelength colour palette. Note: For this, the spectrum range is
+limited: \code{c(350,750)}. Own colours can be set with the argument
+\code{col}.}
+
+\item{bg.channels}{\code{\link{vector}} (optional): defines the channel(s) for
+background subtraction. If a vector is provided, the mean of the channels is
+used for subtraction. Note: Background subtraction is applied prior to
+channel binning}
+
+\item{bin.rows}{\code{\link{integer}} (with default): allows summing-up
+wavelength channels (horizontal binning), e.g. with \code{bin.rows = 2} two
+channels are summed up}
+
+\item{bin.cols}{\code{\link{integer}} (with default): allows summing-up
+channel counts (vertical binning) for plotting, e.g. with \code{bin.cols = 2} two
+channels are summed up}
+
+\item{rug}{\code{\link{logical}} (with default): enables or disables colour
+rug. Currently only implemented for plot type \code{multiple.lines} and
+\code{single}}
+
+\item{limit_counts}{\code{\link{numeric}} (optional): threshold to limit all count
+values, i.e. all count values above this threshold will be replaced by the threshold.
+This is especially helpful in case of TL spectra.}
+
+\item{xaxis.energy}{\code{\link{logical}} (with default): enables or
+disables an energy axis instead of the wavelength axis. Note: This option does
+not simply redraw the axis; instead, the intensity of the spectrum is
+recalculated, see details.}
+
+\item{legend.text}{\code{\link{character}} (with default): possibility to
+provide own legend text. This argument is only considered for plot types
+providing a legend, e.g. \code{plot.type="transect"}}
+
+\item{\dots}{further arguments and graphical parameters that will be passed
+to the \code{plot} function.}
+}
+\value{
+Returns a plot.
+}
+\description{
+The function provides a standardised plot output for spectrum data of an
+RLum.Data.Spectrum S4 class object
+}
+\details{
+\bold{Matrix structure} \cr (cf. \code{\linkS4class{RLum.Data.Spectrum}})
+
+\itemize{ \item \code{rows} (x-values): wavelengths/channels (xlim, xlab)
+\item \code{columns} (y-values): time/temperature (ylim, ylab) \item
+\code{cells} (z-values): count values (zlim, zlab) }
+
+\emph{Note: This nomenclature is valid for all plot types of this
+function!}\cr
+
+\bold{Nomenclature for value limiting}
+
+\code{xlim}: Limits values along the wavelength axis\cr \code{ylim}: Limits
+values along the time/temperature axis\cr \code{zlim}: Limits values along
+the count value axis\cr
+
+\bold{Energy axis re-calculation}
+
+If the argument \code{xaxis.energy = TRUE} is chosen, the spectrum is plotted
+as intensity vs. energy instead of intensity vs. wavelength. Therefore the
+entire spectrum is re-calculated (e.g., Appendix 4 in Blasse and Grabmaier,
+1994):
+
+The intensity of the spectrum (z-values) is re-calculated using the
+following equation:
+
+\deqn{\phi_{E} = \phi_{\lambda} * \lambda^2 / (hc)}
+
+with \eqn{\phi_{E}} the intensity per interval of energy \eqn{E} (eV),
+\eqn{\phi_{\lambda}} the intensity per interval of wavelength \eqn{\lambda}
+(nm), \eqn{h} (eV s) the Planck constant and \eqn{c} (m/s) the velocity
+of light.
+
+For transforming the wavelength axis (x-values) the equation
+
+\deqn{E = hc/\lambda}
+
+is used. For further details please see the cited literature.\cr
+
+\bold{Details on the plot functions}
+
+Spectrum is visualised as 3D or 2D plot. Both plot types are based on
+internal R plot functions. \cr
+
+\bold{\code{plot.type = "persp"}}
+
+Arguments that will be passed to \code{\link{persp}}: \itemize{ \item
+\code{shade}: default is \code{0.4} \item \code{phi}: default is \code{15}
+\item \code{theta}: default is \code{-30} \item \code{expand}: default is
+\code{1} \item \code{ticktype}: default is \code{detailed} \item \code{r}: default is \code{10}}
+
+\emph{Note: Further parameters can be adjusted via \code{par}. For example,
+to set the background transparent and reduce the thickness of the lines use
+\code{par(bg = NA, lwd = 0.7)} prior to the function call.}
+
+\bold{\code{plot.type = "single"}}\cr
+
+One curve is plotted per frame. Frames are time or temperature
+steps.\cr
+
+\bold{\code{plot.type = "multiple.lines"}}\cr
+
+All frames are plotted in a single plot.\cr
+
+\bold{\code{plot.type = "transect"}}\cr
+
+Depending on the selected wavelength/channel range a transect over the
+time/temperature (y-axis) will be plotted along the wavelength/channels
+(x-axis). If the range contains more than one channel, values (z-values) are
+summed up. To select a transect use the \code{xlim} argument, e.g.
+\code{xlim = c(300,310)} plots the summed-up count values of channels
+300 to 310.\cr
+
+\bold{Further arguments that will be passed (depending on the plot type)}
+
+\code{xlab}, \code{ylab}, \code{zlab}, \code{xlim}, \code{ylim},
+\code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type}, \code{col},
+\code{border}, \code{box}, \code{lwd}, \code{bty} \cr
+}
+\note{
+Not all additional arguments (\code{...}) will be passed similarly!
+}
+\section{Function version}{
+ 0.5.0 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+##load example data
+data(ExampleData.XSYG, envir = environment())
+
+##(1)plot simple spectrum (2D) - contour
+plot_RLum.Data.Spectrum(TL.Spectrum,
+                        plot.type="contour",
+                        xlim = c(310,750),
+                        ylim = c(0,300),
+                        bin.rows=10,
+                        bin.cols = 1)
+
+##(2) plot spectrum (3D)
+plot_RLum.Data.Spectrum(TL.Spectrum,
+                        plot.type="persp",
+                        xlim = c(310,750),
+                        ylim = c(0,100),
+                        bin.rows=10,
+                        bin.cols = 1)
+
+##(3) plot multiple lines (2D) - multiple.lines (with ylim)
+plot_RLum.Data.Spectrum(TL.Spectrum,
+                        plot.type="multiple.lines",
+                        xlim = c(310,750),
+                        ylim = c(0,100),
+                        bin.rows=10,
+                        bin.cols = 1)
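+
+##(3b) a sketch (not part of the original examples): energy axis
+##instead of the wavelength axis, i.e. the spectrum is re-calculated
+plot_RLum.Data.Spectrum(TL.Spectrum,
+                        plot.type="multiple.lines",
+                        xaxis.energy = TRUE,
+                        bin.rows=10,
+                        bin.cols = 1)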
+
+\dontrun{
+ ##(4) interactive plot using the package plotly
+ plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+ xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+ bin.cols = 1)
+
+ ##(5) alternative using the package fields
+ fields::image.plot(get_RLum(TL.Spectrum))
+ contour(get_RLum(TL.Spectrum), add = TRUE)
+
+}
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Blasse, G., Grabmaier, B.C., 1994. Luminescent Materials.
+Springer.
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot}},
+\code{\link{plot_RLum}}, \code{\link{persp}}, \code{\link[plotly]{plot_ly}},
+\code{\link{contour}}
+}
+\keyword{aplot}
+
diff --git a/man/plot_RLum.Rd b/man/plot_RLum.Rd
new file mode 100644
index 0000000..5edbaf4
--- /dev/null
+++ b/man/plot_RLum.Rd
@@ -0,0 +1,80 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.R
+\name{plot_RLum}
+\alias{plot_RLum}
+\title{General plot function for RLum S4 class objects}
+\usage{
+plot_RLum(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum}. Optionally, a \code{\link{list}} containing objects of class \code{\linkS4class{RLum}}
+can be provided. In this case the function tries to plot every object in this list according
+to its \code{RLum} class.}
+
+\item{\dots}{further arguments and graphical parameters that will be passed
+to the specific plot functions. The only argument that is supported directly is \code{main}
+(setting the plot title). In contrast to the normal behaviour, \code{main} can here be provided as
+a \code{\link{list}} and the arguments in the list will be dispatched to the plots if the \code{object}
+is of type \code{list} as well.}
+}
+\value{
+Returns a plot.
+}
+\description{
+Function calls object specific plot functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for plotting specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding plot function will be selected. Allowed arguments can be
+found in the documentation of each plot function. \tabular{lll}{
+\bold{object} \tab \tab \bold{corresponding plot function} \cr
+
+\code{\linkS4class{RLum.Data.Curve}} \tab : \tab
+\code{\link{plot_RLum.Data.Curve}} \cr
+\code{\linkS4class{RLum.Data.Spectrum}} \tab : \tab
+\code{\link{plot_RLum.Data.Spectrum}}\cr
+\code{\linkS4class{RLum.Data.Image}} \tab : \tab
+\code{\link{plot_RLum.Data.Image}}\cr \code{\linkS4class{RLum.Analysis}}
+\tab : \tab \code{\link{plot_RLum.Analysis}}\cr
+\code{\linkS4class{RLum.Results}} \tab : \tab
+\code{\link{plot_RLum.Results}} }
+}
+\note{
+The provided plot output depends on the input object.
+}
+\section{Function version}{
+ 0.4.2 (2016-09-09 10:32:17)
+}
+\examples{
+
+
+#load Example data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+#transform data.frame to RLum.Data.Curve object
+temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
+
+#plot RLum object
+plot_RLum(temp)
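+
+#a sketch (not part of the original examples): plot a list of RLum
+#objects; 'main' can be given as a list and is dispatched to the plots
+plot_RLum(list(temp, temp),
+          main = list("Curve 1", "Curve 2"))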
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+#
+}
+\seealso{
+\code{\link{plot_RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Curve}}, \code{\link{plot_RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{plot_RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Image}}, \code{\link{plot_RLum.Analysis}},
+\code{\linkS4class{RLum.Analysis}}, \code{\link{plot_RLum.Results}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{dplot}
+
diff --git a/man/plot_RLum.Results.Rd b/man/plot_RLum.Results.Rd
new file mode 100644
index 0000000..1bed719
--- /dev/null
+++ b/man/plot_RLum.Results.Rd
@@ -0,0 +1,69 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RLum.Results.R
+\name{plot_RLum.Results}
+\alias{plot_RLum.Results}
+\title{Plot function for an RLum.Results S4 class object}
+\usage{
+plot_RLum.Results(object, single = TRUE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Results}} (\bold{required}): S4 object
+of class \code{RLum.Results}}
+
+\item{single}{\code{\link{logical}} (with default): single plot output
+(\code{TRUE/FALSE}) to allow for plotting the results in as few plot windows
+as possible.}
+
+\item{\dots}{further arguments and graphical parameters will be passed to
+the \code{plot} function.}
+}
+\value{
+Returns multiple plots.
+}
+\description{
+The function provides a standardised plot output for data of an RLum.Results
+S4 class object
+}
+\details{
+The function produces a multiple plot output.  A file output is recommended
+(e.g., \code{\link{pdf}}).
+}
+\note{
+Not all arguments available for \code{\link{plot}} will be passed!
+Only plotting of \code{RLum.Results} objects is supported.
+}
+\section{Function version}{
+ 0.2.1 (2016-05-16 22:24:15)
+}
+\examples{
+
+
+##load data
+data(ExampleData.DeValues, envir = environment())
+
+# apply the logged minimum age model
+mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE, plot = FALSE)
+
+##plot
+plot_RLum.Results(mam)
+
+# estimate the number of grains on an aliquot
+grains <- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100)
+
+##plot
+plot_RLum.Results(grains)
+
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, IRAMAT-CRP2A,
+Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+#
+}
+\seealso{
+\code{\link{plot}}, \code{\link{plot_RLum}}
+}
+\keyword{aplot}
+
diff --git a/man/plot_RadialPlot.Rd b/man/plot_RadialPlot.Rd
new file mode 100644
index 0000000..b6d5f03
--- /dev/null
+++ b/man/plot_RadialPlot.Rd
@@ -0,0 +1,263 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_RadialPlot.R
+\name{plot_RadialPlot}
+\alias{plot_RadialPlot}
+\title{Function to create a Radial Plot}
+\usage{
+plot_RadialPlot(data, na.rm = TRUE, negatives = "remove", log.z = TRUE,
+  central.value, centrality = "mean.weighted", mtext, summary, summary.pos,
+  legend, legend.pos, stats, rug = FALSE, plot.ratio, bar.col,
+  y.ticks = TRUE, grid.col, line, line.col, line.label, output = FALSE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
+object (required): for \code{data.frame} two columns: De (\code{data[,1]})
+and De error (\code{data[,2]}). To plot several data sets in one plot, the
+data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.}
+
+\item{na.rm}{\code{\link{logical}} (with default): excludes \code{NA}
+values from the data set prior to any further operations.}
+
+\item{negatives}{\code{\link{character}} (with default): rule for negative
+values. Default is \code{"remove"} (i.e. negative values are removed from
+the data set).}
+
+\item{log.z}{\code{\link{logical}} (with default): Option to display the
+z-axis in logarithmic scale. Default is \code{TRUE}.}
+
+\item{central.value}{\code{\link{numeric}}: User-defined central value,
+primarily used for horizontal centering of the z-axis.}
+
+\item{centrality}{\code{\link{character}} or \code{\link{numeric}} (with
+default): measure of centrality, used for automatically centering the plot
+and drawing the central line. Can either be one out of \code{"mean"},
+\code{"median"}, \code{"mean.weighted"} and \code{"median.weighted"} or a
+numeric value used for the standardisation.}
+
+\item{mtext}{\code{\link{character}}: additional text below the plot title.}
+
+\item{summary}{\code{\link{character}} (optional): add statistic measures of
+centrality and dispersion to the plot. Can be one or more of several
+keywords. See details for available keywords.}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used.}
+
+\item{legend}{\code{\link{character}} vector (optional): legend content to
+be added to the plot.}
+
+\item{legend.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position coordinates or keyword (e.g. \code{"topright"})
+for the legend to be plotted.}
+
+\item{stats}{\code{\link{character}}: additional labels of statistically
+important values in the plot. One or more out of the following:
+\code{"min"}, \code{"max"}, \code{"median"}.}
+
+\item{rug}{\code{\link{logical}}: Option to add a rug to the z-scale, to
+indicate the location of individual values}
+
+\item{plot.ratio}{\code{\link{numeric}}: User-defined plot area ratio (i.e.
+curvature of the z-axis). If omitted, the default value (\code{4.5/5.5}) is
+used and modified automatically to optimise the z-axis curvature. The
+parameter should be decreased when data points are plotted outside the
+z-axis or when the z-axis gets too elliptic.}
+
+\item{bar.col}{\code{\link{character}} or \code{\link{numeric}} (with
+default): colour of the bar showing the 2-sigma range around the central
+value. To disable the bar, use \code{"none"}. Default is \code{"grey"}.}
+
+\item{y.ticks}{\code{\link{logical}}: Option to hide y-axis labels. Useful
+for data with small scatter.}
+
+\item{grid.col}{\code{\link{character}} or \code{\link{numeric}} (with
+default): colour of the grid lines (originating at [0,0] and stretching to
+the z-scale). To disable grid lines, use \code{"none"}. Default is
+\code{"grey"}.}
+
+\item{line}{\code{\link{numeric}}: numeric values of the additional lines to
+be added.}
+
+\item{line.col}{\code{\link{character}} or \code{\link{numeric}}: colour of
+the additional lines.}
+
+\item{line.label}{\code{\link{character}}: labels for the additional lines.}
+
+\item{output}{\code{\link{logical}}: Optional output of numerical plot
+parameters. These can be useful to reproduce similar plots. Default is
+\code{FALSE}.}
+
+\item{\dots}{Further plot arguments to pass. \code{xlab} must be a vector of
+length 2, specifying the upper and lower x-axes labels.}
+}
+\value{
+Returns a plot object.
+}
+\description{
+A Galbraith's radial plot is produced on a logarithmic or a linear scale.
+}
+\details{
+Details and the theoretical background of the radial plot are given in the
+cited literature. This function is based on an S script of Rex Galbraith. To
+reduce the manual adjustments, the function has been rewritten. Thanks to
+Rex Galbraith for useful comments on this function. \cr Plotting can be
+disabled by adding the argument \code{plot = FALSE}, e.g. to return only
+numeric plot output.\cr
+
+Earlier versions of the Radial Plot in this package had the 2-sigma-bar
+drawn onto the z-axis. However, this might have caused misunderstanding in
+that the 2-sigma range may also refer to the z-scale, which it does not!
+Rather it applies only to the x-y-coordinate system (standardised error vs.
+precision). A spread in doses or ages must be drawn as lines originating at
+zero precision (x0) and zero standardised estimate (y0). Such a range may be
+drawn by adding lines to the radial plot ( \code{line}, \code{line.col},
+\code{line.label}, cf. examples).\cr\cr
+
+A statistic summary, i.e. a collection of statistic measures of
+centrality and dispersion (and further measures) can be added by specifying
+one or more of the following keywords: \code{"n"} (number of samples),
+\code{"mean"} (mean De value), \code{"mean.weighted"} (error-weighted mean),
+\code{"median"} (median of the De values), \code{"sdrel"} (relative standard
+deviation in percent), \code{"sdrel.weighted"} (error-weighted relative
+standard deviation in percent), \code{"sdabs"} (absolute standard deviation),
+\code{"sdabs.weighted"} (error-weighted absolute standard deviation),
+\code{"serel"} (relative standard error), \code{"serel.weighted"} (
+error-weighted relative standard error), \code{"seabs"} (absolute standard
+error), \code{"seabs.weighted"} (error-weighted absolute standard error),
+\code{"in.2s"} (percent of samples in 2-sigma range),
+\code{"kurtosis"} (kurtosis) and \code{"skewness"} (skewness).
+}
+\section{Function version}{
+ 0.5.3 (2016-05-19 23:47:38)
+}
+\examples{
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+
+## plot the example data straightforward
+plot_RadialPlot(data = ExampleData.DeValues)
+
+## now with linear z-scale
+plot_RadialPlot(data = ExampleData.DeValues,
+                log.z = FALSE)
+
+## now with output of the plot parameters
+plot1 <- plot_RadialPlot(data = ExampleData.DeValues,
+                         log.z = FALSE,
+                         output = TRUE)
+plot1
+plot1$zlim
+
+## now with adjusted z-scale limits
+plot_RadialPlot(data = ExampleData.DeValues,
+               log.z = FALSE,
+               zlim = c(100, 200))
+
+## now the two plots with serious but seasonally changing fun
+#plot_RadialPlot(data = data.3, fun = TRUE)
+
+## now with user-defined central value, in log-scale again
+plot_RadialPlot(data = ExampleData.DeValues,
+                central.value = 150)
+
+## now with a rug, indicating individual De values at the z-scale
+plot_RadialPlot(data = ExampleData.DeValues,
+                rug = TRUE)
+
+## now with legend, colour, different points and smaller scale
+plot_RadialPlot(data = ExampleData.DeValues,
+                legend.text = "Sample 1",
+                col = "tomato4",
+                bar.col = "peachpuff",
+                pch = "R",
+                cex = 0.8)
+
+## now without 2-sigma bar, y-axis, grid lines and central value line
+plot_RadialPlot(data = ExampleData.DeValues,
+                bar.col = "none",
+                grid.col = "none",
+                y.ticks = FALSE,
+                lwd = 0)
+
+## now with user-defined axes labels
+plot_RadialPlot(data = ExampleData.DeValues,
+                xlab = c("Data error (\%)",
+                         "Data precision"),
+                ylab = "Scatter",
+                zlab = "Equivalent dose [Gy]")
+
+## now with minimum, maximum and median value indicated
+plot_RadialPlot(data = ExampleData.DeValues,
+                central.value = 150,
+                stats = c("min", "max", "median"))
+
+## now with a brief statistical summary
+plot_RadialPlot(data = ExampleData.DeValues,
+                summary = c("n", "in.2s"))
+
+## now with another statistical summary as subheader
+plot_RadialPlot(data = ExampleData.DeValues,
+                summary = c("mean.weighted", "median"),
+                summary.pos = "sub")
+
+## now the data set is split into sub-groups, one is manipulated
+data.1 <- ExampleData.DeValues[1:15,]
+data.2 <- ExampleData.DeValues[16:25,] * 1.3
+
+## now a common dataset is created from the two subgroups
+data.3 <- list(data.1, data.2)
+
+## now the two data sets are plotted in one plot
+plot_RadialPlot(data = data.3)
+
+## now with some graphical modification
+plot_RadialPlot(data = data.3,
+                col = c("darkblue", "darkgreen"),
+                bar.col = c("lightblue", "lightgreen"),
+                pch = c(2, 6),
+                summary = c("n", "in.2s"),
+                summary.pos = "sub",
+                legend = c("Sample 1", "Sample 2"))
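+
+## a sketch (not part of the original examples): indicate a
+## hypothetical dose range with lines originating at zero precision
+plot_RadialPlot(data = ExampleData.DeValues,
+                line = c(120, 160),
+                line.col = "red",
+                line.label = c("120 Gy", "160 Gy"))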
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Based on a rewritten
+S script of Rex Galbraith, 2010
+\cr R Luminescence Package Team}
+\references{
+Galbraith, R.F., 1988. Graphical Display of Estimates Having
+Differing Standard Errors. Technometrics, 30 (3), 271-281.
+
+Galbraith, R.F., 1990. The radial plot: Graphical assessment of spread in
+ages. International Journal of Radiation Applications and Instrumentation.
+Part D. Nuclear Tracks and Radiation Measurements, 17 (3), 207-214.
+
+Galbraith, R. & Green, P., 1990. Estimating the component ages in a finite
+mixture. International Journal of Radiation Applications and
+Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3)
+197-206.
+
+Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
+track ages. Nuclear Tracks And Radiation Measurements, 21 (4), 459-470.
+
+Galbraith, R.F., 1994. Some Applications of Radial Plots. Journal of the
+American Statistical Association, 89 (428), 1232-1242.
+
+Galbraith, R.F., 2010. On plotting OSL equivalent doses. Ancient TL, 28 (1),
+1-10.
+
+Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent
+dose and error calculation and display in OSL dating: An overview and some
+recommendations. Quaternary Geochronology, 11, 1-27.
+}
+\seealso{
+\code{\link{plot}}, \code{\link{plot_KDE}},
+\code{\link{plot_Histogram}}
+}
+
diff --git a/man/plot_Risoe.BINfileData.Rd b/man/plot_Risoe.BINfileData.Rd
new file mode 100644
index 0000000..8468ba0
--- /dev/null
+++ b/man/plot_Risoe.BINfileData.Rd
@@ -0,0 +1,116 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_Risoe.BINfileData.R
+\name{plot_Risoe.BINfileData}
+\alias{plot_Risoe.BINfileData}
+\title{Plot single luminescence curves from a BIN file object}
+\usage{
+plot_Risoe.BINfileData(BINfileData, position, run, set, sorter = "POSITION",
+  ltype = c("IRSL", "OSL", "TL", "RIR", "RBR", "RL"), curve.transformation,
+  dose_rate, temp.lab, cex.global = 1, ...)
+}
+\arguments{
+\item{BINfileData}{\link{Risoe.BINfileData-class} (\bold{required}):
+requires an S4 object returned by the \link{read_BIN2R} function.}
+
+\item{position}{\link{vector} (optional): option to limit the plotted curves
+by position (e.g. \code{position = 1}, \code{position = c(1,3,5)}).}
+
+\item{run}{\link{vector} (optional): option to limit the plotted curves by
+run (e.g., \code{run = 1}, \code{run = c(1,3,5)}).}
+
+\item{set}{\link{vector} (optional): option to limit the plotted curves by
+set (e.g., \code{set = 1}, \code{set = c(1,3,5)}).}
+
+\item{sorter}{\link{character} (with default): the plot output can be
+ordered by "POSITION","SET" or "RUN". POSITION, SET and RUN are options
+defined in the Risoe Sequence Editor.}
+
+\item{ltype}{\link{character} (with default): option to limit the plotted
+curves by the type of luminescence stimulation.  Allowed values:
+\code{"IRSL"}, \code{"OSL"},\code{"TL"}, \code{"RIR"}, \code{"RBR"}
+(corresponds to LM-OSL), \code{"RL"}. All types of curves are plotted by
+default.}
+
+\item{curve.transformation}{\link{character} (optional): allows transforming
+CW-OSL and CW-IRSL curves to pseudo-LM curves via transformation functions.
+Allowed values are: \code{CW2pLM}, \code{CW2pLMi}, \code{CW2pHMi} and
+\code{CW2pPMi}. See details.}
+
+\item{dose_rate}{\link{numeric} (optional): dose rate of the irradiation
+source at the measurement date. If set, the given irradiation dose will be
+shown in Gy. See details.}
+
+\item{temp.lab}{\link{character} (optional): option to allow for different
+temperature units. If no value is set, deg. C is chosen.}
+
+\item{cex.global}{\link{numeric} (with default): global scaling factor.}
+
+\item{\dots}{further undocumented plot arguments.}
+}
+\value{
+Returns a plot.
+}
+\description{
+Plots single luminescence curves from an object returned by the
+\link{read_BIN2R} function.
+}
+\details{
+\bold{Nomenclature}\cr
+
+See \code{\link{Risoe.BINfileData-class}}
+
+\bold{curve.transformation}\cr
+
+This argument allows transforming continuous wave (CW) curves to pseudo
+(linear) modulated curves. For the transformation, the functions of the
+package are used.  Currently, it is not possible to pass further arguments
+to the transformation functions. The argument works only for \code{ltype}
+\code{OSL} and \code{IRSL}.\cr
+
+\bold{Irradiation time}\cr
+
+Plotting the irradiation time (s) or the given dose (Gy) requires that the
+variable \code{IRR_TIME} has been set within the BIN-file.  This is normally
+done by using the 'Run Info' option within the Sequence Editor or by editing
+in R.
+}
+\note{
+The function has been successfully tested for the Sequence Editor file
+output version 3 and 4.
+}
+\section{Function version}{
+ 0.4.1 (2015-11-29 17:27:48)
+}
+\examples{
+
+
+##load data
+data(ExampleData.BINfileData, envir = environment())
+
+##plot all curves from the first position to the desktop
+#pdf(file = "~/Desktop/CurveOutput.pdf", paper = "a4", height = 11, onefile = TRUE)
+
+##example - load from *.bin file
+#BINfile<- file.choose()
+#BINfileData<-read_BIN2R(BINfile)
+
+#par(mfrow = c(4,3), oma = c(0.5,1,0.5,1))
+#plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1)
+#mtext(side = 4, BINfile, outer = TRUE, col = "blue", cex = .7)
+#dev.off()
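+
+##a sketch (not part of the original examples): plot the OSL curves of
+##position 1 transformed to pseudo-LM curves
+plot_Risoe.BINfileData(CWOSL.SAR.Data,
+                       position = 1,
+                       ltype = "OSL",
+                       curve.transformation = "CW2pLM")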
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+\references{
+Duller, G., 2007. Analyst. pp. 1-45.
+}
+\seealso{
+\code{\link{Risoe.BINfileData-class}},\code{\link{read_BIN2R}},
+\code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pPMi}},
+\code{\link{CW2pHMi}}
+}
+\keyword{dplot}
+
diff --git a/man/plot_ViolinPlot.Rd b/man/plot_ViolinPlot.Rd
new file mode 100644
index 0000000..b9399ca
--- /dev/null
+++ b/man/plot_ViolinPlot.Rd
@@ -0,0 +1,90 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plot_ViolinPlot.R
+\name{plot_ViolinPlot}
+\alias{plot_ViolinPlot}
+\title{Create a violin plot}
+\usage{
+plot_ViolinPlot(data, boxplot = TRUE, rug = TRUE, summary = NULL,
+  summary.pos = "sub", na.rm = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\link{numeric}} or \code{\linkS4class{RLum.Results}}
+object (required): input data for plotting. Alternatively a \code{\link{data.frame}} or
+a \code{\link{matrix}} can be provided, but only the first column will be considered by the
+function}
+
+\item{boxplot}{\code{\link{logical}} (with default): enable or disable boxplot}
+
+\item{rug}{\code{\link{logical}} (with default): enable or disable rug}
+
+\item{summary}{\code{\link{character}} (optional): add statistic measures of
+centrality and dispersion to the plot. Can be one or more of several
+keywords. See details for available keywords.}
+
+\item{summary.pos}{\code{\link{numeric}} or \code{\link{character}} (with
+default): optional position keywords (cf., \code{\link{legend}})
+for the statistical summary. Alternatively, the keyword \code{"sub"} may be
+specified to place the summary below the plot header. However, this latter
+option is only possible if \code{mtext} is not used.}
+
+\item{na.rm}{\code{\link{logical}} (with default): exclude NA values
+from the data set prior to any further operations.}
+
+\item{\dots}{further arguments and graphical parameters passed to
+\code{\link{plot.default}}, \code{\link[stats]{density}} and \code{\link{boxplot}}. See details for
+further information}
+}
+\description{
+Draws a kernel density plot in combination with a boxplot in its middle. The shape of the violin
+is constructed using a mirrored density curve. This plot is especially designed for cases
+where the individual errors are zero or too small to be visualised. The idea for this plot is
+based on the 'volcano plot' in the ggplot2 package by Hadley Wickham and Winston Chang.
+The general idea for the Violin Plot seems to have been introduced by Hintze and Nelson (1998).
+}
+\details{
+The function passes several arguments to the functions \code{\link{plot}},
+\code{\link[stats]{density}} and \code{\link[graphics]{boxplot}}.
+Supported arguments are: \code{xlim}, \code{main}, \code{xlab},
+\code{ylab}, \code{col.violin}, \code{col.boxplot}, \code{mtext}, \code{cex}
+
+\bold{\code{Valid summary keywords}}\cr
+
+'n', 'mean', 'median', 'sd.abs', 'sd.rel', 'se.abs', 'se.rel', 'skewness', 'kurtosis'
+}
+\note{
+Although the code for this function was developed independently and just the idea for the plot
+was based on the 'ggplot2' package plot type 'volcano', it should be mentioned that, beyond this,
+two other R packages exist providing the possibility to produce this kind of plot, namely:
+'vioplot' and 'violinmplot' (see References for details).
+}
+\section{Function version}{
+ 0.1.2 (2016-05-17 13:27:04)
+}
+\examples{
+## read example data set
+data(ExampleData.DeValues, envir = environment())
+ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
+
+## create plot straightforward
+plot_ViolinPlot(data = ExampleData.DeValues)
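+
+## a sketch (not part of the original examples): add a statistical
+## summary below the plot header (summary.pos defaults to "sub")
+plot_ViolinPlot(data = ExampleData.DeValues,
+                summary = c("n", "median", "sd.abs"))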
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot.
+R package version 0.2. http://CRAN.R-project.org/package=vioplot
+
+Hintze, J.L., Nelson, R.D., 1998. A Box Plot-Density Trace Synergism. The American Statistician 52, 181-184.
+
+Raphael W. Majeed (2012). violinmplot: Combination of violin plot with mean and standard deviation.
+R package version 0.2.1. http://CRAN.R-project.org/package=violinmplot
+
+Wickham, H. (2009). ggplot2: elegant graphics for data analysis. Springer New York.
+}
+\seealso{
+\code{\link[stats]{density}}, \code{\link{plot}}, \code{\link{boxplot}}, \code{\link{rug}},
+\code{\link{calc_Statistics}}
+}
+
diff --git a/man/read_BIN2R.Rd b/man/read_BIN2R.Rd
new file mode 100644
index 0000000..c37a535
--- /dev/null
+++ b/man/read_BIN2R.Rd
@@ -0,0 +1,118 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/read_BIN2R.R
+\name{read_BIN2R}
+\alias{read_BIN2R}
+\title{Import Risoe BIN-file into R}
+\usage{
+read_BIN2R(file, show.raw.values = FALSE, position = NULL,
+  n.records = NULL, zero_data.rm = TRUE, duplicated.rm = FALSE,
+  fastForward = FALSE, show.record.number = FALSE, txtProgressBar = TRUE,
+  forced.VersionNumber = NULL, pattern = NULL, verbose = TRUE, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+BIN/BINX file. If input is a \code{list} it should comprise only \code{character}s representing
+each valid path and BIN/BINX-file names.
+Alternatively, the input character can be just a directory (path); in this case the
+function tries to detect and import all BIN/BINX files found in the directory.}
+
+\item{show.raw.values}{\link{logical} (with default): shows raw values from
+BIN file for \code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} without
+translation in characters. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{position}{\code{\link{numeric}} (optional): imports only the selected position. Note:
+the import performance will not benefit by any selection made here.
+Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{n.records}{\link{raw} (optional): limits the number of imported
+records. Can be used in combination with \code{show.record.number} for
+debugging purposes, e.g. corrupt BIN-files. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{zero_data.rm}{\code{\link{logical}} (with default): remove erroneous data with no count
+values. As such data are usually not needed for the subsequent data analysis they will be removed
+by default. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{duplicated.rm}{\code{\link{logical}} (with default): remove duplicated entries if \code{TRUE}.
+Duplicates may occur in an erroneously produced BIN/BINX-file. This option compares only
+predecessor and successor. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{fastForward}{\code{\link{logical}} (with default): if \code{TRUE} for a
+more efficient data processing only a list of \code{RLum.Analysis} objects is returned instead
+of a \link{Risoe.BINfileData-class} object. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{show.record.number}{\link{logical} (with default): shows record number
+of the imported record, for debugging usage only. Can be provided as \code{list} if \code{file} is a \code{list}.}
+
+\item{txtProgressBar}{\link{logical} (with default): enables or disables
+\code{\link{txtProgressBar}}.}
+
+\item{forced.VersionNumber}{\code{\link{integer}} (optional): allows bypassing the
+version number check in the function by supplying an own value for cases where the
+BIN-file version is not supported. Can be provided as \code{list} if \code{file} is a \code{list}.\cr
+Note: Use at your own risk; only supported BIN-file versions have been tested.}
+
+\item{pattern}{\code{\link{character}} (optional): argument that is used if only a path is provided.
+The argument will then be passed to the function \code{\link{list.files}} used internally to
+construct a \code{list} of the wanted files}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables verbose mode}
+
+\item{\dots}{further arguments that will be passed to the function
+\code{\link{Risoe.BINfileData2RLum.Analysis}}. Please note that any matching argument
+automatically sets \code{fastForward = TRUE}}
+}
+\value{
+Returns an S4 \link{Risoe.BINfileData-class} object containing two
+slots:\cr \item{METADATA}{A \link{data.frame} containing all variables
+stored in the bin-file.} \item{DATA}{A \link{list} containing a numeric
+\link{vector} of the measured data. The ID corresponds to the record ID in
+METADATA.}\cr
+
+If \code{fastForward = TRUE} a list of \code{\linkS4class{RLum.Analysis}} objects is returned. The
+internal coercion is done using the function \code{\link{Risoe.BINfileData2RLum.Analysis}}
+}
+\description{
+Import a *.bin or a *.binx file produced by a Risoe DA15 and DA20 TL/OSL
+reader into R.
+}
+\details{
+The binary data file is parsed byte by byte following the data structure
+published in the Appendices of the Analyst manual p. 42.\cr\cr For the
+general BIN-file structure, the reader is referred to the Risoe website:
+\code{http://www.nutech.dtu.dk/}
+}
+\note{
+The function works for BIN/BINX-format versions 03, 04, 06, 07 and 08. The
+version number depends on the Sequence Editor used.\cr\cr
+
+\bold{ROI data sets introduced with BIN-file version 8 are not supported and are skipped during
+import.}
+}
+\section{Function version}{
+ 0.15.0 (2016-06-13 21:17:19)
+}
+\examples{
+
+
+##(1) import Risoe BIN-file to R (uncomment for usage)
+
+#FILE <- file.choose()
+#temp <- read_BIN2R(FILE)
+#temp
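+
+##(2) import all BIN/BINX-files from a directory (uncomment for usage);
+##    a minimal sketch: the path is a placeholder, 'pattern' and 'fastForward'
+##    are the arguments documented above
+
+#DIR <- "[your path to a directory]"
+#temp <- read_BIN2R(DIR, pattern = ".binx", fastForward = TRUE)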
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Margret C. Fuchs, HZDR Freiberg, (Germany)
+\cr R Luminescence Package Team}
+\references{
+DTU Nutech, 2016. The Sequence Editor, User's Manual, February, 2016.
+\url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+}
+\seealso{
+\code{\link{write_R2BIN}}, \code{\linkS4class{Risoe.BINfileData}},
+\code{\link[base]{readBin}}, \code{\link{merge_Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}},
+\code{\link[utils]{txtProgressBar}}, \code{\link{list.files}}
+}
+\keyword{IO}
+
diff --git a/man/read_Daybreak2R.Rd b/man/read_Daybreak2R.Rd
new file mode 100644
index 0000000..14f0739
--- /dev/null
+++ b/man/read_Daybreak2R.Rd
@@ -0,0 +1,48 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/read_Daybreak2R.R
+\name{read_Daybreak2R}
+\alias{read_Daybreak2R}
+\title{Import Daybreak ASCII data into R}
+\usage{
+read_Daybreak2R(file, verbose = TRUE, txtProgressBar = TRUE)
+}
+\arguments{
+\item{file}{\code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+file to be imported. Alternatively, a list of file names or just the path to a folder
+containing measurement data can be provided. Please note that the rather generic, common file
+extension (txt) may lead to function failures during import when just a path is provided.}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback}
+
+\item{txtProgressBar}{\code{\link{logical}} (with default): enables or disables
+\code{\link{txtProgressBar}}.}
+}
+\value{
+A list of \code{\linkS4class{RLum.Analysis}} objects (each per position) is provided.
+}
+\description{
+Import a *.txt (ASCII) file produced by a Daybreak reader into R.
+}
+\note{
+\bold{[BETA VERSION]} This function version still needs to be properly tested.
+}
+\section{Function version}{
+ 0.2.1 (2016-05-02 09:36:06)
+}
+\examples{
+
+## This function has no example yet.
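+
+## Until an official example is added, a minimal usage sketch analogous
+## to the other import functions (uncomment for usage):
+
+#FILE <- file.choose()
+#temp <- read_Daybreak2R(FILE)
+#temp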
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)\cr Based on a suggestion by Willian Amidon and Andrew Louis Gorin.
+\cr R Luminescence Package Team}
+\references{
+-
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+\keyword{IO}
+
diff --git a/man/read_SPE2R.Rd b/man/read_SPE2R.Rd
new file mode 100644
index 0000000..58f5024
--- /dev/null
+++ b/man/read_SPE2R.Rd
@@ -0,0 +1,116 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/read_SPE2R.R
+\name{read_SPE2R}
+\alias{read_SPE2R}
+\title{Import Princeton Instruments (TM) SPE-file into R}
+\usage{
+read_SPE2R(file, output.object = "RLum.Data.Image", frame.range,
+  txtProgressBar = TRUE)
+}
+\arguments{
+\item{file}{\link{character} (\bold{required}): spe-file name (including
+path), e.g. \cr [WIN]: \code{read_SPE2R("C:/Desktop/test.spe")}, \cr
+[MAC/LINUX]: \code{read_SPE2R("/User/test/Desktop/test.spe")}}
+
+\item{output.object}{\code{\link{character}} (with default): set \code{RLum}
+output object.  Allowed types are \code{"RLum.Data.Spectrum"},
+\code{"RLum.Data.Image"} or \code{"matrix"}}
+
+\item{frame.range}{\code{\link{vector}} (optional): limit frame range, e.g.
+select first 100 frames by \code{frame.range = c(1,100)}}
+
+\item{txtProgressBar}{\link{logical} (with default): enables or disables
+\code{\link{txtProgressBar}}.}
+}
+\value{
+Depending on the value of the argument \code{output.object} the function
+returns three different types of objects:\cr
+
+\code{RLum.Data.Spectrum}\cr
+
+An object of type \code{\linkS4class{RLum.Data.Spectrum}} is returned.  Row
+sums are used to integrate all counts over one channel.
+
+\code{RLum.Data.Image}\cr
+
+An object of type \code{\linkS4class{RLum.Data.Image}} is returned.  Due to
+performance reasons the import is aborted for files containing more than 100
+frames. This limitation can be overridden manually by using the argument
+\code{frame.range}.
+
+\code{matrix}\cr
+
+Returns a matrix of the form: Rows = Channels, columns = Frames. For the
+transformation the function \code{\link{get_RLum}} is used,
+meaning that the same results can be obtained by using the function
+\code{\link{get_RLum}} on an \code{RLum.Data.Spectrum} or \code{RLum.Data.Image} object.
+}
+\description{
+Function imports Princeton Instruments (TM) SPE-files into R environment and
+provides \code{RLum} objects as output.
+}
+\details{
+Function provides an import routine for the Princeton Instruments SPE format.
+Import functionality is based on the file format description provided by
+Princeton Instruments and on a MATLAB script written by Carl Hall (see
+references).
+}
+\note{
+\bold{The function does not test whether the input data are spectra or
+images for spatially resolved analysis!}\cr
+
+The function has been successfully tested for SPE format versions 2.x.
+
+\emph{Currently not all information provided by the SPE format is
+supported.}
+}
+\section{Function version}{
+ 0.1.0 (2016-05-02 09:42:32)
+}
+\examples{
+
+
+## to run examples uncomment lines and run the code
+
+##(1) Import data as RLum.Data.Spectrum object
+#file <- file.choose()
+#temp <- read_SPE2R(file)
+#temp
+
+##(2) Import data as RLum.Data.Image object
+#file <- file.choose()
+#temp <- read_SPE2R(file, output.object = "RLum.Data.Image")
+#temp
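+
+##(2b) as (2), but limit the import to the first 50 frames via 'frame.range';
+##     a sketch that also overrides the 100-frames limit noted in the value section
+#temp <- read_SPE2R(file, output.object = "RLum.Data.Image", frame.range = c(1,50))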
+
+##(3) Import data as matrix object
+#file <- file.choose()
+#temp <- read_SPE2R(file, output.object = "matrix")
+#temp
+
+##(4) Export raw data to csv, if temp is a RLum.Data.Spectrum object
+# write.table(x = get_RLum(temp),
+#             file = "[your path and filename]",
+#             sep = ";", row.names = FALSE)
+
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File
+Format Specification, Version 1.A,
+\url{ftp://ftp.princetoninstruments.com/Public/Manuals/Princeton\%20Instruments/SPE\%203.0\%20File\%20Format\%20Specification.pdf}
+
+Hall, C., 2012: readSPE.m.
+\url{http://www.mathworks.com/matlabcentral/fileexchange/35940-readspe/content/readSPE.m}
+}
+\seealso{
+\code{\link{readBin}}, \code{\linkS4class{RLum.Data.Spectrum}},
+\code{\link[raster]{raster}}
+}
+\keyword{IO}
+
diff --git a/man/read_XSYG2R.Rd b/man/read_XSYG2R.Rd
new file mode 100644
index 0000000..1fbc6ee
--- /dev/null
+++ b/man/read_XSYG2R.Rd
@@ -0,0 +1,159 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/read_XSYG2R.R
+\name{read_XSYG2R}
+\alias{read_XSYG2R}
+\title{Import XSYG files to R}
+\usage{
+read_XSYG2R(file, recalculate.TL.curves = TRUE, fastForward = FALSE,
+  import = TRUE, pattern = ".xsyg", txtProgressBar = TRUE)
+}
+\arguments{
+\item{file}{\code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
+XSYG file. If input is a \code{list} it should comprise only \code{character}s representing each valid
+path and xsyg-file names. Alternatively the input character can be just a directory (path); in this case
+the function tries to detect and import all xsyg files found in the directory.}
+
+\item{recalculate.TL.curves}{\link{logical} (with default): if set to
+\code{TRUE}, TL curves are returned as temperature against count values (see
+details for more information). Note: The option overwrites the time vs. count
+TL curve. Select \code{FALSE} to import the raw data delivered by the
+lexsyg. Works for TL curves and spectra.}
+
+\item{fastForward}{\code{\link{logical}} (with default): if \code{TRUE} for a
+more efficient data processing only a list of \code{RLum.Analysis} objects is returned.}
+
+\item{import}{\code{\link{logical}} (with default): if set to \code{FALSE}, only
+the XSYG file structure is shown.}
+
+\item{pattern}{\code{\link{regex}} (with default): optional regular expression if \code{file} is
+a path to a folder, used to select only specific XSYG-files}
+
+\item{txtProgressBar}{\link{logical} (with default): enables \code{TRUE} or
+disables \code{FALSE} the progression bar during import}
+}
+\value{
+\bold{Using the option \code{import = FALSE}}\cr\cr A list
+consisting of two elements is shown: \item{Sample}{\link{data.frame} with
+information on file.} \item{Sequences}{\link{data.frame} with information on
+the sequences stored in the XSYG file}.\cr\cr \bold{Using the option
+\code{import = TRUE} (default)} \cr\cr A list is provided, the list elements
+contain: \item{Sequence.Header}{\link{data.frame} with information on the
+sequence.} \item{Sequence.Object}{\code{\linkS4class{RLum.Analysis}}
+containing the curves.}
+}
+\description{
+Imports XSYG files produced by a Freiberg Instrument lexsyg reader into R.
+}
+\details{
+\bold{How does the import function work?}\cr\cr The function uses the
+\code{\link{xml}} package to parse the file structure. Each sequence is
+subsequently translated into an \code{\linkS4class{RLum.Analysis}}
+object.\cr\cr
+
+\bold{General structure XSYG format}\cr\cr
+\code{<?xml?}\cr
+\code{<Sample>}\cr
+\code{ <Sequence>}\cr
+\code{  <Record>}\cr
+\code{   <Curve name="first curve" />}\cr
+\code{   <Curve name="curve with data">}\cr
+\code{    x0 , y0 ; x1 , y1 ; x2 , y2 ; x3 , y3}\cr
+\code{   </Curve>}\cr
+\code{  </Record>}\cr
+\code{ </Sequence>}\cr
+\code{</Sample>}\cr\cr
+So far, each XSYG file can only contain one \code{<Sample></Sample>}, but multiple
+sequences.\cr\cr Each record may comprise several curves.\cr\cr
+
+\bold{TL curve recalculation}\cr
+
+On the FI lexsyg device TL curves are recorded as time against count values.
+Temperature values are monitored on the heating plate and stored in a
+separate curve (time vs. temperature). If the option
+\code{recalculate.TL.curves = TRUE} is chosen, the time values for each TL
+curve are replaced by temperature values.\cr
+
+Practically, this means combining two matrices (Time vs. Counts and Time vs.
+Temperature) with different row numbers by their time values. Three cases
+are considered:
+
+HE: Heating element\cr PMT: Photomultiplier tube\cr Interpolation is done
+using the function \code{\link{approx}}\cr
+
+CASE (1): \code{nrow(matrix(PMT))} > \code{nrow(matrix(HE))} \cr
+
+Missing temperature values from the heating element are calculated using
+time values from the PMT measurement.\cr
+
+CASE (2): \code{nrow(matrix(PMT))} < \code{nrow(matrix(HE))} \cr
+
+Missing count values from the PMT are calculated using time values from the
+heating element measurement.\cr
+
+CASE (3): \code{nrow(matrix(PMT))} == \code{nrow(matrix(HE))} \cr
+
+A new matrix is produced using temperature values from the heating element
+and count values from the PMT. \cr
+
+\emph{Note: Due to the recalculation of the temperature values based on the
+values delivered by the heating element, multiple count values may exist for
+each temperature value, and temperature values may also decrease during
+heating, not only increase.}\cr
+
+\bold{Advanced file import}\cr
+
+To allow for a more efficient usage of the function, instead of a single path to a file
+a directory can be passed as input. In this particular case the function tries to detect
+all XSYG-files found in the directory and imports them all. Using this option the function
+internally constructs a list of the XSYG-files found in the directory. Please note that no
+recursive detection is supported, as this may lead to endless loops.
+}
+\note{
+This function is a beta version as the XSYG file format is not yet
+fully specified. Thus, further file operations (merge, export, write) should
+be done using the functions provided with the package \code{\link{xml}}.\cr
+
+\bold{So far, no image data import is provided!}\cr Corresponding values in
+the XSYG file are skipped.
+}
+\section{Function version}{
+ 0.5.7 (2016-09-05 20:21:40)
+}
+\examples{
+
+
+##(1) import XSYG file to R (uncomment for usage)
+
+#FILE <- file.choose()
+#temp <- read_XSYG2R(FILE)
+
+##(2) additional examples for pure XML import using the package XML
+##    (uncomment for usage)
+
+  ##import entire XML file
+  #FILE <- file.choose()
+  #temp <- XML::xmlRoot(XML::xmlTreeParse(FILE))
+
+  ##search for specific subnodes with curves containing 'OSL'
+  #getNodeSet(temp, "//Sample/Sequence/Record[@recordType = 'OSL']/Curve")
+
+##(3) How to extract single curves ... after import
+data(ExampleData.XSYG, envir = environment())
+
+##extract the OSL curves and select the first one
+OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]]
+
+##(4) How to see the structure of an object?
+structure_RLum(OSL.SARMeasurement$Sequence.Object)
+
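+##(5) sketch of the TL curve recalculation described in the details section:
+##temperature values from the heating element (HE) are interpolated at the
+##PMT time values using approx() (CASE 1; toy values, not measured data)
+PMT <- matrix(c(seq(0, 10, length.out = 50), rpois(50, 10)), ncol = 2)
+HE <- matrix(c(seq(0, 10, length.out = 20), seq(20, 400, length.out = 20)), ncol = 2)
+TL <- matrix(c(approx(x = HE[,1], y = HE[,2], xout = PMT[,1])$y, PMT[,2]), ncol = 2)
+plot(TL, type = "l", xlab = "Temperature [deg. C]", ylab = "Counts")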
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+Grehl, S., Kreutzer, S., Hoehne, M., 2013. Documentation of the
+XSYG file format. Unpublished Technical Note. Freiberg, Germany \cr\cr
+
+\bold{Further reading} \cr\cr XML: \url{http://en.wikipedia.org/wiki/XML}
+}
+\seealso{
+\code{\link{xml}}, \code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Data.Curve}}, \code{\link{approx}}
+}
+\keyword{IO}
+
diff --git a/man/replicate_RLum.Rd b/man/replicate_RLum.Rd
new file mode 100644
index 0000000..4c68459
--- /dev/null
+++ b/man/replicate_RLum.Rd
@@ -0,0 +1,32 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/replicate_RLum.R
+\name{replicate_RLum}
+\alias{replicate_RLum}
+\title{General replication function for RLum S4 class objects}
+\usage{
+replicate_RLum(object, times = NULL)
+}
+\arguments{
+\item{object}{an object of class \code{\linkS4class{RLum}} (\bold{required})}
+
+\item{times}{\code{\link{integer}} (optional): number of times each element is
+repeated}
+}
+\value{
+Returns a \code{\link{list}} of the replicated objects
+}
+\description{
+Function replicates RLum S4 class objects and returns a list of these objects
+}
+\section{Function version}{
+ 0.1.0 (2015-11-29 17:27:48)
+}
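+\examples{
+
+##minimal sketch: create a curve object (see set_RLum) and replicate it
+object <- set_RLum(
+class = "RLum.Data.Curve",
+recordType = "OSL",
+data = matrix(c(1:100, exp(-c(1:100))), ncol = 2))
+
+replicate_RLum(object, times = 3)
+
+}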
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum}}
+}
+\keyword{utilities}
+
diff --git a/man/report_RLum.Rd b/man/report_RLum.Rd
new file mode 100644
index 0000000..542f5ee
--- /dev/null
+++ b/man/report_RLum.Rd
@@ -0,0 +1,188 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/report_RLum.R
+\name{report_RLum}
+\alias{report_RLum}
+\title{Create an HTML report for (RLum) objects}
+\usage{
+report_RLum(object, file = tempfile(), title = "RLum.Report",
+  compact = TRUE, timestamp = TRUE, launch.browser = FALSE,
+  css.file = NULL, quiet = TRUE, clean = TRUE, ...)
+}
+\arguments{
+\item{object}{(\bold{required}): 
+The object to be reported on, preferably of any \code{RLum}-class.}
+
+\item{file}{\code{\link{character}} (with default): 
+A character string naming the output file. If no filename is provided a 
+temporary file is created.}
+
+\item{title}{\code{\link{character}} (with default):
+A character string specifying the title of the document.}
+
+\item{compact}{\code{\link{logical}} (with default):
+When \code{TRUE} the following report components are hidden: 
+\code{@.pid}, \code{@.uid}, \code{'Object structure'}, \code{'Session Info'}
+and only the first and last 5 rows of long matrices and data frames are shown.
+See details.}
+
+\item{timestamp}{\code{\link{logical}} (with default):
+\code{TRUE} to add a timestamp to the filename (suffix).}
+
+\item{launch.browser}{\code{\link{logical}} (with default):
+\code{TRUE} to open the HTML file in the system's default web browser after
+it has been rendered.}
+
+\item{css.file}{\code{\link{character}} (optional):
+Path to a CSS file to change the default styling of the HTML document.}
+
+\item{quiet}{\code{\link{logical}} (with default):
+\code{TRUE} to suppress printing of the pandoc command line.}
+
+\item{clean}{\code{\link{logical}} (with default): 
+\code{TRUE} to clean intermediate files created during rendering.}
+
+\item{...}{further arguments passed to or from other methods and to control
+the document's structure (see details).}
+}
+\value{
+Writes an HTML and an .Rds file.
+}
+\description{
+This function creates an HTML report for a given object, listing its complete
+structure and content. The object itself is saved as a serialised .Rds file.
+The report file serves both as a convenient way of browsing through objects with
+complex data structures and as a means of properly documenting and saving
+objects.
+}
+\details{
+The HTML report is created with \code{\link[rmarkdown]{render}} and has the
+following structure:
+
+\tabular{ll}{
+ \bold{Section} \tab \bold{Description} \cr
+ \code{Header} \tab A summary of general characteristics of the object \cr
+ \code{Object content} \tab A comprehensive list of the complete structure
+ and content of the provided object. \cr
+ \code{Object structure} \tab Summary of the object's structure given as a table \cr
+ \code{File} \tab Information on the saved RDS file \cr
+ \code{Session Info} \tab Captured output from sessionInfo() \cr
+ \code{Plots} \tab (optional) For \code{RLum-class} objects a variable number of plots \cr
+}
+
+The structure of the report can be controlled individually by providing one or more of the
+following arguments (all \code{logical}):
+
+\tabular{ll}{
+\bold{Argument} \tab \bold{Description} \cr
+\code{header} \tab Hide or show general information on the object \cr
+\code{main} \tab Hide or show the object's content \cr
+\code{structure} \tab Hide or show object's structure \cr
+\code{rds} \tab Hide or show information on the saved RDS file \cr
+\code{session} \tab Hide or show the session info \cr
+\code{plot} \tab Hide or show the plots (depending on object) \cr
+}
+
+Note that these arguments have higher precedence than \code{compact}.
+
+Further options that can be provided via the \code{...} argument:
+
+\tabular{ll}{
+\bold{Argument} \tab \bold{Description} \cr
+\code{short_table} \tab If \code{TRUE} only show the first and last 5 rows of long tables. \cr
+\code{theme} \tab Specifies the Bootstrap
+theme to use for the report. Valid themes include "default", "cerulean", "journal", "flatly", 
+"readable", "spacelab", "united", "cosmo", "lumen", "paper", "sandstone", "simplex", and "yeti". \cr
+\code{highlight} \tab Specifies the syntax highlighting
+ style. Supported styles include "default", "tango", "pygments", "kate", "monochrome", 
+ "espresso", "zenburn", "haddock", and "textmate". \cr
+\code{css} \tab \code{TRUE} or \code{FALSE} to enable/disable custom CSS styling \cr
+}
+
+The following arguments can be used to customise the report via CSS (Cascading Style Sheets):
+
+\tabular{ll}{
+\bold{Argument} \tab \bold{Description} \cr
+\code{font_family} \tab Define the font family of the HTML document (default: arial) \cr
+\code{headings_size} \tab Size of the <h1> to <h6> tags used to define HTML headings (default: 166\%). \cr
+\code{content_color} \tab Color of the object's content (default: #a72925). \cr
+}
+
+Note that these arguments must all be of class \code{\link{character}} and follow standard CSS syntax.
+For exhaustive CSS styling you can provide a custom CSS file via the argument \code{css.file}.
+CSS styling can be turned off using \code{css = FALSE}.
+}
+\note{
+This function requires the R packages 'rmarkdown', 'pander' and 'rstudioapi'.
+}
+\section{Function version}{
+ 0.1.0 (2016-09-09 10:32:17)
+}
+\examples{
+
+\dontrun{
+## Example: RLum.Results ----
+
+# load example data
+data("ExampleData.DeValues")
+
+# apply the MAM-3 age model and save results
+mam <- calc_MinDose(ExampleData.DeValues$CA1, sigmab = 0.2) 
+
+# create the HTML report
+report_RLum(object = mam, file = "~/CA1_MAM.Rmd",
+            timestamp = FALSE,
+            title = "MAM-3 for sample CA1")
+
+# when creating a report the input object is automatically saved to a
+# .Rds file (see saveRDS()).
+mam_report <- readRDS("~/CA1_MAM.Rds")
+all.equal(mam, mam_report)
+
+
+## Example: Temporary file & Viewer/Browser ----
+
+# (a)
+# Specifying a filename is not necessarily required. If no filename is provided,
+# the report is rendered in a temporary file. If you use the RStudio IDE, the
+# temporary report is shown in the interactive Viewer pane.
+report_RLum(object = mam)
+
+# (b)
+# Additionally, you can view the HTML report in your system's default web browser.
+report_RLum(object = mam, launch.browser = TRUE)
+
+
+## Example: RLum.Analysis ----
+
+data("ExampleData.RLum.Analysis")
+
+# create the HTML report (note that specifying a file
+# extension is not necessary)
+report_RLum(object = IRSAR.RF.Data, file = "~/IRSAR_RF")
+
+
+## Example: RLum.Data.Curve ----
+
+data.curve <- get_RLum(IRSAR.RF.Data)[[1]]
+
+# create the HTML report
+report_RLum(object = data.curve, file = "~/Data_Curve")
+
+## Example: Any other object ----
+x <- list(x = 1:10, 
+          y = runif(10, -5, 5), 
+          z = data.frame(a = LETTERS[1:20], b = dnorm(0:9)),
+          NA)
+
+report_RLum(object = x, file = "~/arbitrary_list")
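+
+
+## Example: custom styling via ... arguments ----
+# a sketch using the CSS-related arguments described in the details
+# section (all values are arbitrary choices)
+report_RLum(object = x, file = "~/arbitrary_list_styled",
+            font_family = "courier",
+            headings_size = "120\%",
+            content_color = "#2471a3")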
+}
+}
+\author{
+Christoph Burow, University of Cologne (Germany) \cr
+\cr R Luminescence Package Team}
+\seealso{
+\code{\link[rmarkdown]{render}}, \code{\link[pander]{pander_return}},
+\code{\link[pander]{openFileInOS}}, \code{\link[rstudioapi]{viewer}},
+\code{\link{browseURL}}
+}
+
diff --git a/man/sTeve.Rd b/man/sTeve.Rd
new file mode 100644
index 0000000..10fae34
--- /dev/null
+++ b/man/sTeve.Rd
@@ -0,0 +1,47 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/zzz.R
+\name{sTeve}
+\alias{sTeve}
+\title{sTeve - sophisticated tool for efficient data validation and evaluation}
+\usage{
+sTeve(n_frames = 10, t_animation = 2, n.tree = 7, type)
+}
+\arguments{
+\item{n_frames}{\code{\link{integer}} (with default): n frames}
+
+\item{t_animation}{\code{\link{integer}} (with default): t animation}
+
+\item{n.tree}{\code{\link{integer}} (with default): How many trees do you
+want to cut?}
+
+\item{type}{\code{\link{integer}} (optional): Make a decision: 1, 2 or 3}
+}
+\value{
+Validates your data.
+}
+\description{
+This function provides a sophisticated routine for comprehensive
+luminescence dating data analysis.
+}
+\details{
+This amazing sophisticated function validates your data seriously.
+}
+\note{
+This function should not be taken too seriously.
+}
+\examples{
+
+##no example available
+
+}
+\author{
+R Luminescence Team, 2012-2013
+}
+\references{
+#
+}
+\seealso{
+\link{plot_KDE}
+}
+\keyword{manip}
+
diff --git a/man/set_RLum.Rd b/man/set_RLum.Rd
new file mode 100644
index 0000000..1f3fd70
--- /dev/null
+++ b/man/set_RLum.Rd
@@ -0,0 +1,75 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/set_RLum.R
+\name{set_RLum}
+\alias{set_RLum}
+\title{General set function for RLum S4 class objects}
+\usage{
+set_RLum(class, originator, .uid = .create_UID(), .pid = NA_character_, ...)
+}
+\arguments{
+\item{class}{\code{\linkS4class{RLum}} (\bold{required}): name of the S4 class to
+create}
+
+\item{originator}{\code{\link{character}} (automatic): contains the name of the calling function
+(the function that produces this object); can be set manually.}
+
+\item{.uid}{\code{\link{character}} (automatic): sets an unique ID for this object
+using the internal C++ function \code{.create_UID}.}
+
+\item{.pid}{\code{\link{character}} (with default): option to provide a parent id for nesting
+at will.}
+
+\item{\dots}{further arguments that one might want to pass to the specific
+set method}
+}
+\value{
+Returns an object of the specified class.
+}
+\description{
+Function calls object-specific set functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the given class, the
+corresponding method to create an object from this class will be selected.
+Allowed additional arguments can be found in the documentation of the
+corresponding \code{\linkS4class{RLum}} class: \code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}}, \code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}
+}
+\section{Function version}{
+ 0.3.0 (2016-05-02 09:43:47)
+}
+\examples{
+
+##produce empty objects from each class
+set_RLum(class = "RLum.Data.Curve")
+set_RLum(class = "RLum.Data.Spectrum")
+set_RLum(class = "RLum.Data.Spectrum")
+set_RLum(class = "RLum.Analysis")
+set_RLum(class = "RLum.Results")
+
+##produce a curve object with arbitrary curve values
+object <- set_RLum(
+class = "RLum.Data.Curve",
+curveType = "arbitrary",
+recordType = "OSL",
+data = matrix(c(1:100,exp(-c(1:100))),ncol = 2))
+
+##plot this curve object
+plot_RLum(object)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/set_Risoe.BINfileData.Rd b/man/set_Risoe.BINfileData.Rd
new file mode 100644
index 0000000..88ac443
--- /dev/null
+++ b/man/set_Risoe.BINfileData.Rd
@@ -0,0 +1,39 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/set_Risoe.BINfileData.R
+\name{set_Risoe.BINfileData}
+\alias{set_Risoe.BINfileData}
+\title{General set function for Risoe.BINfileData S4 class objects}
+\usage{
+set_Risoe.BINfileData(METADATA, DATA, .RESERVED)
+}
+\arguments{
+\item{METADATA}{\code{\link{data.frame}}: metadata of the BIN-file}
+
+\item{DATA}{\code{\link{list}}: measurement data (count values)}
+
+\item{.RESERVED}{\code{\link{list}} (optional): internally reserved BIN-file fields}
+}
+\value{
+Returns a \code{\linkS4class{Risoe.BINfileData}} object built from the provided input objects.
+}
+\description{
+Function calls object-specific set functions for Risoe.BINfileData S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{Risoe.BINfileData}} objects.\cr Depending on the input object, the
+corresponding set function will be selected. Allowed arguments can be found
+in the documentation of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
+}
+\section{Function version}{
+ 0.1 (2015-11-29 17:27:48)
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}
+}
+\keyword{utilities}
+
diff --git a/man/structure_RLum.Rd b/man/structure_RLum.Rd
new file mode 100644
index 0000000..bab1922
--- /dev/null
+++ b/man/structure_RLum.Rd
@@ -0,0 +1,52 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/structure_RLum.R
+\name{structure_RLum}
+\alias{structure_RLum}
+\title{General structure function for RLum S4 class objects}
+\usage{
+structure_RLum(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum}}
+
+\item{\dots}{further arguments that one might want to pass to the specific
+structure method}
+}
+\value{
+Returns a \code{data.frame} with structure of the object.
+}
+\description{
+Function calls object-specific structure functions for RLum S4 class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding structure function will be selected. Allowed arguments can be found
+in the documentation of the corresponding \code{\linkS4class{RLum}} class.
+}
+\section{Function version}{
+ 0.2.0 (2016-05-02 09:36:06)
+}
+\examples{
+
+##load example data
+data(ExampleData.XSYG, envir = environment())
+
+##show structure
+structure_RLum(OSL.SARMeasurement$Sequence.Object)
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{RLum.Data.Image}},
+\code{\linkS4class{RLum.Data.Spectrum}},
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Results}}
+}
+\keyword{utilities}
+
diff --git a/man/template_DRAC.Rd b/man/template_DRAC.Rd
new file mode 100644
index 0000000..703912f
--- /dev/null
+++ b/man/template_DRAC.Rd
@@ -0,0 +1,79 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/template_DRAC.R
+\name{template_DRAC}
+\alias{template_DRAC}
+\title{Create a DRAC input data template (v1.1)}
+\usage{
+template_DRAC(nrow = 1, notification = TRUE)
+}
+\arguments{
+\item{nrow}{\code{\link{integer}} (with default): specifies the number of rows
+of the template (i.e., the number of data sets you want to submit)}
+
+\item{notification}{\code{\link{logical}} (with default): show or hide the
+notification}
+}
+\value{
+A list.
+}
+\description{
+This function returns a DRAC input template (v1.1) to be used in conjunction
+with the \code{\link{use_DRAC}} function
+}
+\examples{
+
+# create a new DRAC input template
+input <- template_DRAC()
+
+# show content of the input
+print(input)
+print(input$`Project ID`)
+print(input[[4]])
+
+
+## Example: DRAC Quartz example
+# note that you only have to assign new values where they 
+# are different to the default values
+input$`Project ID` <- "DRAC-Example"
+input$`Sample ID` <- "Quartz"
+input$`Conversion factors` <- "AdamiecAitken1998"
+input$`ExternalU (ppm)` <- 3.4
+input$`errExternal U (ppm)` <- 0.51
+input$`External Th (ppm)` <- 14.47
+input$`errExternal Th (ppm)` <- 1.69
+input$`External K (\%)` <- 1.2
+input$`errExternal K (\%)` <- 0.14
+input$`Calculate external Rb from K conc?` <- "N"
+input$`Calculate internal Rb from K conc?` <- "N"
+input$`Scale gammadoserate at shallow depths?` <- "N"
+input$`Grain size min (microns)` <- 90
+input$`Grain size max (microns)` <- 125
+input$`Water content ((wet weight - dry weight)/dry weight) \%` <- 5
+input$`errWater content \%` <- 2
+input$`Depth (m)` <- 2.2
+input$`errDepth (m)` <- 0.22
+input$`Overburden density (g cm-3)` <- 1.8
+input$`errOverburden density (g cm-3)` <- 0.1
+input$`Latitude (decimal degrees)` <- 30.0000
+input$`Longitude (decimal degrees)` <- 70.0000
+input$`Altitude (m)` <- 150
+input$`De (Gy)` <- 20
+input$`errDe (Gy)` <- 0.2
+
+# use DRAC
+\dontrun{
+output <- use_DRAC(input)
+}
+
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+}
+\references{
+Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
+Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
+}
+\seealso{
+\code{\link{as.data.frame}} \code{\link{list}}
+}
+
diff --git a/man/tune_Data.Rd b/man/tune_Data.Rd
new file mode 100644
index 0000000..e8a3f8a
--- /dev/null
+++ b/man/tune_Data.Rd
@@ -0,0 +1,61 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/tune_Data.R
+\name{tune_Data}
+\alias{tune_Data}
+\title{Tune data for experimental purpose}
+\usage{
+tune_Data(data, decrease.error = 0, increase.data = 0)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} (\bold{required}): input values,
+structure: data (\code{values[,1]}) and data error (\code{values [,2]}) are
+required}
+
+\item{decrease.error}{\code{\link{numeric}}: factor by which the error
+is decreased, ranges between 0 and 1.}
+
+\item{increase.data}{\code{\link{numeric}}: factor by which the number of data points
+is increased, ranges between 0 and Inf.}
+}
+\value{
+Returns a \code{\link{data.frame}} with tuned values.
+}
+\description{
+The error can be reduced and the sample size increased for experimental purposes.
+}
+\note{
+You should not use this function to improve your poor data set!
+}
+\section{Function version}{
+ 0.5.0 (2015-11-29 17:27:48)
+}
+\examples{
+## load example data set
+data(ExampleData.DeValues, envir = environment())
+x <- ExampleData.DeValues$CA1
+
+## plot original data
+plot_AbanicoPlot(data = x,
+                 summary = c("n", "mean"))
+
+## decrease error by 10 \%
+plot_AbanicoPlot(data = tune_Data(x, decrease.error = 0.1),
+                 summary = c("n", "mean"))
+
+## increase sample size by 200 \%
+#plot_AbanicoPlot(data = tune_Data(x, increase.data = 2) ,
+#                summary = c("n", "mean"))
+
+
+}
+\author{
+Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
+\references{
+#
+}
+\seealso{
+#
+}
+\keyword{manip}
+
diff --git a/man/use_DRAC.Rd b/man/use_DRAC.Rd
new file mode 100644
index 0000000..c78881e
--- /dev/null
+++ b/man/use_DRAC.Rd
@@ -0,0 +1,107 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/use_DRAC.R
+\name{use_DRAC}
+\alias{use_DRAC}
+\title{Use DRAC to calculate dose rate data}
+\usage{
+use_DRAC(file, name, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}}: spreadsheet to be passed
+to the DRAC website for calculation. Can also be a DRAC template object
+obtained from \code{template_DRAC()}.}
+
+\item{name}{\code{\link{character}}: Optional user name submitted to DRAC. If
+omitted, a random name will be generated}
+
+\item{...}{Further arguments.}
+}
+\value{
+Returns an \code{\linkS4class{RLum.Results}} object containing the following elements:
+
+\item{DRAC}{\link{list}: a named list containing the following elements in slot \code{@data}:
+
+\tabular{lll}{
+   \code{$highlights} \tab \code{\link{data.frame}} \tab summary of 25 most important input/output fields \cr
+   \code{$header} \tab \code{\link{character}} \tab HTTP header from the DRAC server response \cr
+   \code{$labels} \tab \code{\link{data.frame}} \tab descriptive headers of all input/output fields \cr
+   \code{$content} \tab \code{\link{data.frame}} \tab complete DRAC input/output table \cr
+   \code{$input} \tab \code{\link{data.frame}} \tab DRAC input table \cr
+   \code{$output} \tab \code{\link{data.frame}} \tab DRAC output table \cr
+}
+
+}
+\item{data}{\link{character} or \link{list}: path to the input spreadsheet or a DRAC template}
+\item{call}{\link{call}: the function call}
+\item{args}{\link{list}: used arguments}
+
+The output should be accessed using the function \code{\link{get_RLum}}.
+}
+\description{
+The function provides an interface from R to DRAC. An R-object or a
+pre-formatted XLS/XLSX file is passed to the DRAC website and the
+results are re-imported into R.
+}
+\section{Function version}{
+ 0.1.0 (2015-12-05 15:52:49)
+}
+\examples{
+
+## (1) Method using the DRAC spreadsheet
+
+file <-  "/PATH/TO/DRAC_Input_and_Output_Template.xlsx"
+
+# send the actual IO template spreadsheet to DRAC
+\dontrun{
+use_DRAC(file = file)
+}
+
+
+
+## (2) Method using an R template object
+
+# Create a template
+input <- template_DRAC()
+
+# Fill the template with values
+input$`Project ID` <- "DRAC-Example"
+input$`Sample ID` <- "Quartz"
+input$`Conversion factors` <- "AdamiecAitken1998"
+input$`ExternalU (ppm)` <- 3.4
+input$`errExternal U (ppm)` <- 0.51
+input$`External Th (ppm)` <- 14.47
+input$`errExternal Th (ppm)` <- 1.69
+input$`External K (\%)` <- 1.2
+input$`errExternal K (\%)` <- 0.14
+input$`Calculate external Rb from K conc?` <- "N"
+input$`Calculate internal Rb from K conc?` <- "N"
+input$`Scale gammadoserate at shallow depths?` <- "N"
+input$`Grain size min (microns)` <- 90
+input$`Grain size max (microns)` <- 125
+input$`Water content ((wet weight - dry weight)/dry weight) \%` <- 5
+input$`errWater content \%` <- 2
+input$`Depth (m)` <- 2.2
+input$`errDepth (m)` <- 0.22
+input$`Overburden density (g cm-3)` <- 1.8
+input$`errOverburden density (g cm-3)` <- 0.1
+input$`Latitude (decimal degrees)` <- 30.0000
+input$`Longitude (decimal degrees)` <- 70.0000
+input$`Altitude (m)` <- 150
+input$`De (Gy)` <- 20
+input$`errDe (Gy)` <- 0.2
+
+# use DRAC
+\dontrun{
+output <- use_DRAC(input)
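+
+# access the results; the documented access function is get_RLum().
+# This is a sketch assuming the default accessor returns the 'DRAC'
+# list element described in the value section above.
+results <- get_RLum(output)
+results$highlights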
+}
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Michael Dietze,
+GFZ Potsdam (Germany), Christoph Burow, University of Cologne (Germany)\cr
+\cr R Luminescence Package Team}
+\references{
+Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
+Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
+}
+
diff --git a/man/verify_SingleGrainData.Rd b/man/verify_SingleGrainData.Rd
new file mode 100644
index 0000000..38a965f
--- /dev/null
+++ b/man/verify_SingleGrainData.Rd
@@ -0,0 +1,136 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/verify_SingleGrainData.R
+\name{verify_SingleGrainData}
+\alias{verify_SingleGrainData}
+\title{Verify single grain data sets and check for invalid grains, i.e. zero light level grains}
+\usage{
+verify_SingleGrainData(object, threshold = 10, cleanup = FALSE,
+  cleanup_level = "aliquot", verbose = TRUE, plot = FALSE)
+}
+\arguments{
+\item{object}{\code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Analysis}}
+(\bold{required}): input object. The function also accepts a list with objects of allowed type.}
+
+\item{threshold}{\code{\link{numeric}} (with default): numeric threshold value for the allowed difference between
+the \code{mean} and the \code{var} of the count values (see details)}
+
+\item{cleanup}{\code{\link{logical}} (with default): if set to \code{TRUE} curves identified as
+zero light level curves are automatically removed. Output is an object of the same type as the input, i.e.
+either \code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Analysis}}}
+
+\item{cleanup_level}{\code{\link{character}} (with default): selects the level for the cleanup
+of the input data sets. Two options are allowed: \code{"curve"} or \code{"aliquot"}. If  \code{"curve"}
+is selected every single curve marked as \code{invalid} is removed. If \code{"aliquot"} is selected,
+curves of one aliquot (grain or disc) can be marked as invalid, but will not be removed. An aliquot
+will be only removed if all curves of this aliquot are marked as invalid.}
+
+\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback}
+
+\item{plot}{\code{\link{logical}} (with default): enables or disables graphical feedback}
+}
+\value{
+The function returns
+
+-----------------------------------\cr
+[ NUMERICAL OUTPUT ]\cr
+-----------------------------------\cr
+\bold{\code{RLum.Results}}-object\cr
+
+\bold{slot:} \bold{\code{@data}}\cr
+\tabular{lll}{
+\bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+ \code{$unique_pairs} \tab \code{data.frame} \tab the unique position and grain pairs \cr
+ \code{$selection_id} \tab \code{numeric} \tab the selection as record ID \cr
+ \code{$selection_full} \tab \code{data.frame} \tab the full selection table including the test parameters and the validity flag \cr
+}
+
+\bold{slot:} \bold{\code{@info}}\cr
+
+The original function call\cr
+
+\bold{Output variation}\cr
+
+For \code{cleanup = TRUE} the same type of object as the input is returned, but cleaned up (invalid curves removed).
+This means: Either an \code{\linkS4class{Risoe.BINfileData}} or an \code{\linkS4class{RLum.Analysis}}
+object is returned in such cases. An \code{\linkS4class{Risoe.BINfileData}} object can be exported
+to a BIN-file by using the function \code{\link{write_R2BIN}}.
+}
+\description{
+This function tries to automatically identify zero light level curves (grains) in single grain data
+measurements. \cr
+}
+\details{
+\bold{How does the method work?}\cr
+
+The function compares the expected value (\eqn{E(X)}) and the variance (\eqn{Var(X)})
+of the count values for each curve. Assuming that the background roughly follows a Poisson
+distribution, the absolute difference of both values should be zero or at least close to zero, as
+
+\deqn{E(x) = Var(x) = \lambda}
+
+Thus the function checks for:
+
+\deqn{|E(x) - Var(x)| \ge \Theta}
+
+with \eqn{\Theta} an arbitrary, user-defined threshold. Values above the threshold indicate
+curves comprising a signal.\cr
+
+Note: the absolute difference of \eqn{E(X)} and \eqn{Var(X)} was chosen instead of their ratio, as
+both can become 0, which would result in \code{Inf} values.
+}
+\note{
+This function can work with \code{\linkS4class{Risoe.BINfileData}} objects or
+\code{\linkS4class{RLum.Analysis}} objects (or a list of them). However, the function is highly optimised
+for \code{\linkS4class{Risoe.BINfileData}} objects, as it makes sense to identify and remove invalid
+grains before the conversion to an \code{\linkS4class{RLum.Analysis}} object.\cr
+
+The check for invalid curves works rather robustly, and it is likely that Reg0 curves
+within a SAR cycle are removed as well. Therefore it is strongly recommended to use the argument
+\code{cleanup = TRUE} carefully.
+}
+\section{Function version}{
+ 0.2.0 (2016-06-20 19:34:56)
+}
+\examples{
+
+##01 - basic example I
+##just show how to apply the function
+data(ExampleData.XSYG, envir = environment())
+
+##verify and get data.frame out of it
+verify_SingleGrainData(OSL.SARMeasurement$Sequence.Object)$selection_full
+
+##02 - basic example II
+data(ExampleData.BINfileData, envir = environment())
+id <- verify_SingleGrainData(object = CWOSL.SAR.Data,
+cleanup_level = "aliquot")$selection_id
+
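+##02b - conceptual sketch of the test criterion from the details section:
+##compare the absolute difference of mean and variance of toy count values
+##against the default threshold of 10 (values are made up for illustration)
+counts_signal <- rpois(100, lambda = 10) + 1:100   ##background plus a signal
+counts_noise <- rpois(100, lambda = 10)            ##pure Poisson background
+abs(mean(counts_signal) - var(counts_signal)) >= 10  ##TRUE, curve comprises a signal
+abs(mean(counts_noise) - var(counts_noise)) >= 10    ##likely FALSE, curve flagged invalid
+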
+\dontrun{
+##03 - advanced example I
+##importing and exporting a BIN-file
+
+##select and import file
+file <- file.choose()
+object <- read_BIN2R(file)
+
+##remove invalid aliquots(!)
+object <- verify_SingleGrainData(object, cleanup = TRUE)
+
+##export to new BIN-file
+write_R2BIN(object, paste0(dirname(file),"/", basename(file), "_CLEANED.BIN"))
+}
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
+\references{
+-
+}
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}},
+\code{\link{write_R2BIN}}, \code{\link{read_BIN2R}}
+}
+\keyword{datagen}
+\keyword{manip}
+
diff --git a/man/write_R2BIN.Rd b/man/write_R2BIN.Rd
new file mode 100644
index 0000000..6f31687
--- /dev/null
+++ b/man/write_R2BIN.Rd
@@ -0,0 +1,86 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/write_R2BIN.R
+\name{write_R2BIN}
+\alias{write_R2BIN}
+\title{Export Risoe.BINfileData into Risoe BIN-file}
+\usage{
+write_R2BIN(object, file, version, compatibility.mode = FALSE,
+  txtProgressBar = TRUE)
+}
+\arguments{
+\item{object}{\code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
+input object to be stored in a bin file.}
+
+\item{file}{\code{\link{character}} (\bold{required}): file name and path of
+the output file\cr [WIN]: \code{write_R2BIN(object, "C:/Desktop/test.bin")},
+\cr [MAC/LINUX]: \code{write_R2BIN("/User/test/Desktop/test.bin")}}
+
+\item{version}{\code{\link{character}} (optional): version number for the
+output file. If no value is provided the highest version number from the
+\code{\linkS4class{Risoe.BINfileData}} is taken automatically.\cr\cr Note:
+This argument can be used to convert BIN-file versions.}
+
+\item{compatibility.mode}{\code{\link{logical}} (with default): this option
+recalculates the position values if necessary and sets the max. value to 48.
+The old position number is appended as a comment (e.g., 'OP: 70'). This option
+accounts for potential compatibility problems with the Analyst software.}
+
+\item{txtProgressBar}{\link{logical} (with default): enables or disables
+\code{\link{txtProgressBar}}.}
+}
+\value{
+Writes a binary file.
+}
+\description{
+Exports a Risoe.BINfileData object in a *.bin or *.binx file that can be
+opened by the Analyst software or other Risoe software.
+}
+\details{
+The structure of the exported binary data follows the data structure
+published in the Appendices of the Analyst manual p. 42.\cr\cr If
+\code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} are not of type
+\code{\link{character}}, no transformation into numeric values is done.
+}
+\note{
+The function only roughly checks the data structures. The validity of
+the output data depends on the user.\cr\cr The validity of the file path is
+not further checked. \cr BIN-file conversions using the argument
+\code{version} may be lossy, depending on the chosen input and
+output data (e.g., conversion from version 08 to 07 to 06 to 04 or 03).\cr
+
+\bold{Warning}\cr
+
+Although the coding was done carefully, it seems that the BIN/BINX-files
+produced by Risoe DA 15/20 TL/OSL readers differ slightly on the byte level.
+No obvious differences are observed in the METADATA; however, the written
+BIN/BINX-file may not be fully compatible, at least not identical to the ones
+directly produced by the Risoe readers!\cr
+
+ROI definitions (introduced in BIN-file version 8) are not supported! They are furthermore
+ignored by the function \code{\link{read_BIN2R}}.
+}
+\section{Function version}{
+ 0.4.0 (2016-06-13 21:17:19)
+}
+\examples{
+
+##uncomment for usage
+
+##data(ExampleData.BINfileData, envir = environment())
+##write_R2BIN(CWOSL.SAR.Data, file="[your path]/output.bin")
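+
+##sketch: convert the BIN-file version on export via the 'version' argument
+##documented above (uncomment for usage; path and version value are placeholders)
+##write_R2BIN(CWOSL.SAR.Data, file="[your path]/output_v06.binx", version="06")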
+
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
+\references{
+DTU Nutech, 2016. The Sequence Editor, User's Manual, February, 2016.
+\url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+}
+\seealso{
+\code{\link{read_BIN2R}}, \code{\linkS4class{Risoe.BINfileData}},
+\code{\link{writeBin}}
+}
+\keyword{IO}
+
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
new file mode 100644
index 0000000..cc9352a
--- /dev/null
+++ b/src/RcppExports.cpp
@@ -0,0 +1,31 @@
+// Generated by using Rcpp::compileAttributes() -> do not edit by hand
+// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+#include <RcppArmadillo.h>
+#include <Rcpp.h>
+
+using namespace Rcpp;
+
+// analyse_IRSARRF_SRS
+RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited, NumericVector values_natural_limited, int n_MC);
+RcppExport SEXP Luminescence_analyse_IRSARRF_SRS(SEXP values_regenerated_limitedSEXP, SEXP values_natural_limitedSEXP, SEXP n_MCSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
+    Rcpp::traits::input_parameter< NumericVector >::type values_regenerated_limited(values_regenerated_limitedSEXP);
+    Rcpp::traits::input_parameter< NumericVector >::type values_natural_limited(values_natural_limitedSEXP);
+    Rcpp::traits::input_parameter< int >::type n_MC(n_MCSEXP);
+    rcpp_result_gen = Rcpp::wrap(analyse_IRSARRF_SRS(values_regenerated_limited, values_natural_limited, n_MC));
+    return rcpp_result_gen;
+END_RCPP
+}
+// create_UID
+CharacterVector create_UID();
+RcppExport SEXP Luminescence_create_UID() {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
+    rcpp_result_gen = Rcpp::wrap(create_UID());
+    return rcpp_result_gen;
+END_RCPP
+}
diff --git a/src/analyse_IRSARRF_SRS.cpp b/src/analyse_IRSARRF_SRS.cpp
new file mode 100644
index 0000000..d507fc4
--- /dev/null
+++ b/src/analyse_IRSARRF_SRS.cpp
@@ -0,0 +1,66 @@
+//analyse_IRSARRF_SRS.cpp
+//author: Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+//version: 0.2.0 [2015-10-10]
+//Function calculates the squared residuals for the R function analyse_IRSAR.RF()
+//including MC runs for the obtained minimum
+//
+#include <RcppArmadilloExtensions/sample.h>
+// [[Rcpp::depends(RcppArmadillo)]]
+
+using namespace Rcpp;
+
+// [[Rcpp::export(".analyse_IRSARRF_SRS")]]
+RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited,
+                                    NumericVector values_natural_limited,
+                                    int n_MC
+                                    ){
+
+
+  //pre-define variables
+  //note: initialising a NumericVector from an int allocates a zeroed vector of that length
+  NumericVector residuals(values_natural_limited.length());
+  NumericVector results(values_regenerated_limited.size() - values_natural_limited.size());
+  NumericVector results_vector_min_MC(n_MC);
+
+
+  //(1) calculate sum of the squared residuals
+  // this will be used to find the best fit of the curves (which is the minimum)
+  for (int i=0; i<results.length(); ++i){
+
+    //squared residuals
+    for (int j=0; j<values_natural_limited.length(); ++j){
+      residuals[j] = pow((values_regenerated_limited[j+i] - values_natural_limited[j]),2);
+
+    }
+
+    //sum up the residuals
+    results[i] = sum(residuals);
+
+  }
+
+  //(2) error calculation
+  //use this values to bootstrap and find minimum values and to account for the variation
+  //that may result from this method itself (the minimum lays within a valley of minima)
+  //
+  //using the obtained sliding vector and the function RcppArmadillo::sample() (which equals the
+  //function sample() in R, but faster)
+  //http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation/
+  for (int i=0; i<results_vector_min_MC.length(); ++i){
+    results_vector_min_MC[i] = min(
+      RcppArmadillo::sample(
+        results,
+        results.length(),
+        TRUE,
+        NumericVector::create()
+     )
+    );
+  }
+
+  //build list with two elements
+  //sliding_vector: the original results_vector (this can be used to reproduced the results in R)
+  //sliding_vector_min_MC: minimum values based on bootstrapping
+  List results_list;
+    results_list["sliding_vector"] = results;
+    results_list["sliding_vector_min_MC"] = results_vector_min_MC;
+
+  return results_list;
+}
diff --git a/src/create_UID.cpp b/src/create_UID.cpp
new file mode 100644
index 0000000..8040025
--- /dev/null
+++ b/src/create_UID.cpp
@@ -0,0 +1,33 @@
+//create_UID.cpp
+//author: Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+//version: 0.1.0 [2016-01-26]
+// -------------------------------------------------------------------------------------------------
+//The purpose of this function is to create a unique ID for RLum objects based on the system time
+//and a random number.
+
+#include <Rcpp.h>
+#include <time.h>
+
+using namespace Rcpp;
+
+// [[Rcpp::export(".create_UID")]]
+CharacterVector create_UID() {
+
+  //define variables
+  CharacterVector random;
+  time_t rawtime;
+  struct tm * timeinfo;
+  char timestamp [80];
+
+  //set date + timestamp (code snippet taken from C++ reference page)
+  time (&rawtime);
+  timeinfo = localtime (&rawtime);
+  strftime (timestamp,80,"%Y-%m-%d-%I:%M.",timeinfo);
+
+  //get time information and add a random number
+  //according to CRAN policy the standard C function rand(), though sufficient here, is not allowed
+  random = runif(1);
+
+  //combine and return results
+  return timestamp + Rcpp::as<std::string>(random);
+}
