[r-cran-jsonlite] 01/06: Import Upstream version 0.9.19

Andreas Tille tille at debian.org
Wed Nov 16 08:46:44 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-jsonlite.

commit ad7fa5300b168957c501ffe2766cf25821a40fc0
Author: Andreas Tille <tille at debian.org>
Date:   Wed Nov 16 09:39:35 2016 +0100

    Import Upstream version 0.9.19
---
 DESCRIPTION                               |  26 +
 LICENSE                                   |   2 +
 MD5                                       | 169 +++++++
 NAMESPACE                                 |  35 ++
 NEWS                                      | 129 +++++
 R/apply_by_pages.R                        |  30 ++
 R/as.scalar.R                             |  19 +
 R/asJSON.AAAgeneric.R                     |   3 +
 R/asJSON.ANY.R                            |  24 +
 R/asJSON.AsIs.R                           |  14 +
 R/asJSON.Date.R                           |  15 +
 R/asJSON.NULL.R                           |   8 +
 R/asJSON.POSIXt.R                         |  38 ++
 R/asJSON.array.R                          |  33 ++
 R/asJSON.character.R                      |  42 ++
 R/asJSON.classRepresentation.R            |  10 +
 R/asJSON.complex.R                        |  24 +
 R/asJSON.data.frame.R                     | 111 +++++
 R/asJSON.factor.R                         |  17 +
 R/asJSON.function.R                       |  11 +
 R/asJSON.int64.R                          |   4 +
 R/asJSON.json.R                           |   9 +
 R/asJSON.list.R                           |  54 +++
 R/asJSON.logical.R                        |  39 ++
 R/asJSON.numeric.R                        |  43 ++
 R/asJSON.pairlist.R                       |   4 +
 R/asJSON.raw.R                            |  15 +
 R/asJSON.scalar.R                         |  12 +
 R/asJSON.ts.R                             |   5 +
 R/base64.R                                |  19 +
 R/cleannames.R                            |   6 +
 R/collapse.R                              |  15 +
 R/collapse_object.R                       |  13 +
 R/deparse_vector.R                        |  27 ++
 R/fixNativeSymbol.R                       |  25 +
 R/flatten.R                               |  46 ++
 R/fromJSON.R                              | 125 +++++
 R/helpfunctions.R                         |  29 ++
 R/is.recordlist.R                         |  26 +
 R/is.scalarlist.R                         |  14 +
 R/list_to_vec.R                           |   3 +
 R/loadpkg.R                               |   6 +
 R/makesymbol.R                            |   9 +
 R/null_to_na.R                            |  38 ++
 R/num_to_char.R                           |  40 ++
 R/pack.R                                  | 107 +++++
 R/parseJSON.R                             |  15 +
 R/prettify.R                              |  33 ++
 R/print.R                                 |  19 +
 R/push_parser.R                           |  27 ++
 R/raw_to_json.R                           |  13 +
 R/rbind.pages.R                           |  87 ++++
 R/serializeJSON.R                         |  47 ++
 R/simplify.R                              | 127 +++++
 R/simplifyDataFrame.R                     |  87 ++++
 R/stop.R                                  |   3 +
 R/stream.R                                | 208 ++++++++
 R/toJSON.R                                |  47 ++
 R/unbox.R                                 |  52 ++
 R/unescape_unicode.R                      |  19 +
 R/utf8conv.R                              |   3 +
 R/validate.R                              |  19 +
 R/warn_keep_vec_names.R                   |   6 +
 build/vignette.rds                        | Bin 0 -> 418 bytes
 inst/CITATION                             |  15 +
 inst/doc/json-aaquickstart.R              |  65 +++
 inst/doc/json-aaquickstart.Rmd            | 126 +++++
 inst/doc/json-aaquickstart.html           | 241 ++++++++++
 inst/doc/json-apis.Rmd                    | 376 +++++++++++++++
 inst/doc/json-apis.html                   | 347 ++++++++++++++
 inst/doc/json-mapping.pdf                 | Bin 0 -> 208166 bytes
 inst/doc/json-mapping.pdf.asis            |   6 +
 inst/doc/json-opencpu.R                   |   5 +
 inst/doc/json-opencpu.Rnw                 | 132 ++++++
 inst/doc/json-opencpu.pdf                 | Bin 0 -> 64127 bytes
 inst/doc/json-paging.Rmd                  | 223 +++++++++
 inst/doc/json-paging.html                 | 260 ++++++++++
 inst/tests/flatten.R                      |   9 +
 inst/tests/helper-toJSON.R                |  11 +
 inst/tests/issues.txt                     |   4 +
 inst/tests/readme.txt                     |   6 +
 inst/tests/test-fromJSON-NA-values.R      |  24 +
 inst/tests/test-fromJSON-array.R          |  53 +++
 inst/tests/test-fromJSON-dataframe.R      |  59 +++
 inst/tests/test-fromJSON-datasets.R       |  18 +
 inst/tests/test-fromJSON-date.R           |  18 +
 inst/tests/test-fromJSON-matrix.R         |  44 ++
 inst/tests/test-libjson-escaping.R        |  29 ++
 inst/tests/test-libjson-large.R           |  17 +
 inst/tests/test-libjson-utf8.R            |  40 ++
 inst/tests/test-libjson-validator.R       |  17 +
 inst/tests/test-network-Github.R          |  66 +++
 inst/tests/test-serializeJSON-datasets.R  |  18 +
 inst/tests/test-serializeJSON-functions.R |  26 +
 inst/tests/test-serializeJSON-types.R     |  38 ++
 inst/tests/test-toJSON-AsIs.R             |  14 +
 inst/tests/test-toJSON-Date.R             |  23 +
 inst/tests/test-toJSON-NA-values.R        |  13 +
 inst/tests/test-toJSON-NULL-values.R      |  23 +
 inst/tests/test-toJSON-POSIXt.R           |  74 +++
 inst/tests/test-toJSON-complex.R          |  24 +
 inst/tests/test-toJSON-dataframe.R        |  24 +
 inst/tests/test-toJSON-factor.R           |   7 +
 inst/tests/test-toJSON-keep-vec-names.R   |  32 ++
 inst/tests/test-toJSON-logical.R          |  21 +
 inst/tests/test-toJSON-matrix.R           |   9 +
 inst/tests/test-toJSON-numeric.R          |  24 +
 inst/tests/test-toJSON-raw.R              |  11 +
 inst/tests/test-toJSON-zerovec.R          |  23 +
 inst/tests/testS4.R                       |  14 +
 man/flatten.Rd                            |  39 ++
 man/fromJSON.Rd                           | 117 +++++
 man/prettify.Rd                           |  27 ++
 man/rbind.pages.Rd                        |  45 ++
 man/serializeJSON.Rd                      |  53 +++
 man/stream_in.Rd                          | 140 ++++++
 man/unbox.Rd                              |  48 ++
 man/validate.Rd                           |  24 +
 src/Makevars                              |  19 +
 src/base64.c                              | 225 +++++++++
 src/base64.h                              |  33 ++
 src/collapse_array.c                      |  37 ++
 src/collapse_object.c                     |  53 +++
 src/collapse_pretty.c                     | 179 +++++++
 src/escape_chars.c                        | 115 +++++
 src/integer64_to_na.c                     |  33 ++
 src/is_recordlist.c                       |  45 ++
 src/is_scalarlist.c                       |  35 ++
 src/modp_numtoa.c                         | 291 ++++++++++++
 src/modp_numtoa.h                         | 102 ++++
 src/null_to_na.c                          |  59 +++
 src/num_to_char.c                         |  78 +++
 src/parse.c                               | 112 +++++
 src/prettify.c                            | 148 ++++++
 src/push_parser.c                         |  64 +++
 src/push_parser.h                         |   3 +
 src/row_collapse.c                        |  55 +++
 src/validate.c                            |  43 ++
 src/yajl/api/yajl_common.h                |  75 +++
 src/yajl/api/yajl_gen.h                   | 167 +++++++
 src/yajl/api/yajl_parse.h                 | 226 +++++++++
 src/yajl/api/yajl_tree.h                  | 186 ++++++++
 src/yajl/api/yajl_version.h               |  23 +
 src/yajl/readme.txt                       |   9 +
 src/yajl/yajl.c                           | 175 +++++++
 src/yajl/yajl_alloc.c                     |  52 ++
 src/yajl/yajl_alloc.h                     |  34 ++
 src/yajl/yajl_buf.c                       | 103 ++++
 src/yajl/yajl_buf.h                       |  57 +++
 src/yajl/yajl_bytestack.h                 |  69 +++
 src/yajl/yajl_encode.c                    | 220 +++++++++
 src/yajl/yajl_encode.h                    |  34 ++
 src/yajl/yajl_gen.c                       | 366 ++++++++++++++
 src/yajl/yajl_lex.c                       | 763 ++++++++++++++++++++++++++++++
 src/yajl/yajl_lex.h                       | 117 +++++
 src/yajl/yajl_parser.c                    | 499 +++++++++++++++++++
 src/yajl/yajl_parser.h                    |  78 +++
 src/yajl/yajl_tree.c                      | 552 +++++++++++++++++++++
 src/yajl/yajl_version.c                   |   7 +
 tests/run-all.R                           |   7 +
 vignettes/json-aaquickstart.Rmd           | 126 +++++
 vignettes/json-apis.Rmd                   | 376 +++++++++++++++
 vignettes/json-apis.Rmd.orig              | 184 +++++++
 vignettes/json-mapping.Rnw.orig           | 583 +++++++++++++++++++++++
 vignettes/json-mapping.pdf.asis           |   6 +
 vignettes/json-opencpu.Rnw                | 132 ++++++
 vignettes/json-paging.Rmd                 | 223 +++++++++
 vignettes/json-paging.Rmd.orig            |  92 ++++
 vignettes/precompile.R                    |   8 +
 vignettes/references.bib                  | 150 ++++++
 170 files changed, 12855 insertions(+)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..64533de
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,26 @@
+Package: jsonlite
+Version: 0.9.19
+Title: A Robust, High Performance JSON Parser and Generator for R
+License: MIT + file LICENSE
+NeedsCompilation: yes
+Depends: methods
+Author: Jeroen Ooms, Duncan Temple Lang, Lloyd Hilaiel
+URL: http://arxiv.org/abs/1403.2805,
+        https://www.opencpu.org/posts/jsonlite-a-smarter-json-encoder
+BugReports: http://github.com/jeroenooms/jsonlite/issues
+Maintainer: Jeroen Ooms <jeroen.ooms at stat.ucla.edu>
+VignetteBuilder: knitr, R.rsp
+Description: A fast JSON parser and generator optimized for statistical data
+    and the web. Started out as a fork of 'RJSONIO', but has been completely
+    rewritten in recent versions. The package offers flexible, robust, high
+    performance tools for working with JSON in R and is particularly powerful
+    for building pipelines and interacting with a web API. The implementation is
+    based on the mapping described in the vignette (Ooms, 2014). In addition to
+    converting JSON data from/to R objects, 'jsonlite' contains functions to
+    stream, validate, and prettify JSON data. The unit tests included with the
+    package verify that all edge cases are encoded and decoded consistently for
+    use with dynamic data in systems and applications.
+Suggests: curl (>= 0.5), plyr, testthat, knitr, rmarkdown, R.rsp
+Packaged: 2015-11-27 20:06:25 UTC; jeroen
+Repository: CRAN
+Date/Publication: 2015-11-28 09:38:27
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..71586a8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,2 @@
+YEAR: 2015
+COPYRIGHT HOLDER: Jeroen Ooms
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..648dc10
--- /dev/null
+++ b/MD5
@@ -0,0 +1,169 @@
+826885e9a9cd67f4c315131c481cf63f *DESCRIPTION
+2b11af982e62e96e928587e70fe113fa *LICENSE
+5a58c9e49d90f11642db23d72b8ce327 *NAMESPACE
+e442b90763067e25312b5689f3db8bd6 *NEWS
+6392afdce07064ef77d4d49d02afb914 *R/apply_by_pages.R
+14bcfc61e1a010f05f4ce98f3d714e64 *R/as.scalar.R
+f5128c6b69f745efe3478372c1edf744 *R/asJSON.AAAgeneric.R
+8f9c49db93b95acea9964a6aeafe3f14 *R/asJSON.ANY.R
+3c0c9d555f572fafe375a57b8ac3bccf *R/asJSON.AsIs.R
+d3cb1622378eee1386424dc9a6f869b8 *R/asJSON.Date.R
+1c81ac6905684d593a92df6b2e4b999c *R/asJSON.NULL.R
+a268164dd6f2873d0286260f336c99e2 *R/asJSON.POSIXt.R
+5adf2b5f2203c08e2ef0f38a25a1ad6e *R/asJSON.array.R
+2ffd93fe6f8f8c0bd0600cda27a9e99b *R/asJSON.character.R
+b51612de3148fa36cacc82d1b856c7ad *R/asJSON.classRepresentation.R
+5d732b0dce5caa77b2901ab38f730398 *R/asJSON.complex.R
+b3ea8259af7281920005d7b17924eee9 *R/asJSON.data.frame.R
+557ff63ba0ec13de5b6f727627ec51f1 *R/asJSON.factor.R
+9df01dee05497f6ca358b80169e8f9ae *R/asJSON.function.R
+b58441d8b3ee56dcf5c05aab05fedf92 *R/asJSON.int64.R
+4cd5cff473a75152242c2c0c88e25245 *R/asJSON.json.R
+41e3855be2ae4d4eb0f6578bfe009e0c *R/asJSON.list.R
+a6072971e1167adb5d077e5d7d9c50ea *R/asJSON.logical.R
+328eead4cc7313d02a27b8260203a070 *R/asJSON.numeric.R
+b376c50c91caddd8a390e256d5ed9b13 *R/asJSON.pairlist.R
+fa461eada788c9231b8f486d0449c934 *R/asJSON.raw.R
+ff63b75529d018c9d22309715a6cd04e *R/asJSON.scalar.R
+3a9b614527a562cef764817d5b029b62 *R/asJSON.ts.R
+af33fe6e18b089922504cd706e4c32a4 *R/base64.R
+56915a86e78a7780df73a4c32e0cd9ad *R/cleannames.R
+6fc02cc465a0cd7276a1426a0d1cb4f2 *R/collapse.R
+441c4b477d55d6bf478c3354d5111bb5 *R/collapse_object.R
+d97f2b53cacf86184caf6959d1e46152 *R/deparse_vector.R
+8f4a044a90bf62da1f4233dc49aedf2c *R/fixNativeSymbol.R
+22a25cd71893e904b27ec45c16f259ad *R/flatten.R
+33f443bbe70eb553a0c7c4c97017dccb *R/fromJSON.R
+c0ffae30c415067ec42c0c75b5fb43d1 *R/helpfunctions.R
+ddd8e726241c082235d01da679c08006 *R/is.recordlist.R
+359fa06156183f02a19595b59d26ef54 *R/is.scalarlist.R
+f2a790afb805eab141a57244d7ae2525 *R/list_to_vec.R
+2fdf2de3abf1a95c7728bad187a454ae *R/loadpkg.R
+a11832ba11c041a7278f1ab8baf591a8 *R/makesymbol.R
+092b4a52b2af56d68b8e99e4b8cb9b32 *R/null_to_na.R
+e0379b523ef1d1d5608753796301590b *R/num_to_char.R
+510f85cb798a5e7a61f1fba82bdbb221 *R/pack.R
+ec478d3b9e438dd9b21858fce1a49801 *R/parseJSON.R
+0803a4e10e819cabb638d29739b4f7f0 *R/prettify.R
+bbf0a09478028541352f62ca3c1aa495 *R/print.R
+1a41cb6d58b87c61fa7671af0c68e6ef *R/push_parser.R
+a13262093fcd5fe68cdf1b6db628b5bb *R/raw_to_json.R
+584f420223f56249527284bec02eb915 *R/rbind.pages.R
+4ea6b630845526cba478d0f6fe048e62 *R/serializeJSON.R
+1dd24c1536f68288d0d4dddeb30d5fef *R/simplify.R
+ba01e3e30193969a46cb9c5ba74cc769 *R/simplifyDataFrame.R
+d735d795c263b8e9a6400df135a54190 *R/stop.R
+a03eca64105a2a09e234411a65e5ed4f *R/stream.R
+456003c526a090942774cde7fefba02d *R/toJSON.R
+241d61276b727fc1e7348dbeab742e47 *R/unbox.R
+d8505e361886fc0ac03d93b26eda8626 *R/unescape_unicode.R
+77465bb5bcda44fec86bbe810150618e *R/utf8conv.R
+4b1ef0cb1cd0814f6950b5d1d7ffb4ee *R/validate.R
+f63dc8cea8e10742970bb44db8bdd70e *R/warn_keep_vec_names.R
+95f798e237f7baced8dcd900a07f4a76 *build/vignette.rds
+c96f0a5dcfd55f6e3e13c321629cb4dc *inst/CITATION
+d02344562338775bfd92f48090f8f623 *inst/doc/json-aaquickstart.R
+59736f7a3e0e0f261b921bcea129edba *inst/doc/json-aaquickstart.Rmd
+43583af54063abb63a1836a375b41b04 *inst/doc/json-aaquickstart.html
+4e99d2b8c7a7580f31c1bf2ba2957a25 *inst/doc/json-apis.Rmd
+a03498e36b1a92f116b84127fa1b2854 *inst/doc/json-apis.html
+719e45223ab9bff85c709ad0e0525d91 *inst/doc/json-mapping.pdf
+bf707572c3655753491b742bfef2fad0 *inst/doc/json-mapping.pdf.asis
+41627461cb61033b6332d711e60761d8 *inst/doc/json-opencpu.R
+5577cc27f9fe4d7d86faa3ca88b74f90 *inst/doc/json-opencpu.Rnw
+c0cc85bb1e3464bfc34ef878960312db *inst/doc/json-opencpu.pdf
+738b15230beaf8317aafa65920f692bc *inst/doc/json-paging.Rmd
+203f5e8307e41a4e573f219b7ef8bcc7 *inst/doc/json-paging.html
+b39b11c0272ca25f6818952bff533031 *inst/tests/flatten.R
+b1bae2329825cb18b0a1d9995714985a *inst/tests/helper-toJSON.R
+f23023e455638146ad63169f20d36803 *inst/tests/issues.txt
+71ea8e4256bf3294c409c881681a934d *inst/tests/readme.txt
+4cb91a98ddd47a10d233678ab5989d1a *inst/tests/test-fromJSON-NA-values.R
+c9f19e48a92535e5532f996cbbac02d0 *inst/tests/test-fromJSON-array.R
+27be9afc0fa79345ad41d8ad53aa7200 *inst/tests/test-fromJSON-dataframe.R
+aa7791160baad2fd7fb3597ed808348a *inst/tests/test-fromJSON-datasets.R
+d75eee473a26e82255e677ba26520784 *inst/tests/test-fromJSON-date.R
+c6b5d8bad601bfe421a138faf26b7658 *inst/tests/test-fromJSON-matrix.R
+e237a83753f070f71421b860b91e7e72 *inst/tests/test-libjson-escaping.R
+cdc6699814eaee6709c09f96e965eace *inst/tests/test-libjson-large.R
+4ca0c2309ec1ace0dfdefc9a1285df56 *inst/tests/test-libjson-utf8.R
+5397e2ea1e806a04fb51e4eb59d34fd7 *inst/tests/test-libjson-validator.R
+361c49618cf3486bd5437dd1bb41971f *inst/tests/test-network-Github.R
+cd499599d5494ca0fc5800f511409f8a *inst/tests/test-serializeJSON-datasets.R
+aeb2168457555594252b099a76bcaa2c *inst/tests/test-serializeJSON-functions.R
+97e85447387747898b1ea5840f53c81a *inst/tests/test-serializeJSON-types.R
+b84e60041fc128cb5170f10e577c9ad5 *inst/tests/test-toJSON-AsIs.R
+eb4a97b650ff522419054f5ffaf71b5d *inst/tests/test-toJSON-Date.R
+45420c7ed3efa57e9bae8a45784c7b02 *inst/tests/test-toJSON-NA-values.R
+ff5f8b6ee8ec115226bb319131e4b361 *inst/tests/test-toJSON-NULL-values.R
+4712592e3c1bc94ca3a1c67e5c253242 *inst/tests/test-toJSON-POSIXt.R
+c9de8f6eb487ce445780eb3fbbf0209e *inst/tests/test-toJSON-complex.R
+928cfa5544be5c01a6e91983f2e83e34 *inst/tests/test-toJSON-dataframe.R
+83355d4d1aa22a0616da31644b30fa7d *inst/tests/test-toJSON-factor.R
+6319bd28125018c6b955b284de617dec *inst/tests/test-toJSON-keep-vec-names.R
+5a7f74f2f51703cdae5eed433b4ed5d4 *inst/tests/test-toJSON-logical.R
+fb28c7dc5dbd33ed9c9f4cb6d2d7ab01 *inst/tests/test-toJSON-matrix.R
+0759a0a27f2346bb29cb13179ca5759d *inst/tests/test-toJSON-numeric.R
+b67ddf907b7eda8835e59b0cf944f1b3 *inst/tests/test-toJSON-raw.R
+bdad5ec4e8cd10c38cf233a8b1305daa *inst/tests/test-toJSON-zerovec.R
+609172b33786e8d3ae0dab8450d21a0a *inst/tests/testS4.R
+e635dae3629bdc8929ad6333fb490933 *man/flatten.Rd
+0f62f6e6c6685865bddaa8fc72efa0dc *man/fromJSON.Rd
+c11fc6155ec81aa0328ded58d25f9c21 *man/prettify.Rd
+a62c8f16af864d890e437afbec5b485b *man/rbind.pages.Rd
+5ffee26ad9e5d035f91677488f3d088c *man/serializeJSON.Rd
+29d3fc427815a7ea1c30d6f011a5cab4 *man/stream_in.Rd
+b061d1c30f4dc301d0dfc28cf98f1e8a *man/unbox.Rd
+70dd43a31bfbdacef6f6688f48d02cba *man/validate.Rd
+36086dda4d4fecd925d8e45f9805d25a *src/Makevars
+766384995d24d4939dae31e5d955a3bb *src/base64.c
+4dc5aee3f5803a9c71315f2d68e0d1b5 *src/base64.h
+faea98fda8fdbb6f78697710542c0fba *src/collapse_array.c
+0ae4cfc11ff20bd4930f6b106d21ba7f *src/collapse_object.c
+0549413f578a2afa06df08984092ce4d *src/collapse_pretty.c
+b8327031bf8e32e0ae0d18b53deb1990 *src/escape_chars.c
+f5b084b6a0394dcfb37917435d2480f3 *src/integer64_to_na.c
+ba977a53aa8e0ebd1bab8d9ba4c3276b *src/is_recordlist.c
+09701a7eb31c40523d58cb06ecaba86b *src/is_scalarlist.c
+2efe63deda72cbbd3ced4181899f214a *src/modp_numtoa.c
+a1a97f472d00c7f705a84e96feb7d352 *src/modp_numtoa.h
+b1d3deee3a4ba2960d857892991ad682 *src/null_to_na.c
+1664105ca329631102383dadd72dec79 *src/num_to_char.c
+5bb8438acc3c958a2724526e4a6674c2 *src/parse.c
+ca491c7e763e61bf82e17b66966ecc78 *src/prettify.c
+8becd2a5c39b0f1705dc432f50d2ee87 *src/push_parser.c
+4d8d6fb9043e7306cc526f267183313c *src/push_parser.h
+cd345103e29145d011bbcfdc3b5b1fa2 *src/row_collapse.c
+53dd437fe1e446226980a8e3c2f24c8a *src/validate.c
+82090dc44b5da981b6b39729bbd01b30 *src/yajl/api/yajl_common.h
+8d59219a0f3e35778495a65c055c73f1 *src/yajl/api/yajl_gen.h
+95792072fd28bbb37dfd4fb25f9ce893 *src/yajl/api/yajl_parse.h
+c1951a11b41bcd2f6fe97072fee6e182 *src/yajl/api/yajl_tree.h
+af5ebc5fff57f84064c2bc5c79420101 *src/yajl/api/yajl_version.h
+9a58fd6f1c6be2244023dabcc77447d8 *src/yajl/readme.txt
+6e750b3ec74e85174ed799cc944e931e *src/yajl/yajl.c
+2be6b0133f8dc6190771e13b71eb0fda *src/yajl/yajl_alloc.c
+c283775f8a3dad48d89b6366588e1c6c *src/yajl/yajl_alloc.h
+184cf783918db8355b385990f5bfbd27 *src/yajl/yajl_buf.c
+6bf3dce93b04e488416f10bad1d37dd6 *src/yajl/yajl_buf.h
+9666a608f17725d307cb6723a273ac3b *src/yajl/yajl_bytestack.h
+22b4abce2656c3db32371fa2df1e256d *src/yajl/yajl_encode.c
+0ddd919c7a1b2593b2cc2cdd41285aaf *src/yajl/yajl_encode.h
+23f9d3424764408da72043650700144f *src/yajl/yajl_gen.c
+0f011605c67b70bd5f54b2aeacc78e55 *src/yajl/yajl_lex.c
+3ddc5742fd9bde4cc387bbae35c01236 *src/yajl/yajl_lex.h
+c3b70695f8a1225a9457a501e201c4f7 *src/yajl/yajl_parser.c
+3a27147e92286b52862bf17665eda478 *src/yajl/yajl_parser.h
+084ecc84d399e3a2b2227331ea145c7b *src/yajl/yajl_tree.c
+ab3de20370cc57144f8e1b449a3e2ab2 *src/yajl/yajl_version.c
+669d289a39b2e8af6c90e562d5213d11 *tests/run-all.R
+59736f7a3e0e0f261b921bcea129edba *vignettes/json-aaquickstart.Rmd
+4e99d2b8c7a7580f31c1bf2ba2957a25 *vignettes/json-apis.Rmd
+b7f884b8f9b4306cf7af157808ec85cb *vignettes/json-apis.Rmd.orig
+0e019ff3da3b7378d6f356311bf8d839 *vignettes/json-mapping.Rnw.orig
+bf707572c3655753491b742bfef2fad0 *vignettes/json-mapping.pdf.asis
+5577cc27f9fe4d7d86faa3ca88b74f90 *vignettes/json-opencpu.Rnw
+738b15230beaf8317aafa65920f692bc *vignettes/json-paging.Rmd
+c901571d19813378301caf291bf0f463 *vignettes/json-paging.Rmd.orig
+3482b70f8858142fbce1f1f65310a696 *vignettes/precompile.R
+bd5d57d6cc98bc3ae5e157fd8eaaff2b *vignettes/references.bib
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..f3bec2e
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,35 @@
+# Generated by roxygen2 (4.1.1): do not edit by hand
+
+S3method(print,json)
+S3method(print,scalar)
+export(flatten)
+export(fromJSON)
+export(minify)
+export(prettify)
+export(rbind.pages)
+export(serializeJSON)
+export(stream_in)
+export(stream_out)
+export(toJSON)
+export(unbox)
+export(unserializeJSON)
+export(validate)
+import(methods)
+useDynLib(jsonlite,C_collapse_array)
+useDynLib(jsonlite,C_collapse_array_pretty_inner)
+useDynLib(jsonlite,C_collapse_array_pretty_outer)
+useDynLib(jsonlite,C_collapse_object)
+useDynLib(jsonlite,C_collapse_object_pretty)
+useDynLib(jsonlite,C_escape_chars)
+useDynLib(jsonlite,C_is_recordlist)
+useDynLib(jsonlite,C_is_scalarlist)
+useDynLib(jsonlite,C_null_to_na)
+useDynLib(jsonlite,C_row_collapse_array)
+useDynLib(jsonlite,C_row_collapse_object)
+useDynLib(jsonlite,R_feed_push_parser)
+useDynLib(jsonlite,R_finalize_push_parser)
+useDynLib(jsonlite,R_integer64_to_char)
+useDynLib(jsonlite,R_num_to_char)
+useDynLib(jsonlite,R_parse)
+useDynLib(jsonlite,R_reformat)
+useDynLib(jsonlite,R_validate)
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..a2585d7
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,129 @@
+0.9.19
+ - Remove the View() workaround because it causes issues in RStudio and
+   because the bug in utils::View has been fixed in R-patched.
+
+0.9.18
+ - Removed support for the archived int64 package. Use bit64 instead.
+ - The stream_in function now skips over blank lines
+ - Remove workaround for View() in RStudio
+ - fromJSON now sets an 'Accept' request header for URLs.
+
+0.9.17
+ - Fix for 1 dimensional array objects
+ - Fix for unnamed data frame
+ - Fix for duplicate _row fields
+ - The 'unbox' function now accepts classed scalars such as dates
+ - Ignore BOM with warning in push parser
+
+0.9.16
+ - Performance optimizations in asJSON.data.frame by avoiding apply()
+ - Prettifying is now done by default in R (thanks to Yihui Xie)
+ - Add json_verbatim option to insert verbatim json
+ - Improve verbose output of streaming functions
+
+0.9.15
+ - Replaced RCurl dependency with the new curl package.
+ - Added push parser for more efficient parsing from connection interfaces (e.g. files or urls).
+ - The toMongo function has been removed. Use stream_out instead.
+ - The `View` function is no longer exposed in recent versions of RStudio, which support nested data natively.
+ - Add hidden bigint_as_char argument in fromJSON
+ - Fixed a memory leak in fromJSON.
+ - I() forces boxing of a scalar when 'auto_unbox = TRUE' for RJSONIO compatibility.
+ - toJSON now supports 'keep_vec_names=TRUE', which mimics RJSONIO legacy behavior.
+ - toJSON now supports 'time_format' argument to customize formatting of POSIXt strings.
+
+0.9.14
+ - Change license to MIT (for GPL2 compatibility).
+ - Add support for digits = I(n) to use significant precision.
+ - When 'pretty' in toJSON is numeric, it specifies the number of spaces to indent.
+ - Bug fix in validate() related to ScalarLogical
+ - Add support for dataframe = "values"
+ - Byte-order-marks are now ignored as suggested by rfc7159
+ - Add support for integer64 class (bit64 package)
+
+0.9.13
+ - Ported some number formatting to C
+ - Fix when http server gives no content-type header
+ - Prevent auto unboxing within a matrix
+ - Assume UTF8 but fall back on native encoding for files and urls in fromJSON
+ - Fix for 32 bit windows XP.
+
+0.9.12
+ - New JSON parser based on yajl. Some changes:
+    * smaller and faster than libjson
+    * better error messages
+    * automatic validation
+    * native \uXXXX unicode parsing
+    * integer parsing
+    * uses spaces instead of tabs for prettify
+ - Other functions ported to C: collapse_object, null_to_na, is_recordlist, is_scalarlist
+ - Expose 'flatten' function
+ - Row names are encoded as "_row" instead of "$row"
+ - Start with streaming functions
+ - Some internal changes to support BSON/MongoDB
+
+0.9.11
+ - Added toJSON null argument
+ - Fix bug in prettify object name escaping
+ - Use C code for escaping and collapsing vectors
+ - Several performance enhancements in fromJSON/simplify code
+ - The auto_unbox argument is ignored for dataframes when dataframe="column"
+
+0.9.10
+ - Add support for escaped (hexadecimal) unicode characters in fromJSON(unicode = TRUE)
+ - Exporting a wrapper for View() that flattens nested data frames
+ - Add print method for scalar (unbox)
+
+0.9.9
+ - Fix bug where 'flatten' argument wasn't passed down
+ - Make asJSON automatically unname any named lists within a data frame
+ - fromJSON(url) now sets Accept header
+ - Increase default to digits=4 in toJSON
+
+0.9.8
+ - Remove whitespace from default output of toJSON
+ - Split vignette in 3 documents
+ - Add support for simplifyMatrix within a data frame
+ - Add class "json" output of toJSON, simplify, minify
+ - Add print method for json
+ - Cleaned up unit tests
+ - Workaround for SSL handshake issues in https downloads
+ - Raise warnings for SSL or Content-Type abnormalities
+
+0.9.7
+ - formally expose 'force' argument in toJSON
+ - formally expose 'flatten' argument in fromJSON
+ - fix bug in simplifyDataFrame
+ - fix in rlibjson code that converted empty array to AsIs object
+ - auto coerce mongo type dates in fromJSON
+
+0.9.6
+ - toJSON gains argument auto_unbox
+ - Minor fixes
+
+0.9.5
+ - Never raise error in toJSON when force=TRUE
+ - Fix typo in line 2 of JSONDefs/GNU_C.h (GUN -> GNU)
+ - Run unit tests during R CMD check
+ - Update vignette
+
+0.9.4
+ - Added minify function
+ - Added unbox function
+ - Fixed bug where toJSON 'force' argument wasn't passed down
+ - Removed -DJSON_VALIDATE -DJSON_STREAM=1 from Makevars.in (already in JSONoptions.h)
+
+0.9.3
+ - Changes by Prof Ripley for Solaris/CRAN
+
+0.9.2
+ - Update libjson to 7.6.1 (fixes bug in the validator)
+ - Generalized toJSON method 'matrix' to 'array' to work for higher dimensions.
+ - Add option to encode matrix either row-major or column-major.
+ - Set default SSL version to 3 for fromJSON("https://...")
+
+0.9.1
+ - Major performance improvements for toJSON.data.frame
+
+0.9.0
+ - Initial release
diff --git a/R/apply_by_pages.R b/R/apply_by_pages.R
new file mode 100644
index 0000000..a940154
--- /dev/null
+++ b/R/apply_by_pages.R
@@ -0,0 +1,30 @@
+apply_by_pages <- function(x, FUN, pagesize, verbose, ...){
+  stopifnot(is.data.frame(x))
+  nr <- nrow(x)
+  npages <- nr %/% pagesize;
+  lastpage <- nr %% pagesize;
+
+  for(i in seq_len(npages)){
+    from <- pagesize * (i-1) + 1;
+    to <- pagesize * i
+    FUN(x[from:to, ,drop = FALSE], ...)
+    if(verbose) cat("\rProcessed", i * pagesize, "rows...")
+  }
+
+  if(lastpage){
+    from <- nr - lastpage + 1;
+    FUN(x[from:nr, ,drop = FALSE], ...)
+  }
+  if(verbose) cat("\rComplete! Processed total of", nr, "rows.\n")
+  invisible();
+}
+
+#this is another slightly slower implementation
+apply_by_pages2 <- function(x, FUN, pagesize, verbose, ...){
+  x2 <- split(x, seq_len(nrow(x)) %/% pagesize)
+  for(page in x2){
+    if(verbose) message("Writing ", nrow(page), " lines.")
+    FUN(page)
+  }
+  invisible()
+}
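
An illustrative sketch of the paging arithmetic above (my own example, not
part of the upstream sources): 25 rows with pagesize 10 yield two full pages
plus a final page of 5.

    x <- data.frame(i = 1:25)
    apply_by_pages(x, function(page, ...) cat(nrow(page), ""), pagesize = 10, verbose = FALSE)
    # prints: 10 10 5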
diff --git a/R/as.scalar.R b/R/as.scalar.R
new file mode 100644
index 0000000..3bacfa4
--- /dev/null
+++ b/R/as.scalar.R
@@ -0,0 +1,19 @@
+as.scalar <- function(obj) {
+  # Lists can never be a scalar (this can arise if a dataframe contains a column
+  # with lists)
+  if(length(dim(obj)) > 1){
+    if(!identical(nrow(obj), 1L)){
+      warning("Tried to use as.scalar on an array or dataframe with ", nrow(obj), " rows.", call.=FALSE)
+      return(obj)
+    }
+  } else if(!identical(length(obj), 1L)) {
+    warning("Tried to use as.scalar on an object of length ", length(obj), call.=FALSE)
+    return(obj)
+  } else if(is.namedlist(obj)){
+    warning("Tried to use as.scalar on a named list.", call.=FALSE)
+    return(obj)
+  }
+
+  class(obj) <- c("scalar", class(obj))
+  return(obj)
+}
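
For context: as.scalar is the internal tag behind the exported unbox()
function. A minimal sketch of the effect, assuming the toJSON interface
documented further down:

    toJSON(list(x = 1))          # {"x":[1]}
    toJSON(list(x = unbox(1)))   # {"x":1}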
diff --git a/R/asJSON.AAAgeneric.R b/R/asJSON.AAAgeneric.R
new file mode 100644
index 0000000..03ff146
--- /dev/null
+++ b/R/asJSON.AAAgeneric.R
@@ -0,0 +1,3 @@
+setGeneric("asJSON", function(x, ...) {
+  standardGeneric("asJSON")
+})
diff --git a/R/asJSON.ANY.R b/R/asJSON.ANY.R
new file mode 100644
index 0000000..e1fbb47
--- /dev/null
+++ b/R/asJSON.ANY.R
@@ -0,0 +1,24 @@
+#' @import methods
+setMethod("asJSON", "ANY", function(x, force = FALSE, ...) {
+  if (isS4(x) && !is(x, "classRepresentation")) {
+    if (isTRUE(force)) {
+      return(asJSON(attributes(x), force = force, ...))
+    } else {
+      stop("No method for S4 class:", class(x))
+    }
+  } else if (length(class(x)) > 1) {
+    # If an object has multiple classes, we recursively try the next class. This is
+    # S3 style dispatching that doesn't work by default for formal method definitions
+    # There should be a more native way to accomplish this
+    return(asJSON(structure(x, class = class(x)[-1]), force = force, ...))
+  } else if (isTRUE(force) && existsMethod("asJSON", class(unclass(x)))) {
+    # As a last resort we can force encoding using the unclassed object
+    return(asJSON(unclass(x), force = force, ...))
+  } else if (isTRUE(force)) {
+    warning("No method asJSON S3 class: ", class(x))
+    return(asJSON(NULL))
+  } else {
+    # If even that doesn't work, we give up.
+    stop("No method asJSON S3 class: ", class(x))
+  }
+})
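
A sketch of the force fallback for an unsupported S3 class (hypothetical
class name, my example):

    obj <- structure(list(a = 1), class = "some_s3_class")
    toJSON(obj)                # error: no method for this class
    toJSON(obj, force = TRUE)  # {"a":[1]}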
diff --git a/R/asJSON.AsIs.R b/R/asJSON.AsIs.R
new file mode 100644
index 0000000..06951f3
--- /dev/null
+++ b/R/asJSON.AsIs.R
@@ -0,0 +1,14 @@
+setOldClass("AsIs")
+setMethod("asJSON", "AsIs", function(x, auto_unbox = FALSE, ...) {
+
+  # Strip off the AsIs class so we can dispatch to other asJSON methods.
+  class(x) <- setdiff(class(x), "AsIs")
+
+  if (is.atomic(x) && length(x) == 1) {
+    # Never auto_unbox single values when wrapped with I()
+    asJSON(x, auto_unbox = FALSE, ...)
+
+  } else {
+    asJSON(x, auto_unbox = auto_unbox, ...)
+  }
+})
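
A hedged sketch of the AsIs rule above: wrapping a length-1 vector in I()
keeps it boxed even when auto_unbox = TRUE.

    toJSON(list(x = 1), auto_unbox = TRUE)     # {"x":1}
    toJSON(list(x = I(1)), auto_unbox = TRUE)  # {"x":[1]}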
diff --git a/R/asJSON.Date.R b/R/asJSON.Date.R
new file mode 100644
index 0000000..1a350a8
--- /dev/null
+++ b/R/asJSON.Date.R
@@ -0,0 +1,15 @@
+setMethod("asJSON", "Date", function(x, Date = c("ISO8601", "epoch"), ...) {
+
+  # Validate argument
+  Date <- match.arg(Date)
+
+  # select a schema
+  output <- switch(Date,
+    ISO8601 = as.character(x),
+    epoch = unclass(x),
+    stop("Invalid argument for 'Date': ", Date)
+  )
+
+  # Dispatch to character encoding
+  asJSON(output, ...)
+})
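
Expected output for the two Date schemas, as I read the method above
(illustrative):

    toJSON(as.Date("2015-11-28"))                  # ["2015-11-28"]
    toJSON(as.Date("2015-11-28"), Date = "epoch")  # [16767]  (days since 1970-01-01)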
diff --git a/R/asJSON.NULL.R b/R/asJSON.NULL.R
new file mode 100644
index 0000000..eb8dbe5
--- /dev/null
+++ b/R/asJSON.NULL.R
@@ -0,0 +1,8 @@
+# Note that this is different from RJSONIO because null values are NA.
+setMethod("asJSON", "NULL", function(x, null = "list", ...) {
+  if(null == "null"){
+    return("null")
+  } else {
+    return("{}")
+  }
+})
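
A small sketch of the two null encodings; note that NULL inside a list
defaults to an empty object here, unlike RJSONIO:

    toJSON(list(x = NULL))                 # {"x":{}}
    toJSON(list(x = NULL), null = "null")  # {"x":null}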
diff --git a/R/asJSON.POSIXt.R b/R/asJSON.POSIXt.R
new file mode 100644
index 0000000..8931a84
--- /dev/null
+++ b/R/asJSON.POSIXt.R
@@ -0,0 +1,38 @@
+setMethod("asJSON", "POSIXt", function(x, POSIXt = c("string", "ISO8601", "epoch",
+  "mongo"), UTC = FALSE, digits, time_format = NULL, ...) {
+  # note: the UTC argument doesn't seem to be working consistently; maybe use
+  # ?format instead of ?as.character
+
+  # Validate
+  POSIXt <- match.arg(POSIXt)
+
+  # Encode based on a schema
+  if (POSIXt == "mongo") {
+    if (is(x, "POSIXlt")) {
+      x <- as.POSIXct(x)
+    }
+    return(asJSON(data.frame("$date" = floor(unclass(x) * 1000), check.names = FALSE), digits = 0, ...))
+  }
+
+  # Epoch millis
+  if (POSIXt == "epoch") {
+    return(asJSON(floor(unclass(as.POSIXct(x)) * 1000), digits = digits, ...))
+  }
+
+  # Strings
+  if(is.null(time_format)){
+    time_format <- if(POSIXt == "string"){
+      ""
+    } else if(isTRUE(UTC)){
+      "%Y-%m-%dT%H:%M:%SZ"
+    } else {
+      "%Y-%m-%dT%H:%M:%S"
+    }
+  }
+
+  if (isTRUE(UTC)) {
+    asJSON(as.character(x, format = time_format, tz = "UTC"), ...)
+  } else {
+    asJSON(as.character(x, format = time_format), ...)
+  }
+})
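
For illustration, assuming a timestamp constructed in UTC (output per the
branches above):

    t <- as.POSIXct("2015-11-28 09:38:27", tz = "UTC")
    toJSON(t, POSIXt = "ISO8601", UTC = TRUE)  # ["2015-11-28T09:38:27Z"]
    toJSON(t, POSIXt = "epoch")                # [1448703507000]  (milliseconds since epoch)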
diff --git a/R/asJSON.array.R b/R/asJSON.array.R
new file mode 100644
index 0000000..6be24c5
--- /dev/null
+++ b/R/asJSON.array.R
@@ -0,0 +1,33 @@
+setMethod("asJSON", "array", function(x, collapse = TRUE, na = NULL, oldna = NULL,
+  matrix = c("rowmajor", "columnmajor"), auto_unbox = FALSE, keep_vec_names = FALSE,
+  indent = NA_integer_, ...) {
+
+  #validate
+  matrix <- match.arg(matrix);
+
+  # reset na arg when called from data frame
+  if(identical(na, "NA")){
+    na <- oldna;
+  }
+
+  # 1D arrays are vectors
+  if(length(dim(x)) < 2){
+    return(asJSON(c(x), matrix = matrix, na = na, indent = indent + 2L, ...))
+  }
+
+  # if collapse == FALSE, then this matrix is nested inside a data frame,
+  # and therefore row major is required to match dimensions
+  # dont pass auto_unbox (never unbox within matrix)
+  margin <- ifelse(identical(matrix, "columnmajor") && isTRUE(collapse), length(dim(x)), 1);
+  tmp <- apply(x, margin, asJSON, matrix = matrix, na = na, indent = indent + 2L, ...)
+
+  # collapse it
+  if (collapse) {
+    collapse(tmp, inner = FALSE, indent = indent)
+  } else {
+    tmp
+  }
+})
+
+# Some objects have class Matrix but not class Array
+setMethod("asJSON", "matrix", getMethod("asJSON", "array"))
diff --git a/R/asJSON.character.R b/R/asJSON.character.R
new file mode 100644
index 0000000..b938db0
--- /dev/null
+++ b/R/asJSON.character.R
@@ -0,0 +1,42 @@
+setMethod("asJSON", "character", function(x, collapse = TRUE, na = c("null", "string", "NA"),
+  auto_unbox = FALSE, keep_vec_names = FALSE, indent = NA_integer_, ...) {
+
+  # shiny legacy exception
+  if(isTRUE(keep_vec_names) && length(names(x))){
+    warn_keep_vec_names()
+    return(asJSON(as.list(x), na = na, auto_unbox = TRUE, collapse = collapse, ...))
+  }
+
+  # vectorized escaping
+  tmp <- deparse_vector(x)
+
+  # this was used with deparse_vector_old
+  #if(identical(Encoding(x), "UTF-8")){
+  #  if(!grepl("UTF", Sys.getlocale("LC_CTYPE"), ignore.case=TRUE)){
+  #    tmp <- utf8conv(tmp);
+  #  }
+  #}
+
+  # validate NA
+  if (any(missings <- which(is.na(x)))) {
+    na <- match.arg(na)
+    if (na %in% c("null")) {
+      tmp[missings] <- "null"
+    } else if(na %in% "string") {
+      tmp[missings] <- "\"NA\""
+    } else {
+      tmp[missings] <- NA_character_
+    }
+  }
+
+  if(isTRUE(auto_unbox) && length(tmp) == 1){
+    return(tmp);
+  }
+
+  # this is almost always true, except for class 'scalar'
+  if (isTRUE(collapse)) {
+    collapse(tmp, indent = indent)
+  } else {
+    tmp
+  }
+})
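
The NA modes for character vectors, sketched ("null" is the default):

    toJSON(c("a", NA))                 # ["a",null]
    toJSON(c("a", NA), na = "string")  # ["a","NA"]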
diff --git a/R/asJSON.classRepresentation.R b/R/asJSON.classRepresentation.R
new file mode 100644
index 0000000..e93c29c
--- /dev/null
+++ b/R/asJSON.classRepresentation.R
@@ -0,0 +1,10 @@
+# classRepresentation is an object that defines an S4 class. Encoding it
+# usually doesn't serve much purpose; however, we don't want to encode it as a
+# regular S4 data object either.
+
+# It currently only encodes the slots. We could add encoding of methods if
+# that would be desired.
+
+setMethod("asJSON", "classRepresentation", function(x, ...) {
+  return(asJSON(attributes(x)$slots, ...))
+})
diff --git a/R/asJSON.complex.R b/R/asJSON.complex.R
new file mode 100644
index 0000000..c793415
--- /dev/null
+++ b/R/asJSON.complex.R
@@ -0,0 +1,24 @@
+setMethod("asJSON", "complex", function(x, digits = 5, collapse = TRUE, complex = c("string",
+  "list"), na = c("string", "null", "NA"), oldna = NULL, ...) {
+
+  # validate
+  na <- match.arg(na);
+  complex <- match.arg(complex)
+
+  #turn into strings
+  if (complex == "string") {
+    #default NA is "NA"
+    mystring <- prettyNum(x = x, digits = digits)
+    if (any(missings <- which(!is.finite(x)))){
+      if (na %in% c("null", "NA")) {
+        mystring[missings] <- NA_character_;
+      }
+    }
+    asJSON(mystring, collapse = collapse, na = na, ...)
+  } else {
+    if(na == "NA"){
+      na <- oldna;
+    }
+    asJSON(list(real = Re(x), imaginary = Im(x)), na = na, digits = digits, ...)
+  }
+})
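
The two complex schemas, sketched:

    z <- complex(real = 1, imaginary = 2)
    toJSON(z)                    # ["1+2i"]
    toJSON(z, complex = "list")  # {"real":[1],"imaginary":[2]}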
diff --git a/R/asJSON.data.frame.R b/R/asJSON.data.frame.R
new file mode 100644
index 0000000..8633e5e
--- /dev/null
+++ b/R/asJSON.data.frame.R
@@ -0,0 +1,111 @@
+setMethod("asJSON", "data.frame", function(x, na = c("NA", "null", "string"), collapse = TRUE,
+  dataframe = c("rows", "columns", "values"), complex = "string", oldna = NULL, rownames = NULL,
+  keep_vec_names = FALSE, indent = NA_integer_, ...) {
+
+  # Coerce pairlist if needed
+  if (is.pairlist(x)) {
+    x <- as.vector(x, mode = "list")
+  }
+
+  # Validate some args
+  dataframe <- match.arg(dataframe)
+  has_names <- identical(length(names(x)), ncol(x))
+
+  # Default to adding row names only if they are strings and not just stringified numbers
+  if(isTRUE(rownames) || (is.null(rownames) && is.character(attr(x, "row.names")) && !all(grepl("^\\d+$", row.names(x))))){
+    # we don't use row.names() because this converts numbers to strings,
+    # which will break sorting
+    if(has_names){
+      x[["_row"]] <- attr(x, "row.names")
+    }
+  }
+
+  # Unname named lists columns. These are very rare.
+  namedlistvars <- which(vapply(x, is.namedlistnotdf, logical(1)))
+  for (i in namedlistvars) {
+    x[[i]] <- unname(x[[i]])
+  }
+
+  # Convert POSIXlt to POSIXct before we start messing with lists
+  posvars <- which(vapply(x, is, logical(1), "POSIXlt"))
+  for (i in posvars) {
+    x[[i]] <- as.POSIXct(x[[i]])
+  }
+
+  # Column based is same as list. Do not pass collapse arg because it is a named list.
+  if (dataframe == "columns") {
+    return(asJSON(as.list(x), is_df = TRUE, na = na, dataframe = dataframe,
+      complex = complex, rownames = rownames, indent = indent, ...))
+  }
+
+  # Determine "oldna". This is needed when the data frame contains a list column
+  if(missing(na) || !length(na) || identical(na, "NA")){
+    oldna <- NULL
+  } else {
+    oldna <- na;
+  }
+
+  # Set default for row based, don't do it earlier because it will affect 'oldna' or dataframe="columns"
+  if(dataframe == "rows" && has_names){
+    na <- match.arg(na)
+  }
+
+  # no records
+  if (!nrow(x)) {
+    return(asJSON(list(), collapse=collapse, indent=indent))
+  }
+
+  # Convert raw vectors
+  rawvars <- which(vapply(x, is.raw, logical(1)))
+  for (i in rawvars) {
+    x[[i]] <- as.character.hexmode(x[[i]])
+  }
+
+  # Turn complex vectors into data frames
+  if(complex == "list"){
+    complxvars <- which(vapply(x, is.complex, logical(1)))
+    for (i in complxvars) {
+      x[[i]] <- data.frame(real=Re(x[[i]]), imaginary=Im(x[[i]]))
+    }
+  }
+
+  #create a matrix of json elements
+  dfnames <- deparse_vector(cleannames(names(x)))
+  out <- vapply(x, asJSON, character(nrow(x)), collapse=FALSE, complex = complex, na = na,
+    oldna = oldna, rownames = rownames, dataframe = dataframe, indent = indent + 2L,
+    ..., USE.NAMES = FALSE)
+
+  # This would be another way of doing the missing values
+  # This does not require the individual classes to support na="NA"
+  #if(identical(na, "NA")){
+  #  namatrix <- vapply(x, is.na, logical(nrow(x)))
+  #  out[namatrix] <- NA;
+  #}
+
+  #this is a workaround for vapply simplifying into a vector for n=1 (not for n=0 surprisingly)
+  if(!is.matrix(out)){
+    out <- t(out)
+  }
+
+  # turn the matrix into json records
+  # note: special row_collapse functions because apply is slow!
+  tmp <- if(dataframe == "rows" && (length(dfnames) == ncol(out))) {
+    #apply(out, 1, collapse_object, x = dfnames, indent = indent + 2L);
+    row_collapse_object(dfnames, out, indent = indent + 2L)
+  } else {
+    # for dataframe = "values"
+    #apply(out, 1, collapse, indent = indent);
+    row_collapse(out, indent = indent)
+  }
+
+  #collapse
+  if(isTRUE(collapse)){
+    collapse(tmp, inner = FALSE, indent = indent)
+  } else {
+    tmp
+  }
+})
+
+is.namedlistnotdf <- function(x){
+  isTRUE(is.list(x) && !is.data.frame(x) && !is.null(names(x)))
+}
diff --git a/R/asJSON.factor.R b/R/asJSON.factor.R
new file mode 100644
index 0000000..12e5a35
--- /dev/null
+++ b/R/asJSON.factor.R
@@ -0,0 +1,17 @@
+setMethod("asJSON", "factor", function(x, factor = c("string", "integer"), keep_vec_names = FALSE, ...) {
+  # validate
+  factor <- match.arg(factor)
+
+  # dispatch
+  if (factor == "integer") {
+    # encode factor as enum
+    asJSON(unclass(x), ...)
+  } else {
+    # encode as strings
+    xc <- as.character(x)
+    if(isTRUE(keep_vec_names)){
+      names(xc) <- names(x)
+    }
+    asJSON(xc, keep_vec_names = keep_vec_names, ...)
+  }
+})
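
The two factor schemas, sketched (integer codes follow the level order):

    f <- factor(c("b", "a"))        # levels: a=1, b=2
    toJSON(f)                       # ["b","a"]
    toJSON(f, factor = "integer")   # [2,1]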
diff --git a/R/asJSON.function.R b/R/asJSON.function.R
new file mode 100644
index 0000000..2bf6450
--- /dev/null
+++ b/R/asJSON.function.R
@@ -0,0 +1,11 @@
+setMethod("asJSON", "function", function(x, collapse = TRUE, fun = c("source", "list"), 
+  ...) {
+  # validate
+  fun <- match.arg(fun)
+  
+  if (fun == "source") {
+    return(asJSON(deparse(x), ...))
+  } else {
+    return(asJSON(as.list(x), ...))
+  }
+}) 
diff --git a/R/asJSON.int64.R b/R/asJSON.int64.R
new file mode 100644
index 0000000..39323e2
--- /dev/null
+++ b/R/asJSON.int64.R
@@ -0,0 +1,4 @@
+#setOldClass("int64")
+#setMethod("asJSON", "int64", function(x, digits, ...) {
+#  asJSON(as.double(as.character(x)), digits = 0, ...)
+#})
diff --git a/R/asJSON.json.R b/R/asJSON.json.R
new file mode 100644
index 0000000..18274ab
--- /dev/null
+++ b/R/asJSON.json.R
@@ -0,0 +1,9 @@
+# If an object has already been encoded by toJSON(), do not encode it again
+setOldClass("json")
+setMethod("asJSON", "json", function(x, json_verbatim = FALSE, ...) {
+  if(isTRUE(json_verbatim)){
+    x
+  } else {
+    asJSON(as.character(x), ...)
+  }
+})
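
A sketch of json_verbatim, useful for splicing pre-encoded fragments into
larger output (my example, not from the upstream sources):

    inner <- toJSON(1:2)                           # class "json"
    toJSON(list(x = inner))                        # {"x":["[1,2]"]}  (re-escaped)
    toJSON(list(x = inner), json_verbatim = TRUE)  # {"x":[1,2]}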
diff --git a/R/asJSON.list.R b/R/asJSON.list.R
new file mode 100644
index 0000000..ef316a1
--- /dev/null
+++ b/R/asJSON.list.R
@@ -0,0 +1,54 @@
+setMethod("asJSON", "list", function(x, collapse = TRUE, na = NULL, oldna = NULL,
+  is_df = FALSE, auto_unbox = FALSE, indent = NA_integer_, ...) {
+
+  # reset na arg when called from data frame
+  if(identical(na, "NA")){
+    na <- oldna;
+  }
+
+  # coerce pairlist if needed
+  if (is.pairlist(x)) {
+    x <- as.vector(x, mode = "list")
+  }
+
+  # empty vector
+  #if (!length(x)) {
+  #  if(collapse) {
+  #    return(if (is.null(names(x))) "[]" else "{}")
+  #  } else {
+  #    return(character())
+  #  }
+  #}
+
+  # this condition appears when a dataframe contains a column with lists we need to
+  # do this, because the [ operator always returns a list of length 1
+  # if (length(x) == 1 && is.null(names(x)) && collapse == FALSE) {
+  #   return(asJSON(x[[1]], ...))
+  # }
+
+  # note we are NOT passing on the container argument.
+  tmp <- if(is_df && auto_unbox){
+    vapply(x, function(y, ...) {
+      asJSON(y, auto_unbox = is.list(y), ...)
+    }, character(1), na = na, indent = indent + 2L, ...)
+  } else {
+    vapply(x, asJSON, character(1), na = na, auto_unbox = auto_unbox, indent = indent + 2L, ...)
+  }
+
+  if (!is.null(names(x))) {
+    if(!collapse){
+      #this should never happen
+      warning("collapse=FALSE called for named list.")
+    }
+    #in case of named list:
+    objnames <- deparse_vector(cleannames(names(x)))
+    collapse_object(objnames, tmp, indent)
+  } else {
+    #in case of unnamed list:
+    if(collapse){
+      collapse(tmp, inner = FALSE, indent)
+    } else {
+      tmp
+    }
+  }
+})
diff --git a/R/asJSON.logical.R b/R/asJSON.logical.R
new file mode 100644
index 0000000..d63a2d4
--- /dev/null
+++ b/R/asJSON.logical.R
@@ -0,0 +1,39 @@
+setMethod("asJSON", "logical", function(x, collapse = TRUE, na = c("null", "string", "NA"),
+  auto_unbox = FALSE, keep_vec_names = FALSE, indent = NA_integer_, ...) {
+
+  # shiny legacy exception
+  if(isTRUE(keep_vec_names) && length(names(x))){
+    warn_keep_vec_names()
+    return(asJSON(as.list(x), collapse = collapse, na = na, auto_unbox = TRUE, ...))
+  }
+
+  # validate arg
+  na <- match.arg(na)
+
+  # json true/false
+  tmp <- ifelse(x, "true", "false")
+
+  # replace missing values, unless na="NA"
+  if(!identical(na, "NA")){
+    # logical values can have NA (but not Inf/NaN). Default is to encode as null.
+    if (any(missings <- which(is.na(x)))) {
+      tmp[missings] <- ifelse(identical(na, "string"), "\"NA\"", "null")
+    }
+  }
+
+  #this is needed when !length(tmp) or all(is.na(tmp))
+  if(!is.character(tmp)){
+    tmp <- as.character(tmp);
+  }
+
+  if(isTRUE(auto_unbox) && length(tmp) == 1){
+    return(tmp);
+  }
+
+  # collapse it
+  if(collapse) {
+    collapse(tmp, indent = indent)
+  } else {
+    tmp
+  }
+})
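
Logical NA handling, sketched (null is the default):

    toJSON(c(TRUE, NA, FALSE))                 # [true,null,false]
    toJSON(c(TRUE, NA, FALSE), na = "string")  # [true,"NA",false]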
diff --git a/R/asJSON.numeric.R b/R/asJSON.numeric.R
new file mode 100644
index 0000000..5acbeb3
--- /dev/null
+++ b/R/asJSON.numeric.R
@@ -0,0 +1,43 @@
+setMethod("asJSON", "numeric", function(x, digits = 5, use_signif = is(digits, "AsIs"),
+  na = c("string", "null", "NA"), auto_unbox = FALSE, collapse = TRUE,
+  keep_vec_names = FALSE, indent = NA_integer_, ...) {
+
+  # shiny legacy exception
+  if(isTRUE(keep_vec_names) && length(names(x))){
+    warn_keep_vec_names()
+    return(asJSON(as.list(x), digits = digits, use_signif = use_signif, na = na,
+      auto_unbox = TRUE, collapse = collapse, ...))
+  }
+
+  na <- match.arg(na);
+  na_as_string <- switch(na,
+    "string" = TRUE,
+    "null" = FALSE,
+    "NA" = NA,
+    stop("invalid na_as_string")
+  )
+
+  # old R implementation
+  # tmp <- num_to_char_R(x, digits, na_as_string);
+
+  # fast C implementation
+  tmp <- if(is(x, "integer64")){
+    integer64_to_char(x, na_as_string)
+  } else {
+    num_to_char(x, digits, na_as_string, use_signif);
+  }
+
+  if(isTRUE(auto_unbox) && length(tmp) == 1){
+    return(tmp);
+  }
+
+  if(collapse){
+    collapse(tmp, indent = indent)
+  } else {
+    tmp
+  }
+})
+
+# This is for the bit64 package
+setOldClass("integer64")
+setMethod("asJSON", "integer64", getMethod("asJSON", "numeric"));
diff --git a/R/asJSON.pairlist.R b/R/asJSON.pairlist.R
new file mode 100644
index 0000000..6a4ed5b
--- /dev/null
+++ b/R/asJSON.pairlist.R
@@ -0,0 +1,4 @@
+setOldClass("pairlist")
+setMethod("asJSON", "pairlist", function(x, ...) {
+  asJSON(as.vector(x, mode = "list"), ...)
+})
diff --git a/R/asJSON.raw.R b/R/asJSON.raw.R
new file mode 100644
index 0000000..13acb73
--- /dev/null
+++ b/R/asJSON.raw.R
@@ -0,0 +1,15 @@
+setMethod("asJSON", "raw", function(x, raw = c("base64", "hex", "mongo"), ...) {
+
+  # validate
+  raw <- match.arg(raw)
+
+  # encode based on schema
+  if (raw == "mongo") {
+    return(asJSON(list(`$binary` = as.scalar(base64_encode(x)), `$type` = as.scalar("5"))))
+  } else if (raw == "hex") {
+    return(asJSON(as.character.hexmode(x), ...))
+  } else {
+    # no as scalar here!
+    return(asJSON(base64_encode(x), ...))
+  }
+})
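
The raw schemas, sketched; base64 is the default and "mongo" wraps the value
in a $binary object:

    r <- charToRaw("hi")
    toJSON(r)               # ["aGk="]
    toJSON(r, raw = "hex")  # ["68","69"]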
diff --git a/R/asJSON.scalar.R b/R/asJSON.scalar.R
new file mode 100644
index 0000000..16929ef
--- /dev/null
+++ b/R/asJSON.scalar.R
@@ -0,0 +1,12 @@
+setOldClass("scalar")
+setMethod("asJSON", "scalar", function(x, collapse, ...) {
+  # TODO: There must be a way to do this with NextMethod()
+  if (length(class(x)) > 1) {
+    class(x) <- class(x)[-1]
+  } else {
+    x <- unclass(x)
+  }
+
+  # Print JSON without []
+  return(asJSON(x, collapse = FALSE, ...))
+})
diff --git a/R/asJSON.ts.R b/R/asJSON.ts.R
new file mode 100644
index 0000000..4422e1b
--- /dev/null
+++ b/R/asJSON.ts.R
@@ -0,0 +1,5 @@
+#this is a placeholder for something better, hopefully
+#I have no idea what is appropriate for time series
+setMethod("asJSON", "ts", function(x, ...) {
+  asJSON(as.vector(x), ...)
+})
diff --git a/R/base64.R b/R/base64.R
new file mode 100644
index 0000000..7cbdb87
--- /dev/null
+++ b/R/base64.R
@@ -0,0 +1,19 @@
+# These functions have been taken from the base64 package by Francois Romain. It
+# was easier to copy than to import. They will not be exported.
+base64_decode <- function(input) {
+  stopifnot(is.character(input))
+  inputtf <- tempfile()
+  writeLines(input, inputtf)
+  output <- tempfile()
+  invisible(.Call("base64_decode_", inputtf, output))
+  readBin(output, "raw", file.info(output)$size)
+}
+
+base64_encode <- function(input, linesize = 1e+09) {
+  stopifnot(is.raw(input))
+  inputtf <- tempfile()
+  writeBin(input, inputtf)
+  output <- tempfile()
+  invisible(.Call("base64_encode_", inputtf, output, as.integer(linesize)))
+  return(readLines(output))
+}
diff --git a/R/cleannames.R b/R/cleannames.R
new file mode 100644
index 0000000..8c6beb5
--- /dev/null
+++ b/R/cleannames.R
@@ -0,0 +1,6 @@
+cleannames <- function(objnames){
+  objnames[objnames == ""] <- NA_character_
+  is_missing <- is.na(objnames)
+  objnames[is_missing] <- as.character(seq_len(length(objnames)))[is_missing]
+  make.unique(objnames)
+}
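
How cleannames repairs empty, missing and duplicate object names
(illustrative):

    cleannames(c("a", "", "a", NA))
    # [1] "a"   "2"   "a.1" "4"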
diff --git a/R/collapse.R b/R/collapse.R
new file mode 100644
index 0000000..46582b4
--- /dev/null
+++ b/R/collapse.R
@@ -0,0 +1,15 @@
+#' @useDynLib jsonlite C_collapse_array C_collapse_array_pretty_inner C_collapse_array_pretty_outer
+collapse <- function(x, inner = TRUE, indent = 0L) {
+  if(is.na(indent)){
+    .Call(C_collapse_array, x)
+  } else if(isTRUE(inner)){
+    .Call(C_collapse_array_pretty_inner, x, indent)
+  } else {
+    .Call(C_collapse_array_pretty_outer, x, indent)
+  }
+}
+
+#' @useDynLib jsonlite C_row_collapse_array
+row_collapse <- function(m, indent = NA_integer_){
+  .Call(C_row_collapse_array, m, indent = indent)
+}
diff --git a/R/collapse_object.R b/R/collapse_object.R
new file mode 100644
index 0000000..635bb60
--- /dev/null
+++ b/R/collapse_object.R
@@ -0,0 +1,13 @@
+#' @useDynLib jsonlite C_collapse_object C_collapse_object_pretty
+collapse_object <- function(x, y, indent = 0L) {
+  if(is.na(indent)){
+    .Call(C_collapse_object, x, y)
+  } else {
+    .Call(C_collapse_object_pretty, x, y, indent)
+  }
+}
+
+#' @useDynLib jsonlite C_row_collapse_object
+row_collapse_object <- function(x, m, indent = NA_integer_){
+  .Call(C_row_collapse_object, x, m, indent = indent)
+}
diff --git a/R/deparse_vector.R b/R/deparse_vector.R
new file mode 100644
index 0000000..4c874c6
--- /dev/null
+++ b/R/deparse_vector.R
@@ -0,0 +1,27 @@
+#' @useDynLib jsonlite C_escape_chars
+deparse_vector_c <- function(x) {
+  .Call(C_escape_chars, x)
+}
+
+deparse_vector_r <- function(x) {
+  stopifnot(is.character(x))
+  if(!length(x)) return(x)
+  x <- gsub("\\", "\\\\", x, fixed=TRUE)
+  x <- gsub("\"", "\\\"", x, fixed=TRUE)
+  x <- gsub("\n", "\\n", x, fixed=TRUE)
+  x <- gsub("\r", "\\r", x, fixed=TRUE)
+  x <- gsub("\t", "\\t", x, fixed=TRUE)
+  x <- gsub("\b", "\\b", x, fixed=TRUE)
+  x <- gsub("\f", "\\f", x, fixed=TRUE)
+  paste0("\"", x, "\"")
+}
+
+# Which implementation to use
+deparse_vector <- deparse_vector_c
+
+#Below are older implementations of the same function
+deparse_vector_old <- function(x) {
+  stopifnot(is.character(x))
+  x <- gsub("[\v\a]", "", x)
+  vapply(x, deparse, character(1), USE.NAMES=FALSE)
+}
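
Escaping behavior of the R fallback implementation (the C version used by
default should produce the same strings):

    cat(deparse_vector_r(c('say "hi"', "line1\nline2")), sep = "\n")
    # "say \"hi\""
    # "line1\nline2"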
diff --git a/R/fixNativeSymbol.R b/R/fixNativeSymbol.R
new file mode 100644
index 0000000..514e112
--- /dev/null
+++ b/R/fixNativeSymbol.R
@@ -0,0 +1,25 @@
+fixNativeSymbol <- function(symbol) {
+  if (is(symbol, "NativeSymbolInfo")) {
+    # method depends on version
+    rVersion <- getRversion()
+
+    if (rVersion >= "3.0") {
+      # in R 3.0 determine the dll that the symbol lives in
+      name <- ifelse(is.null(symbol$package), symbol$dll[["name"]], symbol$package[["name"]])
+
+      # load package if not yet loaded
+      try(getNamespace(name))
+      pkgDLL <- getLoadedDLLs()[[name]]
+
+      # reconstruct the native symbol address
+      newsymbol <- getNativeSymbolInfo(name = symbol$name, PACKAGE = pkgDLL,
+        withRegistrationInfo = TRUE)
+      symbol$address <- newsymbol$address
+      return(symbol)
+    } else if (rVersion >= "2.14") {
+      return(getNativeSymbolInfo(symbol$name))
+    }
+  } else {
+    return(symbol)
+  }
+}
diff --git a/R/flatten.R b/R/flatten.R
new file mode 100644
index 0000000..57f1f31
--- /dev/null
+++ b/R/flatten.R
@@ -0,0 +1,46 @@
+#' Flatten nested data frames
+#'
+#' In a nested data frame, one or more of the columns consist of another data
+#' frame. These structures frequently appear when parsing JSON data from the web.
+#' We can flatten such data frames into a regular 2 dimensional tabular structure.
+#'
+#' @export
+#' @param x a data frame
+#' @param recursive flatten recursively
+#' @examples options(stringsAsFactors=FALSE)
+#' x <- data.frame(driver = c("Bowser", "Peach"), occupation = c("Koopa", "Princess"))
+#' x$vehicle <- data.frame(model = c("Piranha Prowler", "Royal Racer"))
+#' x$vehicle$stats <- data.frame(speed = c(55, 34), weight = c(67, 24), drift = c(35, 32))
+#' str(x)
+#' str(flatten(x))
+#' str(flatten(x, recursive = FALSE))
+#'
+#' \dontrun{
+#' data1 <- fromJSON("https://api.github.com/users/hadley/repos")
+#' colnames(data1)
+#' colnames(data1$owner)
+#' colnames(flatten(data1))
+#'
+#' # or for short:
+#' data2 <- fromJSON("https://api.github.com/users/hadley/repos", flatten = TRUE)
+#' colnames(data2)
+#' }
+#'
+flatten <- function(x, recursive = TRUE){
+  stopifnot(is.data.frame(x))
+  nr <- nrow(x)
+  dfcolumns <- vapply(x, is.data.frame, logical(1))
+  if(!any(dfcolumns)){
+    return(x)
+  }
+  x <- if(recursive){
+    c(x[!dfcolumns], do.call(c, lapply(x[dfcolumns], flatten)))
+  } else {
+    c(x[!dfcolumns], do.call(c, x[dfcolumns]))
+  }
+  class(x) <- "data.frame"
+  row.names(x) <- if(!nr) character(0) else 1:nr;
+  x
+}
+
+#1,2,3,df1,5,6,7,df2,9
diff --git a/R/fromJSON.R b/R/fromJSON.R
new file mode 100644
index 0000000..5c1ab87
--- /dev/null
+++ b/R/fromJSON.R
@@ -0,0 +1,125 @@
+#' These functions are used to convert between JSON data and \R{} objects. The \code{\link{toJSON}} and \code{\link{fromJSON}}
+#' functions use a class based mapping, which follows conventions outlined in this paper:  \url{http://arxiv.org/abs/1403.2805} (also available as vignette).
+#'
+#' The \code{\link{toJSON}} and \code{\link{fromJSON}} functions are drop-in replacements for the identically named functions
+#' in packages \code{rjson} and \code{RJSONIO}. Our implementation uses an alternative, somewhat more consistent mapping
+#' between \R{} objects and JSON strings.
+#'
+#' The \code{\link{serializeJSON}} and \code{\link{unserializeJSON}} functions in this package use an
+#' alternative system to convert between \R{} objects and JSON, which supports more classes but is much more verbose.
+#'
+#' A JSON string is always unicode, using \code{UTF-8} by default, hence there is usually no need to escape any characters.
+#' However, the JSON format does support escaping of unicode characters, which are encoded using a backslash followed by
+#' a lower case \code{"u"} and 4 hex characters, for example: \code{"Z\\u00FCrich"}. The \code{fromJSON} function
+#' will parse such escape sequences but it is usually preferable to encode unicode characters in JSON using native
+#' \code{UTF-8} rather than escape sequences.
+#'
+#' @rdname fromJSON
+#' @title Convert \R{} objects to/from JSON
+#' @name toJSON, fromJSON
+#' @aliases fromJSON toJSON jsonlite
+#' @export fromJSON toJSON
+#' @param txt a JSON string, URL or file
+#' @param simplifyVector coerce JSON arrays containing only primitives into an atomic vector
+#' @param simplifyDataFrame coerce JSON arrays containing only records (JSON objects) into a data frame
+#' @param simplifyMatrix coerce JSON arrays containing vectors of equal mode and dimension into matrix or array
+#' @param flatten automatically \code{\link{flatten}} nested data frames into a single non-nested data frame
+#' @param x the object to be encoded
+#' @param dataframe how to encode data.frame objects: must be one of 'rows', 'columns' or 'values'
+#' @param matrix how to encode matrices and higher dimensional arrays: must be one of 'rowmajor' or 'columnmajor'.
+#' @param Date how to encode Date objects: must be one of 'ISO8601' or 'epoch'
+#' @param POSIXt how to encode POSIXt (datetime) objects: must be one of 'string', 'ISO8601', 'epoch' or 'mongo'
+#' @param factor how to encode factor objects: must be one of 'string' or 'integer'
+#' @param complex how to encode complex numbers: must be one of 'string' or 'list'
+#' @param raw how to encode raw objects: must be one of 'base64', 'hex' or 'mongo'
+#' @param null how to encode NULL values within a list: must be one of 'null' or 'list'
+#' @param na how to print NA values: must be one of 'null' or 'string'. Defaults are class specific
+#' @param auto_unbox automatically \code{\link{unbox}} all atomic vectors of length 1. It is usually safer to avoid this and instead use the \code{\link{unbox}} function to unbox individual elements.
+#'   An exception is that objects of class \code{AsIs} (i.e. wrapped in \code{I()}) are not automatically unboxed. This is a way to mark single values as length-1 arrays.
+#' @param digits max number of decimal digits to print for numeric values. Use \code{I()} to specify significant digits.
+#' @param force unclass/skip objects of classes with no defined JSON mapping
+#' @param pretty adds indentation whitespace to JSON output. Can be TRUE/FALSE or a number specifying the number of spaces to indent. See \code{\link{prettify}}
+#' @param ... arguments passed on to class specific \code{print} methods
+#' @references Jeroen Ooms (2014). The \code{jsonlite} Package: A Practical and Consistent Mapping Between JSON Data and \R{} Objects. \emph{arXiv:1403.2805}. \url{http://arxiv.org/abs/1403.2805}
+#' @examples # Stringify some data
+#' jsoncars <- toJSON(mtcars, pretty=TRUE)
+#' cat(jsoncars)
+#'
+#' # Parse it back
+#' fromJSON(jsoncars)
+#'
+#' # Parse escaped unicode
+#' fromJSON('{"city" : "Z\\u00FCrich"}')
+#'
+#' # Decimal vs significant digits
+#' toJSON(pi, digits=3)
+#' toJSON(pi, digits=I(3))
+#'
+#' \dontrun{
+#' # retrieve data frame
+#' data1 <- fromJSON("https://api.github.com/users/hadley/orgs")
+#' names(data1)
+#' data1$login
+#'
+#' # Nested data frames:
+#' data2 <- fromJSON("https://api.github.com/users/hadley/repos")
+#' names(data2)
+#' names(data2$owner)
+#' data2$owner$login
+#'
+#' # Flatten the data into a regular non-nested dataframe
+#' names(flatten(data2))
+#'
+#' # Flatten directly (more efficient):
+#' data3 <- fromJSON("https://api.github.com/users/hadley/repos", flatten = TRUE)
+#' identical(data3, flatten(data2))
+#' }
+fromJSON <- function(txt, simplifyVector = TRUE, simplifyDataFrame = simplifyVector,
+  simplifyMatrix = simplifyVector, flatten = FALSE, ...) {
+
+  # check type
+  if (!is.character(txt) && !is(txt, "connection")) {
+    stop("Argument 'txt' must be a JSON string, URL or file.")
+  }
+
+  # overload for URL or path
+  if (is.character(txt) && length(txt) == 1 && nchar(txt, type="bytes") < 10000) {
+    if (grepl("^https?://", txt, useBytes=TRUE)) {
+      loadpkg("curl")
+      h <- curl::new_handle(useragent = paste("jsonlite /", R.version.string))
+      curl::handle_setheaders(h, Accept = "application/json, text/*, */*")
+      txt <- curl::curl(txt, handle = h)
+    } else if (file.exists(txt)) {
+      # With files we can never know the encoding for sure. Let's try UTF-8 first.
+      # txt <- raw_to_json(readBin(txt, raw(), file.info(txt)$size));
+      txt <- file(txt)
+    }
+  }
+
+  # call the actual function (with deprecated arguments)
+  fromJSON_string(txt = txt, simplifyVector = simplifyVector, simplifyDataFrame = simplifyDataFrame,
+    simplifyMatrix = simplifyMatrix, flatten = flatten, ...)
+}
+
+fromJSON_string <- function(txt, simplifyVector = TRUE, simplifyDataFrame = simplifyVector,
+  simplifyMatrix = simplifyVector, flatten = FALSE, unicode = TRUE, validate = TRUE, bigint_as_char = FALSE, ...){
+
+  if(!missing(unicode)){
+    message("Argument unicode has been deprecated. YAJL always parses unicode.")
+  }
+
+  if(!missing(validate)){
+    message("Argument validate has been deprecated. YAJL automatically validates json while parsing.")
+  }
+
+  # parse
+  obj <- parseJSON(txt, bigint_as_char)
+
+  # post processing
+  if (any(isTRUE(simplifyVector), isTRUE(simplifyDataFrame), isTRUE(simplifyMatrix))) {
+    return(simplify(obj, simplifyVector = simplifyVector, simplifyDataFrame = simplifyDataFrame,
+      simplifyMatrix = simplifyMatrix, flatten = flatten, ...))
+  } else {
+    return(obj)
+  }
+}
+
diff --git a/R/helpfunctions.R b/R/helpfunctions.R
new file mode 100644
index 0000000..c6893e8
--- /dev/null
+++ b/R/helpfunctions.R
@@ -0,0 +1,29 @@
+# S4 to list object. Not quite sure if this really works in general. You probably
+# shouldn't use S4 instances with JSON anyway because you don't know the class
+# definition.
+
+S4tolist <- function(x) {
+  structure(lapply(slotNames(x), slot, object = x), .Names = slotNames(x))
+}
+
+# ENCODING TOOLS
+
+# opposite of unname: force list into named list to get key/value json encodings
+givename <- function(obj) {
+  return(structure(obj, names = as.character(names(obj))))
+}
+
+# trim whitespace
+trim <- function(x) {
+  gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
+}
+
+# put double quotes around a string
+wrapinquotes <- function(x) {
+  paste("\"", x, "\"", sep = "")
+}
+
+# DECODING TOOLS
+evaltext <- function(text) {
+  return(eval(parse(text = text)))
+}
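+
+# Usage sketch (illustrative, not part of the package API):
+#   trim("  foo  ")      # "foo"
+#   wrapinquotes("foo")  # "\"foo\""
+#   evaltext("1 + 1")    # 2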
diff --git a/R/is.recordlist.R b/R/is.recordlist.R
new file mode 100644
index 0000000..feaa38a
--- /dev/null
+++ b/R/is.recordlist.R
@@ -0,0 +1,26 @@
+#' @useDynLib jsonlite C_is_recordlist
+is_recordlist_c <- function(x){
+  .Call(C_is_recordlist, x)
+}
+
+is_recordlist_r <- function(x) {
+  if (!(is.unnamedlist(x) && length(x))) {
+    return(FALSE)
+  }
+  at_least_one_object = FALSE
+  for(i in x){
+    if(!(is.namedlist(i) || is.null(i))) return(FALSE)
+    if(!at_least_one_object && is.namedlist(i)) at_least_one_object <- TRUE
+  }
+  return(at_least_one_object)
+}
+
+is.recordlist <- is_recordlist_c;
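+
+# Usage sketch (illustrative; the C version mirrors the R fallback above):
+# a record list is an unnamed list of named lists (or NULLs), i.e. what a
+# JSON array of objects parses into.
+#   is_recordlist_r(list(list(a = 1), list(a = 2, b = 3)))  # TRUE
+#   is_recordlist_r(list(list(a = 1), NULL))                # TRUE
+#   is_recordlist_r(list(1, 2))                             # FALSE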
+
+is.namedlist <- function(x) {
+  isTRUE(is.list(x) && !is.null(names(x)))
+}
+
+is.unnamedlist <- function(x) {
+  isTRUE(is.list(x) && is.null(names(x)))
+}
diff --git a/R/is.scalarlist.R b/R/is.scalarlist.R
new file mode 100644
index 0000000..a9dd024
--- /dev/null
+++ b/R/is.scalarlist.R
@@ -0,0 +1,14 @@
+is_scalarlist_r <- function(x) {
+  if(!is.list(x)) return(FALSE)
+  for(i in x){
+    if(!is.atomic(i) || length(i) > 1) return(FALSE)
+  }
+  return(TRUE)
+}
+
+#' @useDynLib jsonlite C_is_scalarlist
+is_scalarlist_c <- function(x){
+  .Call(C_is_scalarlist, x)
+}
+
+is.scalarlist <- is_scalarlist_c;
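+
+# Usage sketch (illustrative): a scalar list holds only length-one atomics,
+# i.e. what a JSON array of primitives parses into.
+#   is_scalarlist_r(list(1, "a", TRUE))  # TRUE
+#   is_scalarlist_r(list(1:2))           # FALSE (element has length 2)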
diff --git a/R/list_to_vec.R b/R/list_to_vec.R
new file mode 100644
index 0000000..46a64aa
--- /dev/null
+++ b/R/list_to_vec.R
@@ -0,0 +1,3 @@
+list_to_vec <- function(x) {
+  return(unlist(null_to_na(x), recursive = FALSE, use.names = FALSE))
+}
diff --git a/R/loadpkg.R b/R/loadpkg.R
new file mode 100644
index 0000000..60dc233
--- /dev/null
+++ b/R/loadpkg.R
@@ -0,0 +1,6 @@
+loadpkg <- function(pkg){
+  tryCatch(getNamespace(pkg), error = function(e) {
+    stop("Required package ", pkg, " not found. Please run: install.packages('", pkg, "')",
+      call. = FALSE)
+  })
+}
diff --git a/R/makesymbol.R b/R/makesymbol.R
new file mode 100644
index 0000000..1cefe9b
--- /dev/null
+++ b/R/makesymbol.R
@@ -0,0 +1,9 @@
+# Note: 'symbol' is the same thing as 'name'. For some reason, as.name('') gives
+# an error, even though it is sometimes needed. This is a workaround.
+makesymbol <- function(x) {
+  if (missing(x) || nchar(x) == 0) {
+    return(substitute())
+  } else {
+    as.name(x)
+  }
+}
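+
+# Usage sketch (illustrative):
+#   makesymbol("foo")  # the symbol `foo`
+#   makesymbol("")     # the empty symbol, as used for missing arguments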
diff --git a/R/null_to_na.R b/R/null_to_na.R
new file mode 100644
index 0000000..160f30b
--- /dev/null
+++ b/R/null_to_na.R
@@ -0,0 +1,38 @@
+#' @useDynLib jsonlite C_null_to_na
+null_to_na_c <- function(x) {
+  .Call(C_null_to_na, x)
+}
+
+null_to_na_r <- function(x){
+  if (!length(x)) {
+    return(vector())
+  }
+
+  #Start parsing missing values
+  x2 <- x
+  looks_like_character_vector = FALSE
+  for(i in seq_along(x2)){
+    if(is.character(x2[[i]])){
+      x2[[i]] <- switch(x2[[i]],
+        "NA" = NA,
+        "NaN" = NaN,
+        "Inf" = Inf,
+        "-Inf" = -Inf,
+        {looks_like_character_vector=TRUE; break}
+      )
+    }
+  }
+
+  # Set x
+  if(!looks_like_character_vector){
+    x <- x2
+  }
+
+  # Convert NULL to NA
+  x[vapply(x, is.null, logical(1))] <- NA
+
+  #return
+  return(x)
+}
+
+null_to_na <- null_to_na_c;
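+
+# Usage sketch (illustrative): string sentinels are only converted when no
+# other character data is present, so character vectors survive intact.
+#   null_to_na_r(list(1, NULL, "NA"))  # list(1, NA, NA)
+#   null_to_na_r(list("foo", "NA"))    # list("foo", "NA"), kept as character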
diff --git a/R/num_to_char.R b/R/num_to_char.R
new file mode 100644
index 0000000..0be4677
--- /dev/null
+++ b/R/num_to_char.R
@@ -0,0 +1,40 @@
+#' @useDynLib jsonlite R_num_to_char
+num_to_char <- function(x, digits = NA, na_as_string = NA, use_signif = FALSE){
+  if(is.na(digits)) digits <- NA_integer_;
+  stopifnot(is.numeric(x))
+  stopifnot(is.numeric(digits))
+  stopifnot(is.logical(na_as_string))
+  .Call(R_num_to_char, x, digits, na_as_string, use_signif)
+}
+
+#' @useDynLib jsonlite R_integer64_to_char
+integer64_to_char <- function(x, na_as_string = TRUE){
+  .Call(R_integer64_to_char, x, na_as_string)
+}
+
+num_to_char_R <- function(x, digits = NA, na_as_string = NA){
+  if(is.na(digits)) digits <- NA_integer_;
+  stopifnot(is.numeric(x))
+  stopifnot(is.numeric(digits))
+  stopifnot(is.logical(na_as_string))
+  if(!is.integer(x) && !is.null(digits) && !is.na(digits)){
+    x <- round(x, digits)
+  }
+
+  #convert to strings
+  tmp <- as.character(x)
+
+  # in numeric variables, NA, NaN, Inf are replaced by character strings
+  if (any(missings <- which(!is.finite(x)))) {
+    if(is.na(na_as_string)){
+      tmp[missings] <- NA_character_;
+    } else if(na_as_string){
+      tmp[missings] <- wrapinquotes(x[missings])
+    } else {
+      tmp[missings] <- "null"
+    }
+  }
+
+  #returns a character vector
+  return(tmp)
+}
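+
+# Usage sketch (illustrative) of the R reference implementation:
+#   num_to_char_R(c(1.234, NA, Inf), digits = 2, na_as_string = FALSE)
+#   # c("1.23", "null", "null")
+#   num_to_char_R(c(1.234, NA), digits = 2, na_as_string = TRUE)
+#   # c("1.23", "\"NA\"")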
diff --git a/R/pack.R b/R/pack.R
new file mode 100644
index 0000000..3456808
--- /dev/null
+++ b/R/pack.R
@@ -0,0 +1,107 @@
+# Note: For S4, the value is the class definition. The slots (data) are in the
+# attributes.
+pack <- function(obj, ...) {
+
+  # encode by storage mode
+  encoding.mode <- typeof(obj)
+
+  # needed because formals become attributes, etc
+  if (encoding.mode == "closure") {
+    obj <- as.list(obj)
+  }
+
+  # special exception
+  if (encoding.mode == "environment" && isNamespace(obj)) {
+    encoding.mode <- "namespace"
+  }
+
+  # encode recursively
+  list(
+    type = as.scalar(encoding.mode),
+    attributes = givename(lapply(attributes(obj), pack, ...)),
+    value = switch(encoding.mode,
+      `NULL` = obj,
+      environment = NULL,
+      externalptr = NULL,
+      namespace = lapply(as.list(getNamespaceInfo(obj, "spec")), as.scalar),
+      S4 = list(class = as.scalar(as.character(attr(obj, "class"))), package = as.scalar(attr(attr(obj, "class"), "package"))),
+      raw = as.scalar(base64_encode(unclass(obj))),
+      logical = as.vector(unclass(obj), mode = "logical"),
+      integer = as.vector(unclass(obj), mode = "integer"),
+      numeric = as.vector(unclass(obj), mode = "numeric"),
+      double = as.vector(unclass(obj), mode = "double"),
+      character = as.vector(unclass(obj), mode = "character"),
+      complex = as.vector(unclass(obj), mode = "complex"),
+      list = unname(lapply(unclass(obj), pack, ...)),
+      pairlist = unname(lapply(as.vector(obj, mode = "list"), pack, ...)),
+      closure = unname(lapply(obj, pack, ...)),
+      builtin = as.scalar(base64_encode(serialize(unclass(obj), NULL))),
+      special = as.scalar(base64_encode(serialize(unclass(obj), NULL))),
+      language = deparse(unclass(obj)),
+      name = deparse(unclass(obj)),
+      symbol = deparse(unclass(obj)),
+      expression = deparse(obj[[1]]),
+      warning("No encoding has been defined for objects with storage mode ", encoding.mode, " and will be skipped.")
+    )
+  )
+}
+
+unpack <- function(obj) {
+
+  encoding.mode <- obj$type
+
+  newdata <- c(
+    list(.Data = switch(encoding.mode,
+      `NULL` = NULL,
+      environment = new.env(parent=emptyenv()),
+      namespace = getNamespace(obj$value$name),
+      externalptr = NULL,
+      S4 = getClass(obj$value$class, where = getNamespace(obj$value$package)),
+      raw = base64_decode(obj$value),
+      logical = as.logical(list_to_vec(obj$value)),
+      integer = as.integer(list_to_vec(obj$value)),
+      numeric = as.numeric(list_to_vec(obj$value)),
+      double = as.double(list_to_vec(obj$value)),
+      character = as.character(list_to_vec(obj$value)),
+      complex = as.complex(list_to_vec(obj$value)),
+      list = lapply(obj$value, unpack),
+      pairlist = lapply(obj$value, unpack),
+      symbol = makesymbol(x = unlist(obj$value)),
+      name = makesymbol(x = unlist(obj$value)),
+      expression = parse(text = obj$value),
+      language = as.call(parse(text = unlist(obj$value)))[[1]],
+      special = unserialize(base64_decode(obj$value)),
+      builtin = unserialize(base64_decode(obj$value)),
+      closure = lapply(obj$value, unpack),
+      stop("Switch falling through for encode.mode: ", encoding.mode)
+    )
+  ), lapply(obj$attributes, unpack))
+
+  # this is for serializing function arguments: as.list(lm)$data
+  if (identical(newdata[[1]], substitute())) {
+    return(substitute())
+  }
+
+  # build the output object
+  output <- do.call("structure", newdata, quote = TRUE)
+
+  # functions are special
+  if (encoding.mode == "closure") {
+    myfn <- as.function(output)
+    environment(myfn) <- globalenv()
+    return(myfn)
+  }
+
+  # functions are special
+  if (encoding.mode == "pairlist") {
+    return(as.pairlist(output))
+  }
+
+  # try to fix native symbols
+  if (is(output, "NativeSymbolInfo")) {
+    try(output <- fixNativeSymbol(output))
+  }
+
+  # return
+  return(output)
+}
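+
+# Shape sketch (illustrative): pack() builds a tagged tree keyed on storage
+# mode, and unpack() inverts it. This is the machinery behind serializeJSON.
+#   pack(1:3)
+#   # roughly: list(type = "integer", attributes = <empty named list>, value = 1:3)
+#   identical(unpack(pack(mtcars)), mtcars)  # should be TRUE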
diff --git a/R/parseJSON.R b/R/parseJSON.R
new file mode 100644
index 0000000..4cbc1a2
--- /dev/null
+++ b/R/parseJSON.R
@@ -0,0 +1,15 @@
+parseJSON <- function(txt, bigint_as_char = FALSE) {
+  if(is(txt, "connection")){
+    parse_con(txt, 1024^2, bigint_as_char)
+  } else {
+    parse_string(txt, bigint_as_char)
+  }
+}
+
+#' @useDynLib jsonlite R_parse
+parse_string <- function(txt, bigint_as_char){
+  if (length(txt) > 1) {
+    txt <- paste(txt, collapse = "\n")
+  }
+  .Call(R_parse, txt, bigint_as_char)
+}
diff --git a/R/prettify.R b/R/prettify.R
new file mode 100644
index 0000000..ceaa20c
--- /dev/null
+++ b/R/prettify.R
@@ -0,0 +1,33 @@
+#' Prettify adds indentation to a JSON string; minify removes all indentation/whitespace.
+#'
+#' @rdname prettify
+#' @title Prettify or minify a JSON string
+#' @name prettify, minify
+#' @aliases minify prettify
+#' @export prettify minify
+#' @param txt JSON string
+#' @param indent number of spaces to indent
+#' @useDynLib jsonlite R_reformat
+#' @examples myjson <- toJSON(cars)
+#' cat(myjson)
+#' prettify(myjson)
+#' minify(myjson)
+prettify <- function(txt, indent = 4) {
+  txt <- paste(txt, collapse = "\n")
+  reformat(txt, TRUE, indent_string = paste(rep(" ", as.integer(indent)), collapse=""))
+}
+
+#' @rdname prettify
+minify <- function(txt) {
+  txt <- paste(txt, collapse = "\n")
+  reformat(txt, FALSE)
+}
+
+reformat <- function(x, pretty, indent_string = ""){
+  out <- .Call(R_reformat, x, pretty, indent_string = indent_string);
+  if(out[[1]] == 0) {
+    return(out[[2]])
+  } else {
+    stop(out[[2]], call.=FALSE)
+  }
+}
diff --git a/R/print.R b/R/print.R
new file mode 100644
index 0000000..3b1533f
--- /dev/null
+++ b/R/print.R
@@ -0,0 +1,19 @@
+#' @method print json
+#' @export
+print.json <- function(x, ...){
+  cat(x, "\n")
+}
+
+#' @method print scalar
+#' @export
+print.scalar <- function(x, ...){
+  original <- x;
+  class(x) <- class(x)[-1]
+  if(is.data.frame(x)){
+    row.names(x) <- "[x]"
+    print(x)
+  } else {
+    cat("[x] ", asJSON(x, collapse = FALSE), "\n", sep="")
+  }
+  invisible(original)
+}
diff --git a/R/push_parser.R b/R/push_parser.R
new file mode 100644
index 0000000..fa32c29
--- /dev/null
+++ b/R/push_parser.R
@@ -0,0 +1,27 @@
+# Reads the connection in chunks of n bytes (parseJSON passes 1 MB chunks).
+parse_con <- function(con, n, bigint_as_char){
+  stopifnot(is(con, "connection"))
+  if(!isOpen(con)){
+    open(con, "rb")
+    on.exit(close(con))
+  }
+  feed_push_parser(readBin(con, raw(), n), reset = TRUE)
+  while(length(buf <- readBin(con, raw(), n))) {
+    feed_push_parser(buf)
+  }
+  finalize_push_parser(bigint_as_char)
+}
+
+#' @useDynLib jsonlite R_feed_push_parser
+feed_push_parser <- function(data, reset = FALSE){
+  if(is.character(data)){
+    data <- charToRaw(data)
+  }
+  stopifnot(is.raw(data))
+  .Call(R_feed_push_parser, data, reset)
+}
+
+#' @useDynLib jsonlite R_finalize_push_parser
+finalize_push_parser <- function(bigint_as_char){
+  .Call(R_finalize_push_parser, bigint_as_char)
+}
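+
+# Usage sketch (illustrative, internal API):
+#   con <- rawConnection(charToRaw('[1, 2, 3]'))
+#   parse_con(con, 1024, bigint_as_char = FALSE)  # unsimplified list(1, 2, 3)
+#   close(con)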
diff --git a/R/raw_to_json.R b/R/raw_to_json.R
new file mode 100644
index 0000000..124f467
--- /dev/null
+++ b/R/raw_to_json.R
@@ -0,0 +1,13 @@
+# This function deals with some uncertainty in character encoding when reading
+# from files and URLs. It tries UTF-8 first, but falls back on the native
+# encoding if the text is definitely not valid UTF-8.
+raw_to_json <- function(x){
+  txt <- rawToChar(x);
+  Encoding(txt) <- "UTF-8"
+  isvalid <- validate(txt)
+  if(!isvalid && grepl("invalid bytes in UTF8", attr(isvalid, "err"), fixed=TRUE, useBytes=TRUE)){
+    warning("The json string is not valid UTF-8. Assuming native encoding.", call. = FALSE)
+    Encoding(txt) <- "";
+  }
+  return(txt)
+}
diff --git a/R/rbind.pages.R b/R/rbind.pages.R
new file mode 100644
index 0000000..bac5db1
--- /dev/null
+++ b/R/rbind.pages.R
@@ -0,0 +1,87 @@
+#' Combine pages into a single data frame
+#'
+#' The \code{rbind.pages} function is used to combine a list of data frames into a single
+#' data frame. This is often needed when working with a JSON API that limits the amount
+#' of data per request. If we need more data than what fits in a single request, we need to
+#' perform multiple requests that each retrieve a fragment of data, not unlike pages in a
+#' book. In practice this is often implemented using a \code{page} parameter in the API. The
+#' \code{rbind.pages} function can be used to combine these pages back into a single dataset.
+#'
+#' The \code{\link{rbind.pages}} function generalizes \code{\link[base:rbind]{base::rbind}} and
+#' \code{\link[plyr:rbind.fill]{plyr::rbind.fill}} with added support for nested data frames. Not every column
+#' has to be present in each of the individual data frames; missing columns are filled
+#' with \code{NA} values.
+#'
+#' @export
+#' @param pages a list of data frames, each representing a \emph{page} of data
+#' @examples # Basic example
+#' x <- data.frame(foo = rnorm(3), bar = c(TRUE, FALSE, TRUE))
+#' y <- data.frame(foo = rnorm(2), col = c("blue", "red"))
+#' rbind.pages(list(x, y))
+#'
+#' \dontrun{
+#' baseurl <- "http://projects.propublica.org/nonprofits/api/v1/search.json"
+#' pages <- list()
+#' for(i in 0:20){
+#'   mydata <- fromJSON(paste0(baseurl, "?order=revenue&sort_order=desc&page=", i))
+#'   message("Retrieving page ", i)
+#'   pages[[i+1]] <- mydata$filings
+#' }
+#' filings <- rbind.pages(pages)
+#' nrow(filings)
+#' colnames(filings)
+#' }
+rbind.pages <- function(pages){
+  #Load plyr
+  loadpkg("plyr")
+
+  #validate input
+  stopifnot(is.list(pages))
+
+  # edge case
+  if(!length(pages)){
+    return(data.frame())
+  }
+
+  # All elements must be data frames or NULL.
+  pages <- Filter(function(x) {!is.null(x)}, pages);
+  stopifnot(all(vapply(pages, is.data.frame, logical(1))))
+
+  # Extract data frame column names
+  dfdf <- lapply(pages, vapply, is.data.frame, logical(1))
+  dfnames <- unique(names(which(unlist(dfdf))))
+
+  # No sub data frames
+  if(!length(dfnames)){
+    return(plyr::rbind.fill(pages))
+  }
+
+  # Extract the nested data frames
+  subpages <- lapply(dfnames, function(colname){
+    rbind.pages(lapply(pages, function(df) {
+      if(!is.null(df[[colname]]))
+        df[[colname]]
+      else
+        as.data.frame(matrix(nrow=nrow(df), ncol=0))
+    }))
+  })
+
+  # Remove data frame columns
+  pages <- lapply(pages, function(df){
+    issubdf <- vapply(df, is.data.frame, logical(1))
+    if(any(issubdf))
+      df[issubdf] <- rep(NA, nrow(df))
+    df
+  })
+
+  # Bind rows
+  outdf <- plyr::rbind.fill(pages)
+
+  # Combine with sub data frames
+  for(i in seq_along(subpages)){
+    outdf[[dfnames[i]]] <- subpages[[i]]
+  }
+
+  #out
+  outdf
+}
diff --git a/R/serializeJSON.R b/R/serializeJSON.R
new file mode 100644
index 0000000..a89fc91
--- /dev/null
+++ b/R/serializeJSON.R
@@ -0,0 +1,47 @@
+#' The \code{\link{serializeJSON}} and \code{\link{unserializeJSON}} functions convert between
+#' \R{} objects and JSON data. Instead of using a class based mapping like
+#' \code{\link{toJSON}} and \code{\link{fromJSON}}, the serialize functions base the encoding
+#' schema on the storage type, and capture all data and attributes from any object.
+#' This way the object can be restored almost perfectly from its JSON representation, but
+#' the resulting JSON output is very verbose. Apart from environments, all standard storage
+#' types are supported.
+#'
+#' @rdname serializeJSON
+#' @title Serialize R objects to JSON
+#' @name serializeJSON
+#' @aliases serializeJSON unserializeJSON
+#' @export serializeJSON unserializeJSON
+#' @param x an \R{} object to be serialized
+#' @param digits max number of digits (after the dot) to print for numeric values
+#' @param pretty add indentation/whitespace to JSON output. See \code{\link{prettify}}
+#' @note JSON is a text-based format, which leads to loss of precision when printing numbers.
+#' @examples jsoncars <- serializeJSON(mtcars)
+#' mtcars2 <- unserializeJSON(jsoncars)
+#' identical(mtcars, mtcars2)
+#'
+#' set.seed(123)
+#' myobject <- list(
+#'   mynull = NULL,
+#'   mycomplex = lapply(eigen(matrix(-rnorm(9),3)), round, 3),
+#'   mymatrix = round(matrix(rnorm(9), 3),3),
+#'   myint = as.integer(c(1,2,3)),
+#'   mydf = cars,
+#'   mylist = list(foo='bar', 123, NA, NULL, list('test')),
+#'   mylogical = c(TRUE,FALSE,NA),
+#'   mychar = c('foo', NA, 'bar'),
+#'   somemissings = c(1, 2, NA, NaN, 5, Inf, 7, -Inf, 9, NA),
+#'   myrawvec = charToRaw('This is a test')
+#' );
+#' identical(unserializeJSON(serializeJSON(myobject)), myobject);
+serializeJSON <- function(x, digits = 8, pretty = FALSE) {
+  # just to verify that obj exists
+  is(x)
+  # we pass arguments both to asJSON as well as packaging object.
+  asJSON(pack(x), digits = digits, indent = if (isTRUE(pretty)) 0L else NA_integer_)
+}
+
+#' @param txt a JSON string which was created using \code{serializeJSON}
+#' @rdname serializeJSON
+unserializeJSON <- function(txt) {
+  unpack(parseJSON(txt))
+}
diff --git a/R/simplify.R b/R/simplify.R
new file mode 100644
index 0000000..73fad90
--- /dev/null
+++ b/R/simplify.R
@@ -0,0 +1,127 @@
+simplify <- function(x, simplifyVector = TRUE, simplifyDataFrame = TRUE, simplifyMatrix = TRUE,
+  simplifyDate = simplifyVector, homoList = TRUE, flatten = FALSE, columnmajor = FALSE,
+  simplifySubMatrix = simplifyMatrix) {
+
+  # This includes '[]' and '{}'
+  if (!is.list(x) || !length(x)) {
+    return(x)
+  }
+
+  # list can be a dataframe recordlist
+  if (isTRUE(simplifyDataFrame) && is.recordlist(x)) {
+    mydf <- simplifyDataFrame(x, flatten = flatten, simplifyMatrix = simplifySubMatrix)
+    if(isTRUE(simplifyDate) && is.data.frame(mydf) && is.datelist(mydf)){
+      return(structure(mydf[["$date"]]/1000, class=c("POSIXct", "POSIXt")))
+    }
+    return(mydf)
+  }
+
+  # or a scalar list (atomic vector)
+  if (isTRUE(simplifyVector) && is.null(names(x)) && is.scalarlist(x)) {
+    return(list_to_vec(x))
+  }
+
+  # apply recursively
+  out <- lapply(x, simplify, simplifyVector = simplifyVector, simplifyDataFrame = simplifyDataFrame,
+    simplifyMatrix = simplifySubMatrix, columnmajor = columnmajor, flatten = flatten)
+
+  # fix for mongo style dates turning into scalars *after* simplifying
+  # only happens when simplifyDataframe=FALSE
+  if(isTRUE(simplifyVector) && is.scalarlist(out) && all(vapply(out, is, logical(1), "POSIXt"))){
+    return(structure(list_to_vec(out), class=c("POSIXct", "POSIXt")))
+  }
+
+  # test for matrix. Note that we have to take another look at x (before
+  # list_to_vec on its elements) to differentiate between matrix and vector.
+  if (isTRUE(simplifyMatrix) && isTRUE(simplifyVector) && is.matrixlist(out) && all(unlist(vapply(x, is.scalarlist, logical(1))))) {
+    if(isTRUE(columnmajor)){
+      return(do.call(cbind, out))
+    } else {
+      #this is currently the default
+      return(do.call(rbind, out))
+    }
+  }
+
+  # Simplify higher arrays
+  if (isTRUE(simplifyMatrix) && is.arraylist(out)){
+    if(isTRUE(columnmajor)){
+      return(array(
+        data = do.call(cbind, out),
+        dim = c(dim(out[[1]]), length(out))
+      ));
+    } else {
+      #this is currently the default
+      return(array(
+        data = do.call(rbind, lapply(out, as.vector)),
+        dim = c(length(out), dim(out[[1]]))
+      ));
+    }
+  }
+
+  # try to enforce homoList on unnamed lists
+  if (isTRUE(homoList) && is.null(names(out))) {
+    # coerce empty lists, caused by the ambiguous fromJSON('[]')
+    isemptylist <- vapply(out, identical, logical(1), list())
+    if (any(isemptylist) && !all(isemptylist)) {
+      # if all the others look like data frames, coerce to data frames!
+      if (all(vapply(out[!isemptylist], is.data.frame, logical(1)))) {
+        for (i in which(isemptylist)) {
+          out[[i]] <- data.frame()
+        }
+        return(out)
+      }
+
+      # if all others look like atomic vectors, unlist all
+      if (all(vapply(out[!isemptylist], function(z) {
+        isTRUE(is.vector(z) && is.atomic(z))
+      }, logical(1)))) {
+        for (i in which(isemptylist)) {
+          out[[i]] <- vector(mode = typeof(out[[which(!isemptylist)[1]]]))
+        }
+        return(out)
+      }
+    }
+  }
+
+  # convert date object
+  if( isTRUE(simplifyDate) && is.datelist(out) ){
+    return(structure(out[["$date"]]/1000, class=c("POSIXct", "POSIXt")))
+  }
+
+  # return object
+  return(out)
+}
+
+is.matrixlist <- function(x) {
+  isTRUE(is.list(x)
+    && length(x)
+    && is.null(names(x))
+    && all(vapply(x, is.atomic, logical(1)))
+    && all.identical(vapply(x, length, integer(1)))
+    #&& all.identical(vapply(x, mode, character(1))) #this fails for: [ [ 1, 2 ], [ "NA", "NA" ] ]
+  );
+}
+
+is.arraylist <- function(x) {
+  isTRUE(is.list(x)
+    && length(x)
+    && is.null(names(x))
+    && all(vapply(x, is.array, logical(1)))
+    && all.identical(vapply(x, function(y){paste(dim(y), collapse="-")}, character(1)))
+  );
+}
+
+is.datelist <- function(x){
+  isTRUE(is.list(x)
+     && identical(names(x), "$date")
+     && is.numeric(x[["$date"]])
+  );
+}
+
+all.identical <- function(x){
+  if(!length(x)) return(FALSE)
+  for(i in x){
+    if(x[1] != i) return(FALSE)
+  }
+  return(TRUE)
+}
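+
+# Usage sketch (illustrative) for the predicates above:
+#   is.matrixlist(list(1:2, 3:4))                       # TRUE: rbinds to 2x2
+#   is.arraylist(list(matrix(1:4, 2), matrix(5:8, 2)))  # TRUE: 2x2x2 array
+#   is.datelist(list("$date" = 1419696000000))          # TRUE: mongo date (ms)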
diff --git a/R/simplifyDataFrame.R b/R/simplifyDataFrame.R
new file mode 100644
index 0000000..a66ad69
--- /dev/null
+++ b/R/simplifyDataFrame.R
@@ -0,0 +1,87 @@
+simplifyDataFrame <- function(recordlist, columns, flatten, simplifyMatrix) {
+
+  # no records at all
+  if (!length(recordlist)) {
+    if (!missing(columns)) {
+      return(as.data.frame(matrix(ncol = length(columns), nrow = 0, dimnames = list(NULL,
+        columns))))
+    } else {
+      return(data.frame())
+    }
+  }
+
+  # only empty records and unknown columns
+  if (!any(vapply(recordlist, length, integer(1), USE.NAMES = FALSE)) && missing(columns)) {
+    return(data.frame(matrix(nrow = length(recordlist), ncol = 0)))
+  }
+
+  # find columns if not specified
+  if (missing(columns)) {
+    columns <- unique(unlist(lapply(recordlist, names), recursive = FALSE, use.names = FALSE))
+  }
+
+  # make a new recordlist with only the requested values
+  #recordlist <- lapply(recordlist, function(x) {
+  #  # a new record with each requested column
+  #  x <- as.list(x)[columns]
+  #  names(x) <- columns
+  #  x
+  #})
+
+  # Convert row lists to column lists. This is the heavy lifting
+  columnlist <- lapply(columns, function(x) lapply(recordlist, "[[", x))
+
+  # simplify vectors and nested data frames
+  columnlist <- lapply(columnlist, simplify, simplifyVector = TRUE, simplifyDataFrame = TRUE,
+    simplifyMatrix = FALSE, simplifySubMatrix = simplifyMatrix, flatten = flatten)
+
+  # check that all elements have equal length
+  columnlengths <- unlist(vapply(columnlist, function(z) {
+    ifelse(length(dim(z)) > 1, nrow(z), length(z))
+  }, integer(1)))
+  n <- unique(columnlengths)
+  if (length(n) > 1) {
+    stop("Elements not of equal length: ", paste(columnlengths, collapse = " "))
+  }
+
+  # add the column names before flattening
+  names(columnlist) <- columns
+
+  # flatten nested data frames
+  if(isTRUE(flatten)) {
+    dfcolumns <- vapply(columnlist, is.data.frame, logical(1))
+    if(any(dfcolumns)){
+      columnlist <- c(columnlist[!dfcolumns], do.call(c, columnlist[dfcolumns]))
+    }
+  }
+
+  # make into data frame
+  class(columnlist) <- "data.frame"
+
+  # set row names
+  if("_row" %in% names(columnlist)) {
+    rn <- columnlist[["_row"]];
+    columnlist["_row"] <- NULL;
+
+    # row.names() casts double to character which is undesired.
+    if(is.double(rn)) {
+      rn <- as.integer(rn);
+    }
+
+    # data frames MUST have row names
+    if(any(duplicated(rn))){
+      warning('Duplicate names in "_row" field. Data frames must have unique row names.', call. = FALSE)
+      if(is.character(rn)) {
+        row.names(columnlist)  <- make.unique(rn)
+      } else {
+        row.names(columnlist) <- seq_len(n)
+      }
+    } else {
+      row.names(columnlist) <- rn;
+    }
+  } else {
+    row.names(columnlist) <- seq_len(n)
+  }
+
+  return(columnlist)
+}
diff --git a/R/stop.R b/R/stop.R
new file mode 100644
index 0000000..b637d5a
--- /dev/null
+++ b/R/stop.R
@@ -0,0 +1,3 @@
+stop <- function(..., call. = FALSE){
+  base::stop(..., call. = FALSE)
+}
diff --git a/R/stream.R b/R/stream.R
new file mode 100644
index 0000000..3b5b72f
--- /dev/null
+++ b/R/stream.R
@@ -0,0 +1,208 @@
+#' Streaming JSON input/output
+#'
+#' The \code{stream_in} and \code{stream_out} functions implement line-by-line processing
+#' of JSON data over a \code{\link{connection}}, such as a socket, url, file or pipe. JSON
+#' streaming requires the \href{http://ndjson.org}{ndjson} format, which slightly differs
+#' from \code{\link{fromJSON}} and \code{\link{toJSON}}, see details.
+#'
+#' Because parsing huge JSON strings is difficult and inefficient, JSON streaming is done
+#' using \strong{lines of minified JSON records}, a.k.a. \href{http://ndjson.org}{ndjson}.
+#' This is pretty standard: JSON databases such as \href{https://github.com/maxogden/dat}{dat}
+#' or MongoDB use the same format to import/export datasets. Note that this means that the
+#' total stream combined is not valid JSON itself; only the individual lines are. Also note
+#' that because line-breaks are used as separators, prettified JSON is not permitted: the
+#' JSON lines \emph{must} be minified. In this respect, the format is a bit different from
+#' \code{\link{fromJSON}} and \code{\link{toJSON}} where all lines are part of a single JSON
+#' structure with optional line breaks.
+#'
+#' The \code{handler} is a callback function which is called for each page (batch) of
+#' JSON data with exactly one argument (usually a data frame with \code{pagesize} rows).
+#' If \code{handler} is missing or \code{NULL}, a default handler is used which stores all
+#' intermediate pages of data, and at the very end binds all pages together into one single
+#' data frame that is returned by \code{stream_in}. When a custom \code{handler} function
+#' is specified, \code{stream_in} does not store any intermediate results and always returns
+#' \code{NULL}. It is then up to the \code{handler} to process or store data pages.
+#' A \code{handler} function that does not store intermediate results in memory (for
+#' example by writing output to another connection) results in a pipeline that can process an
+#' unlimited amount of data. See example.
+#'
+#' If a connection is not opened yet, \code{stream_in} and \code{stream_out}
+#' will automatically open and later close the connection. Because R destroys connections
+#' when they are closed, they cannot be reused. To use a single connection for multiple
+#' calls to \code{stream_in} or \code{stream_out}, it needs to be opened
+#' beforehand. See example.
+#'
+#' @param con a \code{\link{connection}} object. If the connection is not open,
+#' \code{stream_in} and \code{stream_out} will automatically open
+#' and later close (and destroy) the connection. See details.
+#' @param handler a custom function that is called on each page of JSON data. If not specified,
+#' the default handler stores all pages and binds them into a single data frame that will be
+#' returned by \code{stream_in}. See details.
+#' @param x object to be streamed out. Currently only data frames are supported.
+#' @param pagesize number of lines to read/write from/to the connection per iteration.
+#' @param verbose print some information on what is going on.
+#' @param ... arguments for \code{\link{fromJSON}} and \code{\link{toJSON}} that
+#' control JSON formatting/parsing where applicable. Use with caution.
+#' @name stream_in, stream_out
+#' @aliases stream_in stream_out
+#' @export stream_in stream_out
+#' @rdname stream_in
+#' @references MongoDB export format: \url{http://docs.mongodb.org/manual/reference/program/mongoexport/#cmdoption--query}
+#' @references Documentation for the JSON Lines text file format: \url{http://jsonlines.org/}
+#' @return The \code{stream_out} function always returns \code{NULL}.
+#' When no custom handler is specified, \code{stream_in} returns a data frame of all pages bound together.
+#' When a custom handler function is specified, \code{stream_in} always returns \code{NULL}.
+#' @examples # compare formats
+#' x <- iris[1:3,]
+#' toJSON(x)
+#' stream_out(x)
+#'
+#' # Trivial example
+#' mydata <- stream_in(url("http://httpbin.org/stream/100"))
+#'
+#' \dontrun{
+#' # stream large dataset to file and back
+#' library(nycflights13)
+#' stream_out(flights, file(tmp <- tempfile()))
+#' flights2 <- stream_in(file(tmp))
+#' unlink(tmp)
+#' all.equal(flights2, as.data.frame(flights))
+#'
+#' # stream over HTTP
+#' diamonds2 <- stream_in(url("http://jeroenooms.github.io/data/diamonds.json"))
+#'
+#' # stream over HTTP with gzip compression
+#' flights3 <- stream_in(gzcon(url("http://jeroenooms.github.io/data/nycflights13.json.gz")))
+#' all.equal(flights3, as.data.frame(flights))
+#'
+#' # stream over HTTPS (HTTP+SSL) via curl
+#' library(curl)
+#' flights4 <- stream_in(gzcon(curl("https://jeroenooms.github.io/data/nycflights13.json.gz")))
+#' all.equal(flights4, as.data.frame(flights))
+#'
+#' # or alternatively:
+#' flights5 <- stream_in(gzcon(pipe("curl https://jeroenooms.github.io/data/nycflights13.json.gz")))
+#' all.equal(flights5, as.data.frame(flights))
+#'
+#' # Full JSON IO stream from URL to file connection.
+#' # Calculate delays for flights over 1000 miles in batches of 5k
+#' library(dplyr)
+#' con_in <- gzcon(url("http://jeroenooms.github.io/data/nycflights13.json.gz"))
+#' con_out <- file(tmp <- tempfile(), open = "wb")
+#' stream_in(con_in, handler = function(df){
+#'   df <- dplyr::filter(df, distance > 1000)
+#'   df <- dplyr::mutate(df, delta = dep_delay - arr_delay)
+#'   stream_out(df, con_out, pagesize = 1000)
+#' }, pagesize = 5000)
+#' close(con_out)
+#'
+#' # stream it back in
+#' mydata <- stream_in(file(tmp))
+#' nrow(mydata)
+#' unlink(tmp)
+#'
+#' # Data from http://openweathermap.org/current#bulk
+#' # Each row contains a nested data frame.
+#' daily14 <- stream_in(gzcon(url("http://78.46.48.103/sample/daily_14.json.gz")), pagesize=50)
+#' subset(daily14, city$name == "Berlin")$data[[1]]
+#'
+#' # Or with dplyr:
+#' library(dplyr)
+#' daily14f <- flatten(daily14)
+#' filter(daily14f, city.name == "Berlin")$data[[1]]
+#'
+#' # Stream import large data from zip file
+#' tmp <- tempfile()
+#' download.file("http://jsonstudio.com/wp-content/uploads/2014/02/companies.zip", tmp)
+#' companies <- stream_in(unz(tmp, "companies.json"))
+#' }
+stream_in <- function(con, handler = NULL, pagesize = 500, verbose = TRUE, ...) {
+
+  # Maybe also handle URLs here in future.
+  if(!is(con, "connection")){
+    stop("Argument 'con' must be a connection.")
+  }
+
+  # Same as mongolite
+  count <- 0
+  cb <- if(is.null(handler)){
+    out <- new.env()
+    function(x){
+      if(length(x)){
+        count <<- count + length(x)
+        out[[as.character(count)]] <<- x
+      }
+    }
+  } else {
+    if(verbose)
+      message("using a custom handler function.")
+    function(x){
+      handler(post_process(x))
+      count <<- count + length(x)
+    }
+  }
+
+  if(!isOpen(con, "r")){
+    if(verbose)
+      message("opening ", is(con) ," input connection.")
+
+    # binary connection prevents recoding of UTF-8 to latin1 on Windows
+    open(con, "rb")
+    on.exit({
+      if(verbose)
+        message("closing ", is(con) ," input connection.")
+      close(con)
+    })
+  }
+
+  # Read data page by page
+  repeat {
+    page <- readLines(con, n = pagesize, encoding = "UTF-8")
+    if(length(page)){
+      page <- Filter(nchar, page)
+      cb(lapply(page, parseJSON))
+      if(verbose)
+        cat("\r Found", count, "records...")
+    }
+    if(length(page) < pagesize)
+      break
+  }
+
+  # Either return a big data frame, or nothing.
+  if(is.null(handler)){
+    if(verbose) cat("\r Imported", count, "records. Simplifying into dataframe...\n")
+    out <- as.list(out, sorted = FALSE)
+    post_process(unlist(out[order(as.numeric(names(out)))], FALSE, FALSE))
+  } else {
+    invisible()
+  }
+}
+
+post_process <- function(x){
+  as.data.frame(simplify(x))
+}
+
+#' @rdname stream_in
+stream_out <- function(x, con = stdout(), pagesize = 500, verbose = TRUE, ...) {
+
+  if(!is(con, "connection")){
+    # Maybe handle URLs here in future.
+    stop("Argument 'con' must be a connection.")
+  }
+
+  if(!isOpen(con, "w")){
+    if(verbose) message("opening ", is(con) ," output connection.")
+    open(con, "wb")
+    on.exit({
+      if(verbose) message("closing ", is(con) ," output connection.")
+      close(con)
+    })
+  }
+
+  apply_by_pages(x, stream_out_page, pagesize = pagesize, con = con, verbose = verbose, ...);
+}
+
+stream_out_page <- function(page, con, ...){
+  # useBytes can sometimes prevent recoding of UTF-8 to latin1 on Windows.
+  # On Windows there is a bug when useBytes is used with a (non-binary) text connection.
+  writeLines(enc2utf8(asJSON(page, collapse = FALSE, ...)), con = con, useBytes = TRUE)
+}
diff --git a/R/toJSON.R b/R/toJSON.R
new file mode 100644
index 0000000..1e49750
--- /dev/null
+++ b/R/toJSON.R
@@ -0,0 +1,47 @@
+#' @rdname fromJSON
+toJSON <- function(x, dataframe = c("rows", "columns", "values"), matrix = c("rowmajor", "columnmajor"),
+  Date = c("ISO8601", "epoch"), POSIXt = c("string", "ISO8601", "epoch", "mongo"),
+  factor = c("string", "integer"), complex = c("string", "list"), raw = c("base64", "hex", "mongo"),
+  null = c("list", "null"), na = c("null", "string"), auto_unbox = FALSE, digits = 4,
+  pretty = FALSE, force = FALSE, ...) {
+
+  # validate args
+  dataframe <- match.arg(dataframe)
+  matrix <- match.arg(matrix)
+  Date <- match.arg(Date)
+  POSIXt <- match.arg(POSIXt)
+  factor <- match.arg(factor)
+  complex <- match.arg(complex)
+  raw <- match.arg(raw)
+  null <- match.arg(null)
+
+  # force
+  x <- force(x)
+
+  # edge case because 'null' in itself is not valid json
+  if(is.null(x)){
+    null <- "list"
+  }
+
+  #this is just to check, we keep method-specific defaults
+  if(!missing(na)){
+    na <- match.arg(na)
+  } else {
+    na <- NULL
+  }
+
+  indent <- if (isTRUE(pretty)) 0L else NA_integer_
+
+  # dispatch
+  ans <- asJSON(x, dataframe = dataframe, Date = Date, POSIXt = POSIXt, factor = factor,
+    complex = complex, raw = raw, matrix = matrix, auto_unbox = auto_unbox, digits = digits,
+    na = na, null = null, force = force, indent = indent, ...)
+
+  #prettify with yajl
+  if(is.numeric(pretty)) {
+    prettify(ans, pretty)
+  } else {
+    class(ans) <- "json"
+    return(ans)
+  }
+}
diff --git a/R/unbox.R b/R/unbox.R
new file mode 100644
index 0000000..763513b
--- /dev/null
+++ b/R/unbox.R
@@ -0,0 +1,52 @@
+#' Unbox a vector or data frame
+#'
+#' This function marks an atomic vector or data frame as a
+#' \href{http://en.wikipedia.org/wiki/Singleton_(mathematics)}{singleton}, i.e.
+#' a set with exactly 1 element. As a result, the value will not turn into an
+#' \code{array} when encoded into JSON. This can only be done for
+#' atomic vectors of length 1, or data frames with exactly 1 row. To automatically
+#' unbox all vectors of length 1 within an object, use the \code{auto_unbox} argument
+#' in \code{\link{toJSON}}.
+#'
+#' It is usually recommended to avoid this function and stick with the default
+#' encoding schema for the various \R{} classes. The only use case for this function
+#' is if you are bound to some specific predefined JSON structure (e.g. to
+#' submit to an API), which has no natural \R{} representation. Note that the default
+#' encoding for data frames naturally results in a collection of key-value pairs,
+#' without using \code{unbox}.
+#'
+#' @param x atomic vector of length 1, or data frame with 1 row.
+#' @return Returns a singleton version of \code{x}.
+#' @export
+#' @references \url{http://en.wikipedia.org/wiki/Singleton_(mathematics)}
+#' @examples toJSON(list(foo=123))
+#' toJSON(list(foo=unbox(123)))
+#'
+#' # Auto unbox vectors of length one:
+#' x = list(x=1:3, y = 4, z = "foo", k = NULL)
+#' toJSON(x)
+#' toJSON(x, auto_unbox = TRUE)
+#'
+#' x <- iris[1,]
+#' toJSON(list(rec=x))
+#' toJSON(list(rec=unbox(x)))
+unbox <- function(x){
+  if(is.null(x)){
+    return(x)
+  }
+  if(is.data.frame(x)){
+    if(nrow(x) == 1){
+      return(as.scalar(x))
+    } else {
+      stop("Tried to unbox dataframe with ", nrow(x), " rows.")
+    }
+  }
+  if(!is.vector(unclass(x)) || !is.atomic(x) || length(dim(x)) > 1){
+    stop("Only atomic vectors of length 1 or data frames with 1 row can be unboxed.")
+  }
+  if(identical(length(x), 1L)){
+    return(as.scalar(x))
+  } else {
+    stop("Tried to unbox a vector of length ", length(x))
+  }
+}
diff --git a/R/unescape_unicode.R b/R/unescape_unicode.R
new file mode 100644
index 0000000..111f87c
--- /dev/null
+++ b/R/unescape_unicode.R
@@ -0,0 +1,19 @@
+unescape_unicode <- function(x){
+  #single string only
+  stopifnot(is.character(x) && length(x) == 1)
+
+  #find matches
+  m <- gregexpr("(\\\\)+u[0-9a-z]{4}", x, ignore.case = TRUE)
+
+  if(m[[1]][1] > -1){
+    #parse matches
+    p <- vapply(regmatches(x, m)[[1]], function(txt){
+      gsub("\\", "\\\\", parse(text=paste0('"', txt, '"'))[[1]], fixed = TRUE, useBytes = TRUE)
+    }, character(1), USE.NAMES = FALSE)
+
+    #substitute parsed into original
+    regmatches(x, m) <- list(p)
+  }
+
+  x
+}
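+
+# Usage sketch (illustrative):
+#   unescape_unicode("Z\\u00FCrich")  # "Zürich"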
diff --git a/R/utf8conv.R b/R/utf8conv.R
new file mode 100644
index 0000000..802316f
--- /dev/null
+++ b/R/utf8conv.R
@@ -0,0 +1,3 @@
+utf8conv <- function(x) {
+  gsub("<U\\+([0-9A-F]{4})>","\\\\u\\1",x)
+}
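+
+# Usage sketch (illustrative): rewrites R's "<U+xxxx>" print markers as
+# JSON-style escape sequences.
+#   utf8conv("Z<U+00FC>rich")  # "Z\\u00FCrich"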
diff --git a/R/validate.R b/R/validate.R
new file mode 100644
index 0000000..e861159
--- /dev/null
+++ b/R/validate.R
@@ -0,0 +1,19 @@
+#' Validate JSON
+#'
+#' Test if a string contains valid JSON. Character vectors will be collapsed into a single string.
+#'
+#' @param txt JSON string
+#' @export
+#' @useDynLib jsonlite R_validate
+#' @examples #Output from toJSON and serializeJSON should pass validation
+#' myjson <- toJSON(mtcars)
+#' validate(myjson) #TRUE
+#'
+#' #Something bad happened
+#' truncated <- substring(myjson, 1, 100)
+#' validate(truncated) #FALSE
+validate <- function(txt) {
+  stopifnot(is.character(txt))
+  txt <- paste(txt, collapse = "\n")
+  .Call(R_validate, as.character(txt))
+}
diff --git a/R/warn_keep_vec_names.R b/R/warn_keep_vec_names.R
new file mode 100644
index 0000000..db3959d
--- /dev/null
+++ b/R/warn_keep_vec_names.R
@@ -0,0 +1,6 @@
+warn_keep_vec_names <- function() {
+  message("Input to asJSON(keep_vec_names=TRUE) is a named vector. ",
+    "In a future version of jsonlite, this option will not be supported, ",
+    "and named vectors will be translated into arrays instead of objects. ",
+    "If you want JSON object output, please use a named list instead. See ?toJSON.")
+}
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..4d558b7
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..35bad7d
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,15 @@
+citHeader("To cite jsonlite in publications use:")
+
+citEntry(entry = "Article",
+  title        = "The jsonlite Package: A Practical and Consistent Mapping Between JSON Data and R Objects",
+  author       = personList(as.person("Jeroen Ooms")),
+  journal      = "arXiv:1403.2805 [stat.CO]",
+  year         = "2014",
+  url          = "http://arxiv.org/abs/1403.2805",
+
+  textVersion  =
+  paste("Jeroen Ooms (2014).",
+        "The jsonlite Package: A Practical and Consistent Mapping Between JSON Data and R Objects.",
+        "arXiv:1403.2805 [stat.CO]",
+        "URL http://arxiv.org/abs/1403.2805.")
+)
diff --git a/inst/doc/json-aaquickstart.R b/inst/doc/json-aaquickstart.R
new file mode 100644
index 0000000..150e526
--- /dev/null
+++ b/inst/doc/json-aaquickstart.R
@@ -0,0 +1,65 @@
+## ----echo=FALSE----------------------------------------------------------
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+#no longer needed because yajl does not use tabs.
+#toJSON <- function(...){
+#  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+#}
+
+## ----message=FALSE-------------------------------------------------------
+library(jsonlite)
+all.equal(mtcars, fromJSON(toJSON(mtcars)))
+
+## ------------------------------------------------------------------------
+# A JSON array of primitives
+json <- '["Mario", "Peach", null, "Bowser"]'
+
+# Simplifies into an atomic vector
+fromJSON(json)
+
+## ------------------------------------------------------------------------
+# No simplification:
+fromJSON(json, simplifyVector = FALSE)
+
+## ------------------------------------------------------------------------
+json <-
+'[
+  {"Name" : "Mario", "Age" : 32, "Occupation" : "Plumber"}, 
+  {"Name" : "Peach", "Age" : 21, "Occupation" : "Princess"},
+  {},
+  {"Name" : "Bowser", "Occupation" : "Koopa"}
+]'
+mydf <- fromJSON(json)
+mydf
+
+## ------------------------------------------------------------------------
+mydf$Ranking <- c(3, 1, 2, 4)
+toJSON(mydf, pretty=TRUE)
+
+## ------------------------------------------------------------------------
+json <- '[
+  [1, 2, 3, 4],
+  [5, 6, 7, 8],
+  [9, 10, 11, 12]
+]'
+mymatrix <- fromJSON(json)
+mymatrix
+
+## ------------------------------------------------------------------------
+toJSON(mymatrix, pretty = TRUE)
+
+## ------------------------------------------------------------------------
+json <- '[
+   [[1, 2], 
+    [3, 4]],
+   [[5, 6], 
+    [7, 8]],
+   [[9, 10],
+    [11, 12]]
+]'
+myarray <- fromJSON(json)
+myarray[1, , ]
+myarray[ , ,1]
+
diff --git a/inst/doc/json-aaquickstart.Rmd b/inst/doc/json-aaquickstart.Rmd
new file mode 100644
index 0000000..7724c27
--- /dev/null
+++ b/inst/doc/json-aaquickstart.Rmd
@@ -0,0 +1,126 @@
+---
+Title: "Getting started with JSON and jsonlite"
+date: "`r Sys.Date()`"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Getting started with JSON and jsonlite}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+```{r echo=FALSE}
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+#no longer needed because yajl does not use tabs.
+#toJSON <- function(...){
+#  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+#}
+```
+
+# Getting started with JSON and jsonlite
+
+The jsonlite package is a JSON parser/generator optimized for the web. Its main strength is that it implements a bidirectional mapping between JSON data and the most important R data types. Thereby we can convert between R objects and JSON without loss of type or information, and without the need for any manual data munging. This is ideal for interacting with web APIs, or to build pipelines where data structures seamlessly flow in and out of R using JSON.
+
+```{r message=FALSE}
+library(jsonlite)
+all.equal(mtcars, fromJSON(toJSON(mtcars)))
+```
+
+This vignette introduces basic concepts to get started with jsonlite. For a more detailed outline and motivation of the mapping, see: [arXiv:1403.2805](http://arxiv.org/abs/1403.2805).
+
+## Simplification
+
+Simplification is the process where JSON arrays automatically get converted from a list into a more specific R class. The `fromJSON` function has 3 arguments which control the simplification process: `simplifyVector`, `simplifyDataFrame` and `simplifyMatrix`. Each one is enabled by default.
+
+| JSON structure        | Example JSON data                                        | Simplifies to R class | Argument in fromJSON | 
+| ----------------------|----------------------------------------------------------|-----------------------|----------------------|
+| Array of primitives   | `["Amsterdam", "Rotterdam", "Utrecht", "Den Haag"]`      | Atomic Vector         | simplifyVector       | 
+| Array of objects      | `[{"name":"Erik", "age":43}, {"name":"Anna", "age":32}]` | Data Frame            | simplifyDataFrame    | 
+| Array of arrays       | `[ [1, 2, 3], [4, 5, 6] ]`                               | Matrix                | simplifyMatrix       |
+
+### Atomic Vectors
+
+When `simplifyVector` is enabled, JSON arrays containing **primitives** (strings, numbers, booleans or null) simplify into an atomic vector:
+
+```{r}
+# A JSON array of primitives
+json <- '["Mario", "Peach", null, "Bowser"]'
+
+# Simplifies into an atomic vector
+fromJSON(json)
+```
+
+Without simplification, any JSON array turns into a list: 
+
+```{r}
+# No simplification:
+fromJSON(json, simplifyVector = FALSE)
+```
+
+
+### Data Frames
+
+When `simplifyDataFrame` is enabled, JSON arrays containing **objects** (key-value pairs) simplify into a data frame:
+
+```{r}
+json <-
+'[
+  {"Name" : "Mario", "Age" : 32, "Occupation" : "Plumber"}, 
+  {"Name" : "Peach", "Age" : 21, "Occupation" : "Princess"},
+  {},
+  {"Name" : "Bowser", "Occupation" : "Koopa"}
+]'
+mydf <- fromJSON(json)
+mydf
+```
+
+The data frame gets converted back into the original JSON structure by `toJSON` (whitespace and line breaks are ignorable in JSON).
+
+```{r}
+mydf$Ranking <- c(3, 1, 2, 4)
+toJSON(mydf, pretty=TRUE)
+```
+
+Hence you can go back and forth between data frames and JSON, without any manual data restructuring.
+
+### Matrices and Arrays
+
+When `simplifyMatrix` is enabled, JSON arrays containing **equal-length sub-arrays** simplify into a matrix (or higher order R array):
+
+```{r}
+json <- '[
+  [1, 2, 3, 4],
+  [5, 6, 7, 8],
+  [9, 10, 11, 12]
+]'
+mymatrix <- fromJSON(json)
+mymatrix
+```
+
+Again, we can use `toJSON` to convert the matrix or array back into the original JSON structure:
+
+```{r}
+toJSON(mymatrix, pretty = TRUE)
+```
+
+The simplification works for arrays of arbitrary dimensionality, as long as the dimensions match (R does not support ragged arrays).
+
+```{r}
+json <- '[
+   [[1, 2], 
+    [3, 4]],
+   [[5, 6], 
+    [7, 8]],
+   [[9, 10],
+    [11, 12]]
+]'
+myarray <- fromJSON(json)
+myarray[1, , ]
+myarray[ , ,1]
+```
+
+This is all there is to it! For a more detailed outline and motivation of the mapping, see: [arXiv:1403.2805](http://arxiv.org/abs/1403.2805).
diff --git a/inst/doc/json-aaquickstart.html b/inst/doc/json-aaquickstart.html
new file mode 100644
index 0000000..4ec5fbb
--- /dev/null
+++ b/inst/doc/json-aaquickstart.html
@@ -0,0 +1,241 @@
+<!DOCTYPE html>
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+
+<meta charset="utf-8">
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="pandoc" />
+
+
+<meta name="date" content="2015-09-06" />
+
+<title></title>
+
+<script src="data:application/x-javascript,%2F%2A%21%20jQuery%20v1%2E11%2E0%20%7C%20%28c%29%202005%2C%202014%20jQuery%20Foundation%2C%20Inc%2E%20%7C%20jquery%2Eorg%2Flicense%20%2A%2F%0A%21function%28a%2Cb%29%7B%22object%22%3D%3Dtypeof%20module%26%26%22object%22%3D%3Dtypeof%20module%2Eexports%3Fmodule%2Eexports%3Da%2Edocument%3Fb%28a%2C%210%29%3Afunction%28a%29%7Bif%28%21a%2Edocument%29throw%20new%20Error%28%22jQuery%20requires%20a%20window%20with%20a%20document%22%29%3Breturn%20b%28a%29% [...]
+<meta name="viewport" content="width=device-width, initial-scale=1" />
+<link href="data:text/css,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%2F%2A%21%20normalize%2Ecss%20v3%2E0%2E2%20%7C%20MIT%20License%20%7C%20git%2Eio%2Fnormalize%20%2A%2Fhtml%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsiz [...]
+<script src="data:application/x-javascript,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%0Aif%28%22undefined%22%3D%3Dtypeof%20jQuery%29throw%20new%20Error%28%22Bootstrap%27s%20JavaScript%20requires%20jQuery%22%29%3B%2Bfunction%28a%29%7Bvar%20b%3Da%2Efn%2Ejquery%2Esplit%2 [...]
+<script src="data:application/x-javascript,%2F%2A%2A%0A%2A%20%40preserve%20HTML5%20Shiv%203%2E7%2E2%20%7C%20%40afarkas%20%40jdalton%20%40jon%5Fneal%20%40rem%20%7C%20MIT%2FGPL2%20Licensed%0A%2A%2F%0A%2F%2F%20Only%20run%20this%20code%20in%20IE%208%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%2Cb%29%7Bfunction%20c%28a%2Cb%29%7Bvar%20c%3Da%2EcreateElement%28%22p%22%29%2Cd%3Da%2EgetElementsByTagName%28%22head%22%29%5B0%5D%7C%7Ca%2Edocum [...]
+<script src="data:application/x-javascript,%2F%2A%21%20Respond%2Ejs%20v1%2E4%2E2%3A%20min%2Fmax%2Dwidth%20media%20query%20polyfill%20%2A%20Copyright%202013%20Scott%20Jehl%0A%20%2A%20Licensed%20under%20https%3A%2F%2Fgithub%2Ecom%2Fscottjehl%2FRespond%2Fblob%2Fmaster%2FLICENSE%2DMIT%0A%20%2A%20%20%2A%2F%0A%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%29%7B%22use%20strict%22%3Ba%2EmatchMedia%3Da%2EmatchMedia%7C%7Cfunction%28a%29%7Bvar [...]
+
+<style type="text/css">code{white-space: pre;}</style>
+<link href="data:text/css,pre%20%2Eoperator%2C%0Apre%20%2Eparen%20%7B%0A%20color%3A%20rgb%28104%2C%20118%2C%20135%29%0A%7D%0A%0Apre%20%2Eliteral%20%7B%0A%20color%3A%20%23990073%0A%7D%0A%0Apre%20%2Enumber%20%7B%0A%20color%3A%20%23099%3B%0A%7D%0A%0Apre%20%2Ecomment%20%7B%0A%20color%3A%20%23998%3B%0A%20font%2Dstyle%3A%20italic%0A%7D%0A%0Apre%20%2Ekeyword%20%7B%0A%20color%3A%20%23900%3B%0A%20font%2Dweight%3A%20bold%0A%7D%0A%0Apre%20%2Eidentifier%20%7B%0A%20color%3A%20rgb%280%2C%200%2C%200%29 [...]
+<script src="data:application/x-javascript,%0Avar%20hljs%3Dnew%20function%28%29%7Bfunction%20m%28p%29%7Breturn%20p%2Ereplace%28%2F%26%2Fgm%2C%22%26amp%3B%22%29%2Ereplace%28%2F%3C%2Fgm%2C%22%26lt%3B%22%29%7Dfunction%20f%28r%2Cq%2Cp%29%7Breturn%20RegExp%28q%2C%22m%22%2B%28r%2EcI%3F%22i%22%3A%22%22%29%2B%28p%3F%22g%22%3A%22%22%29%29%7Dfunction%20b%28r%29%7Bfor%28var%20p%3D0%3Bp%3Cr%2EchildNodes%2Elength%3Bp%2B%2B%29%7Bvar%20q%3Dr%2EchildNodes%5Bp%5D%3Bif%28q%2EnodeName%3D%3D%22CODE%22%29%7B [...]
+<style type="text/css">
+  pre:not([class]) {
+    background-color: white;
+  }
+</style>
+<script type="text/javascript">
+if (window.hljs && document.readyState && document.readyState === "complete") {
+   window.setTimeout(function() {
+      hljs.initHighlighting();
+   }, 0);
+}
+</script>
+
+
+
+</head>
+
+<body>
+
+<style type="text/css">
+.main-container {
+  max-width: 940px;
+  margin-left: auto;
+  margin-right: auto;
+}
+code {
+  color: inherit;
+  background-color: rgba(0, 0, 0, 0.04);
+}
+img { 
+  max-width:100%; 
+  height: auto; 
+}
+</style>
+<div class="container-fluid main-container">
+
+
+
+
+<div id="getting-started-with-json-and-jsonlite" class="section level1">
+<h1>Getting started with JSON and jsonlite</h1>
+<p>The jsonlite package is a JSON parser/generator optimized for the web. Its main strength is that it implements a bidirectional mapping between JSON data and the most important R data types. This allows us to convert between R objects and JSON without loss of type or information, and without any manual data munging. This is ideal for interacting with web APIs, or for building pipelines where data structures seamlessly flow in and out of R using JSON.</p>
+<pre class="r"><code>library(jsonlite)
+all.equal(mtcars, fromJSON(toJSON(mtcars)))</code></pre>
+<pre><code>[1] TRUE</code></pre>
+<p>This vignette introduces basic concepts to get started with jsonlite. For a more detailed outline and motivation of the mapping, see: <a href="http://arxiv.org/abs/1403.2805">arXiv:1403.2805</a>.</p>
+<div id="simplification" class="section level2">
+<h2>Simplification</h2>
+<p>Simplification is the process whereby JSON arrays are automatically converted from a list into a more specific R class. The <code>fromJSON</code> function has three arguments that control the simplification process: <code>simplifyVector</code>, <code>simplifyDataFrame</code> and <code>simplifyMatrix</code>. Each is enabled by default.</p>
+<table>
+<thead>
+<tr class="header">
+<th align="left">JSON structure</th>
+<th align="left">Example JSON data</th>
+<th align="left">Simplifies to R class</th>
+<th align="left">Argument in fromJSON</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td align="left">Array of primitives</td>
+<td align="left"><code>["Amsterdam", "Rotterdam", "Utrecht", "Den Haag"]</code></td>
+<td align="left">Atomic Vector</td>
+<td align="left">simplifyVector</td>
+</tr>
+<tr class="even">
+<td align="left">Array of objects</td>
+<td align="left"><code>[{"name":"Erik", "age":43}, {"name":"Anna", "age":32}]</code></td>
+<td align="left">Data Frame</td>
+<td align="left">simplifyDataFrame</td>
+</tr>
+<tr class="odd">
+<td align="left">Array of arrays</td>
+<td align="left"><code>[ [1, 2, 3], [4, 5, 6] ]</code></td>
+<td align="left">Matrix</td>
+<td align="left">simplifyMatrix</td>
+</tr>
+</tbody>
+</table>
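+<p>These three arguments act independently. For instance (a minimal sketch, output not shown), with <code>simplifyMatrix = FALSE</code> an array of equal-length sub-arrays still has its elements simplified, but only into a list of atomic vectors rather than a matrix:</p>
+<pre class="r"><code># Sketch: simplifyVector still applies, simplifyMatrix does not
+fromJSON('[ [1, 2, 3], [4, 5, 6] ]', simplifyMatrix = FALSE)</code></pre>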
+<div id="atomic-vectors" class="section level3">
+<h3>Atomic Vectors</h3>
+<p>When <code>simplifyVector</code> is enabled, JSON arrays containing <strong>primitives</strong> (strings, numbers, booleans or null) simplify into an atomic vector:</p>
+<pre class="r"><code># A JSON array of primitives
+json <- '["Mario", "Peach", null, "Bowser"]'
+
+# Simplifies into an atomic vector
+fromJSON(json)</code></pre>
+<pre><code>[1] "Mario"  "Peach"  NA       "Bowser"</code></pre>
+<p>Without simplification, any JSON array turns into a list:</p>
+<pre class="r"><code># No simplification:
+fromJSON(json, simplifyVector = FALSE)</code></pre>
+<pre><code>[[1]]
+[1] "Mario"
+
+[[2]]
+[1] "Peach"
+
+[[3]]
+NULL
+
+[[4]]
+[1] "Bowser"</code></pre>
+</div>
+<div id="data-frames" class="section level3">
+<h3>Data Frames</h3>
+<p>When <code>simplifyDataFrame</code> is enabled, JSON arrays containing <strong>objects</strong> (key-value pairs) simplify into a data frame:</p>
+<pre class="r"><code>json <-
+'[
+  {"Name" : "Mario", "Age" : 32, "Occupation" : "Plumber"}, 
+  {"Name" : "Peach", "Age" : 21, "Occupation" : "Princess"},
+  {},
+  {"Name" : "Bowser", "Occupation" : "Koopa"}
+]'
+mydf <- fromJSON(json)
+mydf</code></pre>
+<pre><code>    Name Age Occupation
+1  Mario  32    Plumber
+2  Peach  21   Princess
+3   <NA>  NA       <NA>
+4 Bowser  NA      Koopa</code></pre>
+<p>The data frame is converted back into the original JSON structure by <code>toJSON</code> (whitespace and line breaks are insignificant in JSON).</p>
+<pre class="r"><code>mydf$Ranking <- c(3, 1, 2, 4)
+toJSON(mydf, pretty=TRUE)</code></pre>
+<pre><code>[
+  {
+    "Name": "Mario",
+    "Age": 32,
+    "Occupation": "Plumber",
+    "Ranking": 3
+  },
+  {
+    "Name": "Peach",
+    "Age": 21,
+    "Occupation": "Princess",
+    "Ranking": 1
+  },
+  {
+    "Ranking": 2
+  },
+  {
+    "Name": "Bowser",
+    "Occupation": "Koopa",
+    "Ranking": 4
+  }
+] </code></pre>
+<p>Hence you can go back and forth between data frames and JSON, without any manual data restructuring.</p>
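+<p>Note that <code>toJSON</code> omits missing values from the output by default, as seen for the empty record above. To encode them explicitly instead, use the <code>na</code> argument; a minimal sketch:</p>
+<pre class="r"><code># Sketch: encode missing values as JSON null instead of dropping the fields
+toJSON(mydf, na = "null", pretty = TRUE)</code></pre>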
+</div>
+<div id="matrices-and-arrays" class="section level3">
+<h3>Matrices and Arrays</h3>
+<p>When <code>simplifyMatrix</code> is enabled, JSON arrays containing <strong>equal-length sub-arrays</strong> simplify into a matrix (or higher order R array):</p>
+<pre class="r"><code>json <- '[
+  [1, 2, 3, 4],
+  [5, 6, 7, 8],
+  [9, 10, 11, 12]
+]'
+mymatrix <- fromJSON(json)
+mymatrix</code></pre>
+<pre><code>     [,1] [,2] [,3] [,4]
+[1,]    1    2    3    4
+[2,]    5    6    7    8
+[3,]    9   10   11   12</code></pre>
+<p>Again, we can use <code>toJSON</code> to convert the matrix or array back into the original JSON structure:</p>
+<pre class="r"><code>toJSON(mymatrix, pretty = TRUE)</code></pre>
+<pre><code>[
+  [1, 2, 3, 4],
+  [5, 6, 7, 8],
+  [9, 10, 11, 12]
+] </code></pre>
+<p>The simplification works for arrays of arbitrary dimensionality, as long as the dimensions match (R does not support ragged arrays).</p>
+<pre class="r"><code>json <- '[
+   [[1, 2], 
+    [3, 4]],
+   [[5, 6], 
+    [7, 8]],
+   [[9, 10],
+    [11, 12]]
+]'
+myarray <- fromJSON(json)
+myarray[1, , ]</code></pre>
+<pre><code>     [,1] [,2]
+[1,]    1    2
+[2,]    3    4</code></pre>
+<pre class="r"><code>myarray[ , ,1]</code></pre>
+<pre><code>     [,1] [,2]
+[1,]    1    3
+[2,]    5    7
+[3,]    9   11</code></pre>
+<p>This is all there is to it! For a more detailed outline and motivation of the mapping, see: <a href="http://arxiv.org/abs/1403.2805">arXiv:1403.2805</a>.</p>
+</div>
+</div>
+</div>
+
+
+</div>
+
+<script>
+
+// add bootstrap table styles to pandoc tables
+$(document).ready(function () {
+  $('tr.header').parent('thead').parent('table').addClass('table table-condensed');
+});
+
+</script>
+
+<!-- dynamically load mathjax for compatibility with self-contained -->
+<script>
+  (function () {
+    var script = document.createElement("script");
+    script.type = "text/javascript";
+    script.src  = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
+    document.getElementsByTagName("head")[0].appendChild(script);
+  })();
+</script>
+
+</body>
+</html>
diff --git a/inst/doc/json-apis.Rmd b/inst/doc/json-apis.Rmd
new file mode 100644
index 0000000..c55d0b9
--- /dev/null
+++ b/inst/doc/json-apis.Rmd
@@ -0,0 +1,376 @@
+---
+title: "Fetching JSON data from REST APIs"
+date: "2015-09-06"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Fetching JSON data from REST APIs}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+
+This section lists some examples of public HTTP APIs that publish data in JSON format. These are great for getting a sense of the complex structures that are encountered in real-world JSON data. All services are free, but some require registration or authentication. Each example returns lots of data, so not all output is printed in this document.
+
+
+```r
+library(jsonlite)
+```
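+
+These are live endpoints, so responses change over time and a request may occasionally fail. When running many examples in batch, a defensive wrapper can help; a minimal sketch (`try_fromJSON` is a hypothetical helper, not part of jsonlite):
+
+```r
+# Sketch: fetch JSON defensively; returns NULL instead of raising an error
+try_fromJSON <- function(url, ...) {
+  tryCatch(fromJSON(url, ...), error = function(e) {
+    message("Request failed: ", conditionMessage(e))
+    NULL
+  })
+}
+```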
+
+## Github
+
+Github is an online code repository and has APIs to get live data on almost all activity. Below are some examples from a well-known R package and its author:
+
+
+```r
+hadley_orgs <- fromJSON("https://api.github.com/users/hadley/orgs")
+hadley_repos <- fromJSON("https://api.github.com/users/hadley/repos")
+gg_commits <- fromJSON("https://api.github.com/repos/hadley/ggplot2/commits")
+gg_issues <- fromJSON("https://api.github.com/repos/hadley/ggplot2/issues")
+
+#latest issues
+paste(format(gg_issues$user$login), ":", gg_issues$title)
+```
+
+```
+ [1] "idavydov     : annotate(\"segment\") wrong position if limits are inverted"                      
+ [2] "ben519       : geom_polygon doesn't make NA values grey when using continuous fill"              
+ [3] "has2k1       : Fix multiple tiny issues in the position classes"                                 
+ [4] "neggert      : Problem with geom_bar position=fill and faceting"                                 
+ [5] "robertzk     : Fix typo in geom_linerange docs."                                                 
+ [6] "lionel-      : stat_bar() gets confused with numeric discrete data?"                             
+ [7] "daattali     : Request: support theme axis.ticks.length.x and axis.ticks.length.y"               
+ [8] "sethchandler : Documentation error on %+replace% ?"                                              
+ [9] "daattali     : dev version 1.0.1.9003 has some breaking changes"                                 
+[10] "lionel-      : Labels"                                                                           
+[11] "nutterb      : legend for `geom_line` colour disappears when `alpha` < 1.0"                      
+[12] "wch          : scale_name property should be removed from Scale objects"                         
+[13] "wch          : scale_details arguments in Coords should be renamed panel_scales or scale"        
+[14] "wch          : ScalesList-related functions should be moved into ggproto object"                 
+[15] "wch          : update_geom_defaults and update_stat_defaults should accept Geom and Stat objects"
+[16] "wch          : Make some ggproto objects immutable. Closes #1237"                                
+[17] "and3k        : Control size of the border and padding of geom_label"                             
+[18] "hadley       : Consistent argument order and formatting for layer functions"                     
+[19] "hadley       : Consistently handle missing values"                                               
+[20] "cmohamma     : fortify causes fatal error"                                                       
+[21] "lionel-      : Flawed `label_bquote()` implementation"                                           
+[22] "beroe        : Create alias for `colors=` in `scale_color_gradientn()`"                          
+[23] "and3k        : hjust broken in y facets"                                                         
+[24] "joranE       : Allow color bar guides for alpha scales"                                          
+[25] "hadley       : dir = \"v\" also needs to swap nrow and ncol"                                     
+[26] "joranE       : Add examples for removing guides"                                                 
+[27] "lionel-      : New approach for horizontal layers"                                               
+[28] "bbolker      : add horizontal linerange geom"                                                    
+[29] "hadley       : Write vignette about grid"                                                        
+[30] "hadley       : Immutable flag for ggproto objects"                                               
+```
+
+## CitiBike NYC
+
+A single public API that shows location, status and current availability for all stations in the New York City bike sharing initiative.
+
+
+```r
+citibike <- fromJSON("http://citibikenyc.com/stations/json")
+stations <- citibike$stationBeanList
+colnames(stations)
+```
+
+```
+ [1] "id"                    "stationName"          
+ [3] "availableDocks"        "totalDocks"           
+ [5] "latitude"              "longitude"            
+ [7] "statusValue"           "statusKey"            
+ [9] "availableBikes"        "stAddress1"           
+[11] "stAddress2"            "city"                 
+[13] "postalCode"            "location"             
+[15] "altitude"              "testStation"          
+[17] "lastCommunicationTime" "landMark"             
+```
+
+```r
+nrow(stations)
+```
+
+```
+[1] 509
+```
+
+## Ergast
+
+The Ergast Developer API is an experimental web service which provides a historical record of motor racing data for non-commercial purposes.
+
+
+```r
+res <- fromJSON('http://ergast.com/api/f1/2004/1/results.json')
+drivers <- res$MRData$RaceTable$Races$Results[[1]]$Driver
+colnames(drivers)
+```
+
+```
+[1] "driverId"        "code"            "url"             "givenName"      
+[5] "familyName"      "dateOfBirth"     "nationality"     "permanentNumber"
+```
+
+```r
+drivers[1:10, c("givenName", "familyName", "code", "nationality")]
+```
+
+```
+   givenName    familyName code nationality
+1    Michael    Schumacher  MSC      German
+2     Rubens   Barrichello  BAR   Brazilian
+3   Fernando        Alonso  ALO     Spanish
+4       Ralf    Schumacher  SCH      German
+5       Juan Pablo Montoya  MON   Colombian
+6     Jenson        Button  BUT     British
+7      Jarno        Trulli  TRU     Italian
+8      David     Coulthard  COU     British
+9     Takuma          Sato  SAT    Japanese
+10 Giancarlo    Fisichella  FIS     Italian
+```
+
+
+## ProPublica
+
+Below is an example from the [ProPublica Nonprofit Explorer API](http://projects.propublica.org/nonprofits/api) where we retrieve the first 11 pages (pages 0 through 10) of tax-exempt organizations in the USA, ordered by revenue. The `rbind.pages` function is used to combine the pages into a single data frame.
+
+
+
+```r
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:10){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i), flatten=TRUE)
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+```
+
+```
+[1] 275
+```
+
+```r
+filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+```
+                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380
+```
+
+
+## New York Times
+
+The New York Times has several APIs as part of the NYT developer network. These interface to data from various departments, such as news articles, book reviews, real estate, etc. Registration is required (but free) and a key can be obtained [here](http://developer.nytimes.com/docs/reference/keys). The code below includes some example keys for illustration purposes.
+
+
+```r
+#search for articles
+article_key <- "&api-key=c2fede7bd9aea57c898f538e5ec0a1ee:6:68700045"
+url <- "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=obamacare+socialism"
+req <- fromJSON(paste0(url, article_key))
+articles <- req$response$docs
+colnames(articles)
+```
+
+```
+ [1] "web_url"          "snippet"          "lead_paragraph"  
+ [4] "abstract"         "print_page"       "blog"            
+ [7] "source"           "multimedia"       "headline"        
+[10] "keywords"         "pub_date"         "document_type"   
+[13] "news_desk"        "section_name"     "subsection_name" 
+[16] "byline"           "type_of_material" "_id"             
+[19] "word_count"      
+```
+
+```r
+#search for best sellers
+bestseller_key <- "&api-key=5e260a86a6301f55546c83a47d139b0d:3:68700045"
+url <- "http://api.nytimes.com/svc/books/v2/lists/overview.json?published_date=2013-01-01"
+req <- fromJSON(paste0(url, bestseller_key))
+bestsellers <- req$results$list
+category1 <- bestsellers[[1, "books"]]
+subset(category1, select = c("author", "title", "publisher"))
+```
+
+```
+           author                title                  publisher
+1   Gillian Flynn            GONE GIRL           Crown Publishing
+2    John Grisham        THE RACKETEER Knopf Doubleday Publishing
+3       E L James FIFTY SHADES OF GREY Knopf Doubleday Publishing
+4 Nicholas Sparks           SAFE HAVEN   Grand Central Publishing
+5  David Baldacci        THE FORGOTTEN   Grand Central Publishing
+```
+
+```r
+#movie reviews
+movie_key <- "&api-key=5a3daaeee6bbc6b9df16284bc575e5ba:0:68700045"
+url <- "http://api.nytimes.com/svc/movies/v2/reviews/dvd-picks.json?order=by-date"
+req <- fromJSON(paste0(url, movie_key))
+reviews <- req$results
+colnames(reviews)
+```
+
+```
+ [1] "nyt_movie_id"     "display_title"    "sort_name"       
+ [4] "mpaa_rating"      "critics_pick"     "thousand_best"   
+ [7] "byline"           "headline"         "capsule_review"  
+[10] "summary_short"    "publication_date" "opening_date"    
+[13] "dvd_release_date" "date_updated"     "seo_name"        
+[16] "link"             "related_urls"     "multimedia"      
+```
+
+```r
+reviews[1:5, c("display_title", "byline", "mpaa_rating")]
+```
+
+```
+       display_title         byline mpaa_rating
+1    Tom at the Farm Stephen Holden          NR
+2     A Little Chaos Stephen Holden           R
+3           Big Game   Andy Webster        PG13
+4          Balls Out   Andy Webster           R
+5 Mad Max: Fury Road    A. O. Scott           R
+```
+
+## CrunchBase
+
+CrunchBase is the free database of technology companies, people, and investors that anyone can edit.
+
+
+```r
+key <- "f6dv6cas5vw7arn5b9d7mdm3"
+res <- fromJSON(paste0("http://api.crunchbase.com/v/1/search.js?query=R&api_key=", key))
+head(res$results)
+```
+
+## Sunlight Foundation
+
+The Sunlight Foundation is a non-profit that helps to make government transparent and accountable through data, tools, policy and journalism. Register for a free key [here](http://sunlightfoundation.com/api/accounts/register/). An example key is provided.
+
+
+```r
+key <- "&apikey=39c83d5a4acc42be993ee637e2e4ba3d"
+
+#Find bills about drones
+drone_bills <- fromJSON(paste0("http://openstates.org/api/v1/bills/?q=drone", key))
+drone_bills$title <- substring(drone_bills$title, 1, 40)
+print(drone_bills[1:5, c("title", "state", "chamber", "type")])
+```
+
+```
+                                     title state chamber type
+1                            WILDLIFE-TECH    il   lower bill
+2 Criminalizes the unlawful use of an unma    ny   lower bill
+3 Criminalizes the unlawful use of an unma    ny   lower bill
+4 Relating to: criminal procedure and prov    wi   lower bill
+5 Relating to: criminal procedure and prov    wi   upper bill
+```
+
+```r
+#Congress mentioning "immigration"
+res <- fromJSON(paste0("http://capitolwords.org/api/1/dates.json?phrase=immigration", key))
+wordcount <- res$results
+wordcount$day <- as.Date(wordcount$day)
+summary(wordcount)
+```
+
+```
+     count              day               raw_count      
+ Min.   :   1.00   Min.   :1996-01-02   Min.   :   1.00  
+ 1st Qu.:   3.00   1st Qu.:2001-01-22   1st Qu.:   3.00  
+ Median :   8.00   Median :2005-11-16   Median :   8.00  
+ Mean   :  25.27   Mean   :2005-10-02   Mean   :  25.27  
+ 3rd Qu.:  21.00   3rd Qu.:2010-05-12   3rd Qu.:  21.00  
+ Max.   :1835.00   Max.   :2015-08-05   Max.   :1835.00  
+```
+
+```r
+#Local legislators
+legislators <- fromJSON(paste0("http://congress.api.sunlightfoundation.com/",
+  "legislators/locate?latitude=42.96&longitude=-108.09", key))
+subset(legislators$results, select=c("last_name", "chamber", "term_start", "twitter_id"))
+```
+
+```
+  last_name chamber term_start      twitter_id
+1    Lummis   house 2015-01-06   CynthiaLummis
+2      Enzi  senate 2015-01-06     SenatorEnzi
+3  Barrasso  senate 2013-01-03 SenJohnBarrasso
+```
+
+## Twitter
+
+The Twitter API requires OAuth 2.0 authentication. Some example code:
+
+
+```r
+#Create your own application key at https://dev.twitter.com/apps
+consumer_key = "EZRy5JzOH2QQmVAe9B4j2w";
+consumer_secret = "OIDC4MdfZJ82nbwpZfoUO4WOLTYjoRhpHRAWj6JMec";
+
+#Use basic auth
+library(httr)
+secret <- RCurl::base64(paste(consumer_key, consumer_secret, sep = ":"));
+req <- POST("https://api.twitter.com/oauth2/token",
+  add_headers(
+    "Authorization" = paste("Basic", secret),
+    "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"
+  ),
+  body = "grant_type=client_credentials"
+);
+
+#Extract the access token
+token <- paste("Bearer", content(req)$access_token)
+
+#Actual API call
+url <- "https://api.twitter.com/1.1/statuses/user_timeline.json?count=10&screen_name=Rbloggers"
+req <- GET(url, add_headers(Authorization = token))
+json <- content(req, as = "text")
+tweets <- fromJSON(json)
+substring(tweets$text, 1, 100)
+```
+
+```
+ [1] "Analysing longitudinal data: Multilevel growth models (II) http://t.co/unUxszG7VJ #rstats"           
+ [2] "RcppDE 0.1.4 http://t.co/3qPhFzoOpj #rstats"                                                         
+ [3] "Minimalist Maps http://t.co/fpkNznuCoX #rstats"                                                      
+ [4] "Tutorials freely available of course I taught: including ggplot2, dplyr and shiny http://t.co/WsxX4U"
+ [5] "Deploying Shiny apps with shinyapps.io http://t.co/tjef1pbKLt #rstats"                               
+ [6] "Bootstrap Evaluation of Clusters http://t.co/EbY7ziKCz5 #rstats"                                     
+ [7] "Add external code to Rmarkdown http://t.co/RCJEmS8gyP #rstats"                                       
+ [8] "Linear models with weighted observations http://t.co/pUoHpvxAGC #rstats"                             
+ [9] "dplyr 0.4.3 http://t.co/ze3zc8t7qj #rstats"                                                          
+[10] "xkcd survey and the power to shape the internet http://t.co/vNaKhxWxE4 #rstats"                      
+```
+
diff --git a/inst/doc/json-apis.html b/inst/doc/json-apis.html
new file mode 100644
index 0000000..1055bbc
--- /dev/null
+++ b/inst/doc/json-apis.html
@@ -0,0 +1,347 @@
+<!DOCTYPE html>
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+
+<meta charset="utf-8">
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="pandoc" />
+
+
+<meta name="date" content="2015-09-06" />
+
+<title>Fetching JSON data from REST APIs</title>
+
+<script src="data:application/x-javascript,%2F%2A%21%20jQuery%20v1%2E11%2E0%20%7C%20%28c%29%202005%2C%202014%20jQuery%20Foundation%2C%20Inc%2E%20%7C%20jquery%2Eorg%2Flicense%20%2A%2F%0A%21function%28a%2Cb%29%7B%22object%22%3D%3Dtypeof%20module%26%26%22object%22%3D%3Dtypeof%20module%2Eexports%3Fmodule%2Eexports%3Da%2Edocument%3Fb%28a%2C%210%29%3Afunction%28a%29%7Bif%28%21a%2Edocument%29throw%20new%20Error%28%22jQuery%20requires%20a%20window%20with%20a%20document%22%29%3Breturn%20b%28a%29% [...]
+<meta name="viewport" content="width=device-width, initial-scale=1" />
+<link href="data:text/css,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%2F%2A%21%20normalize%2Ecss%20v3%2E0%2E2%20%7C%20MIT%20License%20%7C%20git%2Eio%2Fnormalize%20%2A%2Fhtml%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsiz [...]
+<script src="data:application/x-javascript,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%0Aif%28%22undefined%22%3D%3Dtypeof%20jQuery%29throw%20new%20Error%28%22Bootstrap%27s%20JavaScript%20requires%20jQuery%22%29%3B%2Bfunction%28a%29%7Bvar%20b%3Da%2Efn%2Ejquery%2Esplit%2 [...]
+<script src="data:application/x-javascript,%2F%2A%2A%0A%2A%20%40preserve%20HTML5%20Shiv%203%2E7%2E2%20%7C%20%40afarkas%20%40jdalton%20%40jon%5Fneal%20%40rem%20%7C%20MIT%2FGPL2%20Licensed%0A%2A%2F%0A%2F%2F%20Only%20run%20this%20code%20in%20IE%208%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%2Cb%29%7Bfunction%20c%28a%2Cb%29%7Bvar%20c%3Da%2EcreateElement%28%22p%22%29%2Cd%3Da%2EgetElementsByTagName%28%22head%22%29%5B0%5D%7C%7Ca%2Edocum [...]
+<script src="data:application/x-javascript,%2F%2A%21%20Respond%2Ejs%20v1%2E4%2E2%3A%20min%2Fmax%2Dwidth%20media%20query%20polyfill%20%2A%20Copyright%202013%20Scott%20Jehl%0A%20%2A%20Licensed%20under%20https%3A%2F%2Fgithub%2Ecom%2Fscottjehl%2FRespond%2Fblob%2Fmaster%2FLICENSE%2DMIT%0A%20%2A%20%20%2A%2F%0A%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%29%7B%22use%20strict%22%3Ba%2EmatchMedia%3Da%2EmatchMedia%7C%7Cfunction%28a%29%7Bvar [...]
+
+<style type="text/css">code{white-space: pre;}</style>
+<link href="data:text/css,pre%20%2Eoperator%2C%0Apre%20%2Eparen%20%7B%0A%20color%3A%20rgb%28104%2C%20118%2C%20135%29%0A%7D%0A%0Apre%20%2Eliteral%20%7B%0A%20color%3A%20%23990073%0A%7D%0A%0Apre%20%2Enumber%20%7B%0A%20color%3A%20%23099%3B%0A%7D%0A%0Apre%20%2Ecomment%20%7B%0A%20color%3A%20%23998%3B%0A%20font%2Dstyle%3A%20italic%0A%7D%0A%0Apre%20%2Ekeyword%20%7B%0A%20color%3A%20%23900%3B%0A%20font%2Dweight%3A%20bold%0A%7D%0A%0Apre%20%2Eidentifier%20%7B%0A%20color%3A%20rgb%280%2C%200%2C%200%29 [...]
+<script src="data:application/x-javascript,%0Avar%20hljs%3Dnew%20function%28%29%7Bfunction%20m%28p%29%7Breturn%20p%2Ereplace%28%2F%26%2Fgm%2C%22%26amp%3B%22%29%2Ereplace%28%2F%3C%2Fgm%2C%22%26lt%3B%22%29%7Dfunction%20f%28r%2Cq%2Cp%29%7Breturn%20RegExp%28q%2C%22m%22%2B%28r%2EcI%3F%22i%22%3A%22%22%29%2B%28p%3F%22g%22%3A%22%22%29%29%7Dfunction%20b%28r%29%7Bfor%28var%20p%3D0%3Bp%3Cr%2EchildNodes%2Elength%3Bp%2B%2B%29%7Bvar%20q%3Dr%2EchildNodes%5Bp%5D%3Bif%28q%2EnodeName%3D%3D%22CODE%22%29%7B [...]
+<style type="text/css">
+  pre:not([class]) {
+    background-color: white;
+  }
+</style>
+<script type="text/javascript">
+if (window.hljs && document.readyState && document.readyState === "complete") {
+   window.setTimeout(function() {
+      hljs.initHighlighting();
+   }, 0);
+}
+</script>
+
+
+
+</head>
+
+<body>
+
+<style type="text/css">
+.main-container {
+  max-width: 940px;
+  margin-left: auto;
+  margin-right: auto;
+}
+code {
+  color: inherit;
+  background-color: rgba(0, 0, 0, 0.04);
+}
+img { 
+  max-width:100%; 
+  height: auto; 
+}
+</style>
+<div class="container-fluid main-container">
+
+
+<div id="header">
+<h1 class="title">Fetching JSON data from REST APIs</h1>
+<h4 class="date"><em>2015-09-06</em></h4>
+</div>
+
+
+<p>This section lists some examples of public HTTP APIs that publish data in JSON format. These are great for getting a sense of the complex structures that are encountered in real-world JSON data. All services are free, but some require registration or authentication. Each example returns lots of data, so not all output is printed in this document.</p>
+<pre class="r"><code>library(jsonlite)</code></pre>
+<div id="github" class="section level2">
+<h2>Github</h2>
+<p>Github is an online code repository and has APIs to get live data on almost all activity. Below are some examples from a well-known R package and its author:</p>
+<pre class="r"><code>hadley_orgs <- fromJSON("https://api.github.com/users/hadley/orgs")
+hadley_repos <- fromJSON("https://api.github.com/users/hadley/repos")
+gg_commits <- fromJSON("https://api.github.com/repos/hadley/ggplot2/commits")
+gg_issues <- fromJSON("https://api.github.com/repos/hadley/ggplot2/issues")
+
+#latest issues
+paste(format(gg_issues$user$login), ":", gg_issues$title)</code></pre>
+<pre><code> [1] "idavydov     : annotate(\"segment\") wrong position if limits are inverted"                      
+ [2] "ben519       : geom_polygon doesn't make NA values grey when using continuous fill"              
+ [3] "has2k1       : Fix multiple tiny issues in the position classes"                                 
+ [4] "neggert      : Problem with geom_bar position=fill and faceting"                                 
+ [5] "robertzk     : Fix typo in geom_linerange docs."                                                 
+ [6] "lionel-      : stat_bar() gets confused with numeric discrete data?"                             
+ [7] "daattali     : Request: support theme axis.ticks.length.x and axis.ticks.length.y"               
+ [8] "sethchandler : Documentation error on %+replace% ?"                                              
+ [9] "daattali     : dev version 1.0.1.9003 has some breaking changes"                                 
+[10] "lionel-      : Labels"                                                                           
+[11] "nutterb      : legend for `geom_line` colour disappears when `alpha` < 1.0"                      
+[12] "wch          : scale_name property should be removed from Scale objects"                         
+[13] "wch          : scale_details arguments in Coords should be renamed panel_scales or scale"        
+[14] "wch          : ScalesList-related functions should be moved into ggproto object"                 
+[15] "wch          : update_geom_defaults and update_stat_defaults should accept Geom and Stat objects"
+[16] "wch          : Make some ggproto objects immutable. Closes #1237"                                
+[17] "and3k        : Control size of the border and padding of geom_label"                             
+[18] "hadley       : Consistent argument order and formatting for layer functions"                     
+[19] "hadley       : Consistently handle missing values"                                               
+[20] "cmohamma     : fortify causes fatal error"                                                       
+[21] "lionel-      : Flawed `label_bquote()` implementation"                                           
+[22] "beroe        : Create alias for `colors=` in `scale_color_gradientn()`"                          
+[23] "and3k        : hjust broken in y facets"                                                         
+[24] "joranE       : Allow color bar guides for alpha scales"                                          
+[25] "hadley       : dir = \"v\" also needs to swap nrow and ncol"                                     
+[26] "joranE       : Add examples for removing guides"                                                 
+[27] "lionel-      : New approach for horizontal layers"                                               
+[28] "bbolker      : add horizontal linerange geom"                                                    
+[29] "hadley       : Write vignette about grid"                                                        
+[30] "hadley       : Immutable flag for ggproto objects"                                               </code></pre>
+</div>
+<div id="citibike-nyc" class="section level2">
+<h2>CitiBike NYC</h2>
+<p>A single public API that shows location, status and current availability for all stations in the New York City bike sharing initiative.</p>
+<pre class="r"><code>citibike <- fromJSON("http://citibikenyc.com/stations/json")
+stations <- citibike$stationBeanList
+colnames(stations)</code></pre>
+<pre><code> [1] "id"                    "stationName"          
+ [3] "availableDocks"        "totalDocks"           
+ [5] "latitude"              "longitude"            
+ [7] "statusValue"           "statusKey"            
+ [9] "availableBikes"        "stAddress1"           
+[11] "stAddress2"            "city"                 
+[13] "postalCode"            "location"             
+[15] "altitude"              "testStation"          
+[17] "lastCommunicationTime" "landMark"             </code></pre>
+<pre class="r"><code>nrow(stations)</code></pre>
+<pre><code>[1] 509</code></pre>
+</div>
+<div id="ergast" class="section level2">
+<h2>Ergast</h2>
+<p>The Ergast Developer API is an experimental web service which provides a historical record of motor racing data for non-commercial purposes.</p>
+<pre class="r"><code>res <- fromJSON('http://ergast.com/api/f1/2004/1/results.json')
+drivers <- res$MRData$RaceTable$Races$Results[[1]]$Driver
+colnames(drivers)</code></pre>
+<pre><code>[1] "driverId"        "code"            "url"             "givenName"      
+[5] "familyName"      "dateOfBirth"     "nationality"     "permanentNumber"</code></pre>
+<pre class="r"><code>drivers[1:10, c("givenName", "familyName", "code", "nationality")]</code></pre>
+<pre><code>   givenName    familyName code nationality
+1    Michael    Schumacher  MSC      German
+2     Rubens   Barrichello  BAR   Brazilian
+3   Fernando        Alonso  ALO     Spanish
+4       Ralf    Schumacher  SCH      German
+5       Juan Pablo Montoya  MON   Colombian
+6     Jenson        Button  BUT     British
+7      Jarno        Trulli  TRU     Italian
+8      David     Coulthard  COU     British
+9     Takuma          Sato  SAT    Japanese
+10 Giancarlo    Fisichella  FIS     Italian</code></pre>
+</div>
+<div id="propublica" class="section level2">
+<h2>ProPublica</h2>
+<p>Below is an example from the <a href="http://projects.propublica.org/nonprofits/api">ProPublica Nonprofit Explorer API</a> where we retrieve the first 11 pages (pages 0 through 10) of tax-exempt organizations in the USA, ordered by revenue. The <code>rbind.pages</code> function is used to combine the pages into a single data frame.</p>
+<pre class="r"><code>#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:10){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i), flatten=TRUE)
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)</code></pre>
+<pre><code>[1] 275</code></pre>
+<pre class="r"><code>filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]</code></pre>
+<pre><code>                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380</code></pre>
+</div>
+<div id="new-york-times" class="section level2">
+<h2>New York Times</h2>
+<p>The New York Times has several APIs as part of the NYT developer network. These interface to data from various departments, such as news articles, book reviews, real estate, etc. Registration is required (but free) and a key can be obtained <a href="http://developer.nytimes.com/docs/reference/keys">here</a>. The code below includes some example keys for illustration purposes.</p>
+<pre class="r"><code>#search for articles
+article_key <- "&api-key=c2fede7bd9aea57c898f538e5ec0a1ee:6:68700045"
+url <- "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=obamacare+socialism"
+req <- fromJSON(paste0(url, article_key))
+articles <- req$response$docs
+colnames(articles)</code></pre>
+<pre><code> [1] "web_url"          "snippet"          "lead_paragraph"  
+ [4] "abstract"         "print_page"       "blog"            
+ [7] "source"           "multimedia"       "headline"        
+[10] "keywords"         "pub_date"         "document_type"   
+[13] "news_desk"        "section_name"     "subsection_name" 
+[16] "byline"           "type_of_material" "_id"             
+[19] "word_count"      </code></pre>
+<pre class="r"><code>#search for best sellers
+bestseller_key <- "&api-key=5e260a86a6301f55546c83a47d139b0d:3:68700045"
+url <- "http://api.nytimes.com/svc/books/v2/lists/overview.json?published_date=2013-01-01"
+req <- fromJSON(paste0(url, bestseller_key))
+bestsellers <- req$results$list
+category1 <- bestsellers[[1, "books"]]
+subset(category1, select = c("author", "title", "publisher"))</code></pre>
+<pre><code>           author                title                  publisher
+1   Gillian Flynn            GONE GIRL           Crown Publishing
+2    John Grisham        THE RACKETEER Knopf Doubleday Publishing
+3       E L James FIFTY SHADES OF GREY Knopf Doubleday Publishing
+4 Nicholas Sparks           SAFE HAVEN   Grand Central Publishing
+5  David Baldacci        THE FORGOTTEN   Grand Central Publishing</code></pre>
+<pre class="r"><code>#movie reviews
+movie_key <- "&api-key=5a3daaeee6bbc6b9df16284bc575e5ba:0:68700045"
+url <- "http://api.nytimes.com/svc/movies/v2/reviews/dvd-picks.json?order=by-date"
+req <- fromJSON(paste0(url, movie_key))
+reviews <- req$results
+colnames(reviews)</code></pre>
+<pre><code> [1] "nyt_movie_id"     "display_title"    "sort_name"       
+ [4] "mpaa_rating"      "critics_pick"     "thousand_best"   
+ [7] "byline"           "headline"         "capsule_review"  
+[10] "summary_short"    "publication_date" "opening_date"    
+[13] "dvd_release_date" "date_updated"     "seo_name"        
+[16] "link"             "related_urls"     "multimedia"      </code></pre>
+<pre class="r"><code>reviews[1:5, c("display_title", "byline", "mpaa_rating")]</code></pre>
+<pre><code>       display_title         byline mpaa_rating
+1    Tom at the Farm Stephen Holden          NR
+2     A Little Chaos Stephen Holden           R
+3           Big Game   Andy Webster        PG13
+4          Balls Out   Andy Webster           R
+5 Mad Max: Fury Road    A. O. Scott           R</code></pre>
+</div>
+<div id="crunchbase" class="section level2">
+<h2>CrunchBase</h2>
+<p>CrunchBase is the free database of technology companies, people, and investors that anyone can edit.</p>
+<pre class="r"><code>key <- "f6dv6cas5vw7arn5b9d7mdm3"
+res <- fromJSON(paste0("http://api.crunchbase.com/v/1/search.js?query=R&api_key=", key))
+head(res$results)</code></pre>
+</div>
+<div id="sunlight-foundation" class="section level2">
+<h2>Sunlight Foundation</h2>
+<p>The Sunlight Foundation is a non-profit that helps to make government transparent and accountable through data, tools, policy and journalism. Register for a free key <a href="http://sunlightfoundation.com/api/accounts/register/">here</a>. An example key is provided.</p>
+<pre class="r"><code>key <- "&apikey=39c83d5a4acc42be993ee637e2e4ba3d"
+
+#Find bills about drones
+drone_bills <- fromJSON(paste0("http://openstates.org/api/v1/bills/?q=drone", key))
+drone_bills$title <- substring(drone_bills$title, 1, 40)
+print(drone_bills[1:5, c("title", "state", "chamber", "type")])</code></pre>
+<pre><code>                                     title state chamber type
+1                            WILDLIFE-TECH    il   lower bill
+2 Criminalizes the unlawful use of an unma    ny   lower bill
+3 Criminalizes the unlawful use of an unma    ny   lower bill
+4 Relating to: criminal procedure and prov    wi   lower bill
+5 Relating to: criminal procedure and prov    wi   upper bill</code></pre>
+<pre class="r"><code>#Congress mentioning "constitution"
+res <- fromJSON(paste0("http://capitolwords.org/api/1/dates.json?phrase=immigration", key))
+wordcount <- res$results
+wordcount$day <- as.Date(wordcount$day)
+summary(wordcount)</code></pre>
+<pre><code>     count              day               raw_count      
+ Min.   :   1.00   Min.   :1996-01-02   Min.   :   1.00  
+ 1st Qu.:   3.00   1st Qu.:2001-01-22   1st Qu.:   3.00  
+ Median :   8.00   Median :2005-11-16   Median :   8.00  
+ Mean   :  25.27   Mean   :2005-10-02   Mean   :  25.27  
+ 3rd Qu.:  21.00   3rd Qu.:2010-05-12   3rd Qu.:  21.00  
+ Max.   :1835.00   Max.   :2015-08-05   Max.   :1835.00  </code></pre>
+<pre class="r"><code>#Local legislators
+legislators <- fromJSON(paste0("http://congress.api.sunlightfoundation.com/",
+  "legislators/locate?latitude=42.96&longitude=-108.09", key))
+subset(legislators$results, select=c("last_name", "chamber", "term_start", "twitter_id"))</code></pre>
+<pre><code>  last_name chamber term_start      twitter_id
+1    Lummis   house 2015-01-06   CynthiaLummis
+2      Enzi  senate 2015-01-06     SenatorEnzi
+3  Barrasso  senate 2013-01-03 SenJohnBarrasso</code></pre>
+</div>
+<div id="twitter" class="section level2">
+<h2>Twitter</h2>
+<p>The Twitter API requires OAuth 2.0 authentication. Some example code:</p>
+<pre class="r"><code>#Create your own appication key at https://dev.twitter.com/apps
+consumer_key = "EZRy5JzOH2QQmVAe9B4j2w";
+consumer_secret = "OIDC4MdfZJ82nbwpZfoUO4WOLTYjoRhpHRAWj6JMec";
+
+#Use basic auth
+library(httr)
+secret <- RCurl::base64(paste(consumer_key, consumer_secret, sep = ":"));
+req <- POST("https://api.twitter.com/oauth2/token",
+  add_headers(
+    "Authorization" = paste("Basic", secret),
+    "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"
+  ),
+  body = "grant_type=client_credentials"
+);
+
+#Extract the access token
+token <- paste("Bearer", content(req)$access_token)
+
+#Actual API call
+url <- "https://api.twitter.com/1.1/statuses/user_timeline.json?count=10&screen_name=Rbloggers"
+req <- GET(url, add_headers(Authorization = token))
+json <- content(req, as = "text")
+tweets <- fromJSON(json)
+substring(tweets$text, 1, 100)</code></pre>
+<pre><code> [1] "Analysing longitudinal data: Multilevel growth models (II) http://t.co/unUxszG7VJ #rstats"           
+ [2] "RcppDE 0.1.4 http://t.co/3qPhFzoOpj #rstats"                                                         
+ [3] "Minimalist Maps http://t.co/fpkNznuCoX #rstats"                                                      
+ [4] "Tutorials freely available of course I taught: including ggplot2, dplyr and shiny http://t.co/WsxX4U"
+ [5] "Deploying Shiny apps with shinyapps.io http://t.co/tjef1pbKLt #rstats"                               
+ [6] "Bootstrap Evaluation of Clusters http://t.co/EbY7ziKCz5 #rstats"                                     
+ [7] "Add external code to Rmarkdown http://t.co/RCJEmS8gyP #rstats"                                       
+ [8] "Linear models with weighted observations http://t.co/pUoHpvxAGC #rstats"                             
+ [9] "dplyr 0.4.3 http://t.co/ze3zc8t7qj #rstats"                                                          
+[10] "xkcd survey and the power to shape the internet http://t.co/vNaKhxWxE4 #rstats"                      </code></pre>
+</div>
+
+
+</div>
+
+<script>
+
+// add bootstrap table styles to pandoc tables
+$(document).ready(function () {
+  $('tr.header').parent('thead').parent('table').addClass('table table-condensed');
+});
+
+</script>
+
+<!-- dynamically load mathjax for compatibility with self-contained -->
+<script>
+  (function () {
+    var script = document.createElement("script");
+    script.type = "text/javascript";
+    script.src  = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
+    document.getElementsByTagName("head")[0].appendChild(script);
+  })();
+</script>
+
+</body>
+</html>
diff --git a/inst/doc/json-mapping.pdf b/inst/doc/json-mapping.pdf
new file mode 100644
index 0000000..36ab89e
Binary files /dev/null and b/inst/doc/json-mapping.pdf differ
diff --git a/inst/doc/json-mapping.pdf.asis b/inst/doc/json-mapping.pdf.asis
new file mode 100644
index 0000000..1b7eb64
--- /dev/null
+++ b/inst/doc/json-mapping.pdf.asis
@@ -0,0 +1,6 @@
+%\VignetteIndexEntry{A mapping between JSON data and R objects}
+%\VignetteEngine{R.rsp::asis}
+%\VignetteKeyword{PDF}
+%\VignetteKeyword{HTML}
+%\VignetteKeyword{vignette}
+%\VignetteKeyword{package}
diff --git a/inst/doc/json-opencpu.R b/inst/doc/json-opencpu.R
new file mode 100644
index 0000000..6b130fe
--- /dev/null
+++ b/inst/doc/json-opencpu.R
@@ -0,0 +1,5 @@
+## ----eval=FALSE----------------------------------------------------------
+#  mydata <- airquality[1:2,]
+#  y <- reshape2::melt(data = mydata, id = c("Month", "Day"))
+#  toJSON(y)
+
diff --git a/inst/doc/json-opencpu.Rnw b/inst/doc/json-opencpu.Rnw
new file mode 100644
index 0000000..fae8b9f
--- /dev/null
+++ b/inst/doc/json-opencpu.Rnw
@@ -0,0 +1,132 @@
+%\VignetteEngine{knitr::knitr}
+%\VignetteIndexEntry{Simple JSON RPC with OpenCPU}
+
+%This is a template.
+%Actual text goes in sources/content.Rnw
+\documentclass{article}
+\author{Jeroen Ooms}
+
+%useful packages
+\usepackage{url}
+\usepackage{fullpage}
+\usepackage{xspace}
+\usepackage{hyperref}
+\usepackage{fancyvrb}
+
+%for table positioning
+\usepackage{float}
+\restylefloat{table}
+
+%support for accents
+\usepackage[utf8]{inputenc}
+
+%support for ascii art
+\usepackage{pmboxdraw}
+
+%use vspace instead of indentation for paragraphs
+\usepackage{parskip}
+
+%extra line spacing
+\usepackage{setspace}
+\setstretch{1.25}
+
+%knitr style verbatim blocks
+\newenvironment{codeblock}{
+  \VerbatimEnvironment
+  \definecolor{shadecolor}{rgb}{0.95, 0.95, 0.95}\color{fgcolor}
+  \color{black}
+  \begin{kframe}
+  \begin{BVerbatim}
+}{
+  \end{BVerbatim}
+  \end{kframe}
+}
+
+%placeholders for JSS/RJournal
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\file}[1]{\texttt{#1}}
+\newcommand{\dfn}[1]{\emph{#1}}
+\newcommand{\proglang}[1]{\texttt{#1}}
+
+%shorthands
+\newcommand{\JSON}{\texttt{JSON}\xspace}
+\newcommand{\R}{\texttt{R}\xspace}
+\newcommand{\C}{\texttt{C}\xspace}
+\newcommand{\toJSON}{\texttt{toJSON}\xspace}
+\newcommand{\fromJSON}{\texttt{fromJSON}\xspace}
+\newcommand{\XML}{\pkg{XML}\xspace}
+\newcommand{\jsonlite}{\pkg{jsonlite}\xspace}
+\newcommand{\RJSONIO}{\pkg{RJSONIO}\xspace}
+\newcommand{\API}{\texttt{API}\xspace}
+\newcommand{\JavaScript}{\texttt{JavaScript}\xspace}
+
+%trick for using same content file as chapter and article
+\newcommand{\maintitle}[1]{
+  \title{#1}
+  \maketitle
+}
+
+%actual document
+\begin{document}
+
+
+
+\section*{Simple \JSON RPC with OpenCPU}
+
+The \jsonlite package is used by \texttt{OpenCPU} to convert between \JSON data and \R objects. This allows clients to retrieve \R objects, or remotely call \R functions using \JSON, where both the function arguments and the function return value are \JSON objects. For example, to download the \texttt{Boston} data from the \texttt{MASS} package:\\
+
+\begin{tabular}{|l|l|}
+  \hline
+     \textbf{Command in R} & \textbf{Example URL on OpenCPU} \\
+  \hline
+     \texttt{toJSON(Boston, digits=4)} & \url{https://demo.ocpu.io/MASS/data/Boston/json?digits=4} \\
+  \hline
+     \texttt{toJSON(Boston, dataframe="col")} & \url{https://demo.ocpu.io/MASS/data/Boston/json?dataframe=col} \\
+  \hline
+     \texttt{toJSON(Boston, pretty=FALSE)} & \url{https://demo.ocpu.io/MASS/data/Boston/json?pretty=false} \\
+  \hline
+\end{tabular}
+\newline
+
+To calculate the variance of the numbers \texttt{1:9} from the command line using \texttt{curl}:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/stats/R/var/json -d "x=[1,2,3,4,5,6,7,8,9]"
+\end{Verbatim}
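+
+(For reference, \texttt{var(1:9)} evaluates to \texttt{7.5}, so the response body should be the \JSON array \texttt{[7.5]}.)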
+
+Or equivalently post the entire body in \JSON format:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/stats/R/var/json -H "Content-Type: application/json" \
+-d "{\"x\":[1,2,3,4,5,6,7,8,9]}"
+\end{Verbatim}
+
+Below is an example where we call the \texttt{melt} function from the \texttt{reshape2} package on some example rows from the \texttt{airquality} data. Here both the input and the output consist of a data frame.
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/reshape2/R/melt/json -d 'id=["Month", "Day"]&data=[
+  { "Ozone" : 41, "Solar.R" : 190, "Wind" : 7.4, "Temp" : 67, "Month" : 5, "Day" : 1 },
+  { "Ozone" : 36, "Solar.R" : 118, "Wind" : 8, "Temp" : 72, "Month" : 5, "Day" : 2 } ]'
+\end{Verbatim}
+
+Or equivalently:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/reshape2/R/melt/json -H "Content-Type: application/json" \
+  -d '{"id" : ["Month", "Day"], "data" : [
+    { "Ozone" : 41, "Solar.R" : 190, "Wind" : 7.4, "Temp" : 67, "Month" : 5, "Day" : 1 },
+    { "Ozone" : 36, "Solar.R" : 118, "Wind" : 8, "Temp" : 72, "Month" : 5, "Day" : 2 }
+  ] }'
+\end{Verbatim}
+
+This request basically executes the following \R code:
+
+<<eval=FALSE>>=
+mydata <- airquality[1:2,]
+y <- reshape2::melt(data = mydata, id = c("Month", "Day"))
+toJSON(y)
+@
+
+%end
+\end{document}
diff --git a/inst/doc/json-opencpu.pdf b/inst/doc/json-opencpu.pdf
new file mode 100644
index 0000000..2af2954
Binary files /dev/null and b/inst/doc/json-opencpu.pdf differ
diff --git a/inst/doc/json-paging.Rmd b/inst/doc/json-paging.Rmd
new file mode 100644
index 0000000..14860b2
--- /dev/null
+++ b/inst/doc/json-paging.Rmd
@@ -0,0 +1,223 @@
+---
+title: "Combining pages of JSON data with jsonlite"
+date: "2015-09-06"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Combining pages of JSON data with jsonlite}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+
+
+
+
+The [jsonlite](https://cran.r-project.org/package=jsonlite) package is a `JSON` parser/generator for R which is optimized for pipelines and web APIs. It is used by the OpenCPU system and many other packages to get data in and out of R using the `JSON` format.
+
+## A bidirectional mapping
+
+One of the main strengths of `jsonlite` is that it implements a bidirectional [mapping](http://arxiv.org/abs/1403.2805) between JSON and data frames. This allows it to convert nested collections of JSON records, as they often appear on the web, directly into the appropriate R structure. For example, to grab some data from ProPublica we can simply use:
+
+
+```r
+library(jsonlite)
+mydata <- fromJSON("https://projects.propublica.org/forensics/geos.json", flatten = TRUE)
+View(mydata)
+```
+
+The `mydata` object is a data frame which can be used directly for modeling or visualization, without the need for any further complicated data manipulation.
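+
+For instance, a quick look at its shape (a sketch; the exact columns depend on the live feed):
+
+```r
+str(mydata, max.level = 1)
+```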
+
+## Paging with jsonlite
+
+A question that comes up frequently is how to combine pages of data. Most web APIs limit the amount of data that can be retrieved per request. If the client needs more data than fits in a single request, it has to break the retrieval down into multiple requests that each fetch a fragment (page) of the data, not unlike pages in a book. In practice this is often implemented using a `page` parameter in the API. Below is an example from the [ProPublica Nonprofit Explorer API](http://projects.propublica.org/nonprofits/api):
+
+
+```r
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+mydata0 <- fromJSON(paste0(baseurl, "&page=0"), flatten = TRUE)
+mydata1 <- fromJSON(paste0(baseurl, "&page=1"), flatten = TRUE)
+mydata2 <- fromJSON(paste0(baseurl, "&page=2"), flatten = TRUE)
+
+#The actual data is in the filings element
+mydata0$filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+```
+                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380
+```
+
+To analyze or visualize these data, we need to combine the pages into a single dataset. We can do this with the `rbind.pages` function. Note that in this example, the actual data is contained in the `filings` field:
+
+
+```r
+#Rows per data frame
+nrow(mydata0$filings)
+```
+
+```
+[1] 25
+```
+
+```r
+#Combine data frames
+filings <- rbind.pages(
+  list(mydata0$filings, mydata1$filings, mydata2$filings)
+)
+
+#Total number of rows
+nrow(filings)
+```
+
+```
+[1] 75
+```
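+
+Pages returned by an API do not always have identical columns. `rbind.pages` is designed to handle this; a minimal sketch, assuming columns missing from a page are filled with `NA` (in the style of plyr's `rbind.fill`):
+
+```r
+# Sketch: combining data frames with non-matching columns
+df1 <- data.frame(x = 1:2, y = c("a", "b"))
+df2 <- data.frame(x = 3, z = TRUE)
+rbind.pages(list(df1, df2))
+```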
+
+## Automatically combining many pages
+
+We can write a simple loop that automatically downloads and combines many pages. For example, to retrieve the first 21 pages (pages 0 through 20) of non-profits from the example above:
+
+
+```r
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:20){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+```
+
+```
+[1] 525
+```
+
+```r
+colnames(filings)
+```
+
+```
+  [1] "tax_prd"               "tax_prd_yr"           
+  [3] "formtype"              "pdf_url"              
+  [5] "updated"               "totrevenue"           
+  [7] "totfuncexpns"          "totassetsend"         
+  [9] "totliabend"            "pct_compnsatncurrofcr"
+ [11] "tax_pd"                "subseccd"             
+ [13] "unrelbusinccd"         "initiationfees"       
+ [15] "grsrcptspublicuse"     "grsincmembers"        
+ [17] "grsincother"           "totcntrbgfts"         
+ [19] "totprgmrevnue"         "invstmntinc"          
+ [21] "txexmptbndsproceeds"   "royaltsinc"           
+ [23] "grsrntsreal"           "grsrntsprsnl"         
+ [25] "rntlexpnsreal"         "rntlexpnsprsnl"       
+ [27] "rntlincreal"           "rntlincprsnl"         
+ [29] "netrntlinc"            "grsalesecur"          
+ [31] "grsalesothr"           "cstbasisecur"         
+ [33] "cstbasisothr"          "gnlsecur"             
+ [35] "gnlsothr"              "netgnls"              
+ [37] "grsincfndrsng"         "lessdirfndrsng"       
+ [39] "netincfndrsng"         "grsincgaming"         
+ [41] "lessdirgaming"         "netincgaming"         
+ [43] "grsalesinvent"         "lesscstofgoods"       
+ [45] "netincsales"           "miscrevtot11e"        
+ [47] "compnsatncurrofcr"     "othrsalwages"         
+ [49] "payrolltx"             "profndraising"        
+ [51] "txexmptbndsend"        "secrdmrtgsend"        
+ [53] "unsecurednotesend"     "retainedearnend"      
+ [55] "totnetassetend"        "nonpfrea"             
+ [57] "gftgrntsrcvd170"       "txrevnuelevied170"    
+ [59] "srvcsval170"           "grsinc170"            
+ [61] "grsrcptsrelated170"    "totgftgrntrcvd509"    
+ [63] "grsrcptsadmissn509"    "txrevnuelevied509"    
+ [65] "srvcsval509"           "subtotsuppinc509"     
+ [67] "totsupp509"            "ein"                  
+ [69] "organization"          "eostatus"             
+ [71] "tax_yr"                "operatingcd"          
+ [73] "assetcdgen"            "transinccd"           
+ [75] "subcd"                 "grscontrgifts"        
+ [77] "intrstrvnue"           "dividndsamt"          
+ [79] "totexcapgn"            "totexcapls"           
+ [81] "grsprofitbus"          "otherincamt"          
+ [83] "compofficers"          "contrpdpbks"          
+ [85] "totrcptperbks"         "totexpnspbks"         
+ [87] "excessrcpts"           "totexpnsexempt"       
+ [89] "netinvstinc"           "totaxpyr"             
+ [91] "adjnetinc"             "invstgovtoblig"       
+ [93] "invstcorpstk"          "invstcorpbnd"         
+ [95] "totinvstsec"           "fairmrktvalamt"       
+ [97] "undistribincyr"        "cmpmininvstret"       
+ [99] "sec4940notxcd"         "sec4940redtxcd"       
+[101] "infleg"                "contractncd"          
+[103] "claimstatcd"           "propexchcd"           
+[105] "brwlndmnycd"           "furngoodscd"          
+[107] "paidcmpncd"            "trnsothasstscd"       
+[109] "agremkpaycd"           "undistrinccd"         
+[111] "dirindirintcd"         "invstjexmptcd"        
+[113] "propgndacd"            "excesshldcd"          
+[115] "grntindivcd"           "nchrtygrntcd"         
+[117] "nreligiouscd"          "grsrents"             
+[119] "costsold"              "totrcptnetinc"        
+[121] "trcptadjnetinc"        "topradmnexpnsa"       
+[123] "topradmnexpnsb"        "topradmnexpnsd"       
+[125] "totexpnsnetinc"        "totexpnsadjnet"       
+[127] "othrcashamt"           "mrtgloans"            
+[129] "othrinvstend"          "fairmrktvaleoy"       
+[131] "mrtgnotespay"          "tfundnworth"          
+[133] "invstexcisetx"         "sect511tx"            
+[135] "subtitleatx"           "esttaxcr"             
+[137] "txwithldsrc"           "txpaidf2758"          
+[139] "erronbkupwthld"        "estpnlty"             
+[141] "balduopt"              "crelamt"              
+[143] "tfairmrktunuse"        "distribamt"           
+[145] "adjnetinccola"         "adjnetinccolb"        
+[147] "adjnetinccolc"         "adjnetinccold"        
+[149] "adjnetinctot"          "qlfydistriba"         
+[151] "qlfydistribb"          "qlfydistribc"         
+[153] "qlfydistribd"          "qlfydistribtot"       
+[155] "valassetscola"         "valassetscolb"        
+[157] "valassetscolc"         "valassetscold"        
+[159] "valassetstot"          "qlfyasseta"           
+[161] "qlfyassetb"            "qlfyassetc"           
+[163] "qlfyassetd"            "qlfyassettot"         
+[165] "endwmntscola"          "endwmntscolb"         
+[167] "endwmntscolc"          "endwmntscold"         
+[169] "endwmntstot"           "totsuprtcola"         
+[171] "totsuprtcolb"          "totsuprtcolc"         
+[173] "totsuprtcold"          "totsuprttot"          
+[175] "pubsuprtcola"          "pubsuprtcolb"         
+[177] "pubsuprtcolc"          "pubsuprtcold"         
+[179] "pubsuprttot"           "grsinvstinca"         
+[181] "grsinvstincb"          "grsinvstincc"         
+[183] "grsinvstincd"          "grsinvstinctot"       
+```
+
+From here, we can go straight to analyzing the filings data without any further tedious data manipulation.
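+
+For instance, a quick sanity check of the combined data (a hypothetical follow-up, not part of the API example; it assumes `totrevenue` was parsed as a numeric column):
+
+```r
+#hypothetical example: inspect the revenue distribution across all 525 filings
+summary(filings$totrevenue)
+```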
diff --git a/inst/doc/json-paging.html b/inst/doc/json-paging.html
new file mode 100644
index 0000000..0155e61
--- /dev/null
+++ b/inst/doc/json-paging.html
@@ -0,0 +1,260 @@
+<!DOCTYPE html>
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+
+<head>
+
+<meta charset="utf-8">
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="pandoc" />
+
+
+<meta name="date" content="2015-09-06" />
+
+<title>Combining pages of JSON data with jsonlite</title>
+
+<script src="data:application/x-javascript,%2F%2A%21%20jQuery%20v1%2E11%2E0%20%7C%20%28c%29%202005%2C%202014%20jQuery%20Foundation%2C%20Inc%2E%20%7C%20jquery%2Eorg%2Flicense%20%2A%2F%0A%21function%28a%2Cb%29%7B%22object%22%3D%3Dtypeof%20module%26%26%22object%22%3D%3Dtypeof%20module%2Eexports%3Fmodule%2Eexports%3Da%2Edocument%3Fb%28a%2C%210%29%3Afunction%28a%29%7Bif%28%21a%2Edocument%29throw%20new%20Error%28%22jQuery%20requires%20a%20window%20with%20a%20document%22%29%3Breturn%20b%28a%29% [...]
+<meta name="viewport" content="width=device-width, initial-scale=1" />
+<link href="data:text/css,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%2F%2A%21%20normalize%2Ecss%20v3%2E0%2E2%20%7C%20MIT%20License%20%7C%20git%2Eio%2Fnormalize%20%2A%2Fhtml%7Bfont%2Dfamily%3Asans%2Dserif%3B%2Dwebkit%2Dtext%2Dsize%2Dadjust%3A100%25%3B%2Dms%2Dtext%2Dsiz [...]
+<script src="data:application/x-javascript,%2F%2A%21%0A%20%2A%20Bootstrap%20v3%2E3%2E1%20%28http%3A%2F%2Fgetbootstrap%2Ecom%29%0A%20%2A%20Copyright%202011%2D2014%20Twitter%2C%20Inc%2E%0A%20%2A%20Licensed%20under%20MIT%20%28https%3A%2F%2Fgithub%2Ecom%2Ftwbs%2Fbootstrap%2Fblob%2Fmaster%2FLICENSE%29%0A%20%2A%2F%0Aif%28%22undefined%22%3D%3Dtypeof%20jQuery%29throw%20new%20Error%28%22Bootstrap%27s%20JavaScript%20requires%20jQuery%22%29%3B%2Bfunction%28a%29%7Bvar%20b%3Da%2Efn%2Ejquery%2Esplit%2 [...]
+<script src="data:application/x-javascript,%2F%2A%2A%0A%2A%20%40preserve%20HTML5%20Shiv%203%2E7%2E2%20%7C%20%40afarkas%20%40jdalton%20%40jon%5Fneal%20%40rem%20%7C%20MIT%2FGPL2%20Licensed%0A%2A%2F%0A%2F%2F%20Only%20run%20this%20code%20in%20IE%208%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%2Cb%29%7Bfunction%20c%28a%2Cb%29%7Bvar%20c%3Da%2EcreateElement%28%22p%22%29%2Cd%3Da%2EgetElementsByTagName%28%22head%22%29%5B0%5D%7C%7Ca%2Edocum [...]
+<script src="data:application/x-javascript,%2F%2A%21%20Respond%2Ejs%20v1%2E4%2E2%3A%20min%2Fmax%2Dwidth%20media%20query%20polyfill%20%2A%20Copyright%202013%20Scott%20Jehl%0A%20%2A%20Licensed%20under%20https%3A%2F%2Fgithub%2Ecom%2Fscottjehl%2FRespond%2Fblob%2Fmaster%2FLICENSE%2DMIT%0A%20%2A%20%20%2A%2F%0A%0Aif%20%28%21%21window%2Enavigator%2EuserAgent%2Ematch%28%22MSIE%208%22%29%29%20%7B%0A%21function%28a%29%7B%22use%20strict%22%3Ba%2EmatchMedia%3Da%2EmatchMedia%7C%7Cfunction%28a%29%7Bvar [...]
+
+<style type="text/css">code{white-space: pre;}</style>
+<link href="data:text/css,pre%20%2Eoperator%2C%0Apre%20%2Eparen%20%7B%0A%20color%3A%20rgb%28104%2C%20118%2C%20135%29%0A%7D%0A%0Apre%20%2Eliteral%20%7B%0A%20color%3A%20%23990073%0A%7D%0A%0Apre%20%2Enumber%20%7B%0A%20color%3A%20%23099%3B%0A%7D%0A%0Apre%20%2Ecomment%20%7B%0A%20color%3A%20%23998%3B%0A%20font%2Dstyle%3A%20italic%0A%7D%0A%0Apre%20%2Ekeyword%20%7B%0A%20color%3A%20%23900%3B%0A%20font%2Dweight%3A%20bold%0A%7D%0A%0Apre%20%2Eidentifier%20%7B%0A%20color%3A%20rgb%280%2C%200%2C%200%29 [...]
+<script src="data:application/x-javascript,%0Avar%20hljs%3Dnew%20function%28%29%7Bfunction%20m%28p%29%7Breturn%20p%2Ereplace%28%2F%26%2Fgm%2C%22%26amp%3B%22%29%2Ereplace%28%2F%3C%2Fgm%2C%22%26lt%3B%22%29%7Dfunction%20f%28r%2Cq%2Cp%29%7Breturn%20RegExp%28q%2C%22m%22%2B%28r%2EcI%3F%22i%22%3A%22%22%29%2B%28p%3F%22g%22%3A%22%22%29%29%7Dfunction%20b%28r%29%7Bfor%28var%20p%3D0%3Bp%3Cr%2EchildNodes%2Elength%3Bp%2B%2B%29%7Bvar%20q%3Dr%2EchildNodes%5Bp%5D%3Bif%28q%2EnodeName%3D%3D%22CODE%22%29%7B [...]
+<style type="text/css">
+  pre:not([class]) {
+    background-color: white;
+  }
+</style>
+<script type="text/javascript">
+if (window.hljs && document.readyState && document.readyState === "complete") {
+   window.setTimeout(function() {
+      hljs.initHighlighting();
+   }, 0);
+}
+</script>
+
+
+
+</head>
+
+<body>
+
+<style type="text/css">
+.main-container {
+  max-width: 940px;
+  margin-left: auto;
+  margin-right: auto;
+}
+code {
+  color: inherit;
+  background-color: rgba(0, 0, 0, 0.04);
+}
+img { 
+  max-width:100%; 
+  height: auto; 
+}
+</style>
+<div class="container-fluid main-container">
+
+
+<div id="header">
+<h1 class="title">Combining pages of JSON data with jsonlite</h1>
+<h4 class="date"><em>2015-09-06</em></h4>
+</div>
+
+
+<p>The <a href="https://cran.r-project.org/package=jsonlite">jsonlite</a> package is a <code>JSON</code> parser/generator for R which is optimized for pipelines and web APIs. It is used by the OpenCPU system and many other packages to get data in and out of R using the <code>JSON</code> format.</p>
+<div id="a-bidirectional-mapping" class="section level2">
+<h2>A bidirectional mapping</h2>
+<p>One of the main strengths of <code>jsonlite</code> is that it implements a bidirectional <a href="http://arxiv.org/abs/1403.2805">mapping</a> between JSON and data frames. This means it can convert nested collections of JSON records, as they often appear on the web, directly into the appropriate R structure. For example, to grab some data from ProPublica we can simply use:</p>
+<pre class="r"><code>library(jsonlite)
+mydata <- fromJSON("https://projects.propublica.org/forensics/geos.json", flatten = TRUE)
+View(mydata)</code></pre>
+<p>The <code>mydata</code> object is a data frame which can be used directly for modeling or visualization, without the need for any further complicated data manipulation.</p>
+</div>
+<div id="paging-with-jsonlite" class="section level2">
+<h2>Paging with jsonlite</h2>
+<p>A question that comes up frequently is how to combine pages of data. Most web APIs limit the amount of data that can be retrieved per request. If the client needs more data than fits in a single request, it needs to break the data down into multiple requests that each retrieve a fragment (page) of the data, not unlike pages in a book. In practice this is often implemented using a <code>page</code> parameter in the API. Below is an example from the <a href="http://projects.propublica [...]
+<pre class="r"><code>baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+mydata0 <- fromJSON(paste0(baseurl, "&page=0"), flatten = TRUE)
+mydata1 <- fromJSON(paste0(baseurl, "&page=1"), flatten = TRUE)
+mydata2 <- fromJSON(paste0(baseurl, "&page=2"), flatten = TRUE)
+
+#The actual data is in the filings element
+mydata0$filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]</code></pre>
+<pre><code>                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380</code></pre>
+<p>To analyze or visualize these data, we need to combine the pages into a single dataset. We can do this with the <code>rbind.pages</code> function. Note that in this example, the actual data is contained in the <code>filings</code> field:</p>
+<pre class="r"><code>#Rows per data frame
+nrow(mydata0$filings)</code></pre>
+<pre><code>[1] 25</code></pre>
+<pre class="r"><code>#Combine data frames
+filings <- rbind.pages(
+  list(mydata0$filings, mydata1$filings, mydata2$filings)
+)
+
+#Total number of rows
+nrow(filings)</code></pre>
+<pre><code>[1] 75</code></pre>
+</div>
+<div id="automatically-combining-many-pages" class="section level2">
+<h2>Automatically combining many pages</h2>
+<p>We can write a simple loop that automatically downloads and combines many pages. For example, to retrieve pages 0 through 20 of the non-profit filings from the example above:</p>
+<pre class="r"><code>#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:20){
+  message("Retrieving page ", i)
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)</code></pre>
+<pre><code>[1] 525</code></pre>
+<pre class="r"><code>colnames(filings)</code></pre>
+<pre><code>  [1] "tax_prd"               "tax_prd_yr"           
+  [3] "formtype"              "pdf_url"              
+  [5] "updated"               "totrevenue"           
+  [7] "totfuncexpns"          "totassetsend"         
+  [9] "totliabend"            "pct_compnsatncurrofcr"
+ [11] "tax_pd"                "subseccd"             
+ [13] "unrelbusinccd"         "initiationfees"       
+ [15] "grsrcptspublicuse"     "grsincmembers"        
+ [17] "grsincother"           "totcntrbgfts"         
+ [19] "totprgmrevnue"         "invstmntinc"          
+ [21] "txexmptbndsproceeds"   "royaltsinc"           
+ [23] "grsrntsreal"           "grsrntsprsnl"         
+ [25] "rntlexpnsreal"         "rntlexpnsprsnl"       
+ [27] "rntlincreal"           "rntlincprsnl"         
+ [29] "netrntlinc"            "grsalesecur"          
+ [31] "grsalesothr"           "cstbasisecur"         
+ [33] "cstbasisothr"          "gnlsecur"             
+ [35] "gnlsothr"              "netgnls"              
+ [37] "grsincfndrsng"         "lessdirfndrsng"       
+ [39] "netincfndrsng"         "grsincgaming"         
+ [41] "lessdirgaming"         "netincgaming"         
+ [43] "grsalesinvent"         "lesscstofgoods"       
+ [45] "netincsales"           "miscrevtot11e"        
+ [47] "compnsatncurrofcr"     "othrsalwages"         
+ [49] "payrolltx"             "profndraising"        
+ [51] "txexmptbndsend"        "secrdmrtgsend"        
+ [53] "unsecurednotesend"     "retainedearnend"      
+ [55] "totnetassetend"        "nonpfrea"             
+ [57] "gftgrntsrcvd170"       "txrevnuelevied170"    
+ [59] "srvcsval170"           "grsinc170"            
+ [61] "grsrcptsrelated170"    "totgftgrntrcvd509"    
+ [63] "grsrcptsadmissn509"    "txrevnuelevied509"    
+ [65] "srvcsval509"           "subtotsuppinc509"     
+ [67] "totsupp509"            "ein"                  
+ [69] "organization"          "eostatus"             
+ [71] "tax_yr"                "operatingcd"          
+ [73] "assetcdgen"            "transinccd"           
+ [75] "subcd"                 "grscontrgifts"        
+ [77] "intrstrvnue"           "dividndsamt"          
+ [79] "totexcapgn"            "totexcapls"           
+ [81] "grsprofitbus"          "otherincamt"          
+ [83] "compofficers"          "contrpdpbks"          
+ [85] "totrcptperbks"         "totexpnspbks"         
+ [87] "excessrcpts"           "totexpnsexempt"       
+ [89] "netinvstinc"           "totaxpyr"             
+ [91] "adjnetinc"             "invstgovtoblig"       
+ [93] "invstcorpstk"          "invstcorpbnd"         
+ [95] "totinvstsec"           "fairmrktvalamt"       
+ [97] "undistribincyr"        "cmpmininvstret"       
+ [99] "sec4940notxcd"         "sec4940redtxcd"       
+[101] "infleg"                "contractncd"          
+[103] "claimstatcd"           "propexchcd"           
+[105] "brwlndmnycd"           "furngoodscd"          
+[107] "paidcmpncd"            "trnsothasstscd"       
+[109] "agremkpaycd"           "undistrinccd"         
+[111] "dirindirintcd"         "invstjexmptcd"        
+[113] "propgndacd"            "excesshldcd"          
+[115] "grntindivcd"           "nchrtygrntcd"         
+[117] "nreligiouscd"          "grsrents"             
+[119] "costsold"              "totrcptnetinc"        
+[121] "trcptadjnetinc"        "topradmnexpnsa"       
+[123] "topradmnexpnsb"        "topradmnexpnsd"       
+[125] "totexpnsnetinc"        "totexpnsadjnet"       
+[127] "othrcashamt"           "mrtgloans"            
+[129] "othrinvstend"          "fairmrktvaleoy"       
+[131] "mrtgnotespay"          "tfundnworth"          
+[133] "invstexcisetx"         "sect511tx"            
+[135] "subtitleatx"           "esttaxcr"             
+[137] "txwithldsrc"           "txpaidf2758"          
+[139] "erronbkupwthld"        "estpnlty"             
+[141] "balduopt"              "crelamt"              
+[143] "tfairmrktunuse"        "distribamt"           
+[145] "adjnetinccola"         "adjnetinccolb"        
+[147] "adjnetinccolc"         "adjnetinccold"        
+[149] "adjnetinctot"          "qlfydistriba"         
+[151] "qlfydistribb"          "qlfydistribc"         
+[153] "qlfydistribd"          "qlfydistribtot"       
+[155] "valassetscola"         "valassetscolb"        
+[157] "valassetscolc"         "valassetscold"        
+[159] "valassetstot"          "qlfyasseta"           
+[161] "qlfyassetb"            "qlfyassetc"           
+[163] "qlfyassetd"            "qlfyassettot"         
+[165] "endwmntscola"          "endwmntscolb"         
+[167] "endwmntscolc"          "endwmntscold"         
+[169] "endwmntstot"           "totsuprtcola"         
+[171] "totsuprtcolb"          "totsuprtcolc"         
+[173] "totsuprtcold"          "totsuprttot"          
+[175] "pubsuprtcola"          "pubsuprtcolb"         
+[177] "pubsuprtcolc"          "pubsuprtcold"         
+[179] "pubsuprttot"           "grsinvstinca"         
+[181] "grsinvstincb"          "grsinvstincc"         
+[183] "grsinvstincd"          "grsinvstinctot"       </code></pre>
+<p>From here, we can go straight to analyzing the filings data without any further tedious data manipulation.</p>
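+<p>For instance, a quick sanity check of the combined data (a hypothetical follow-up, not part of the API example; it assumes <code>totrevenue</code> was parsed as a numeric column):</p>
+<pre class="r"><code>#hypothetical example: inspect the revenue distribution across all 525 filings
+summary(filings$totrevenue)</code></pre>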
+</div>
+
+
+</div>
+
+<script>
+
+// add bootstrap table styles to pandoc tables
+$(document).ready(function () {
+  $('tr.header').parent('thead').parent('table').addClass('table table-condensed');
+});
+
+</script>
+
+<!-- dynamically load mathjax for compatibility with self-contained -->
+<script>
+  (function () {
+    var script = document.createElement("script");
+    script.type = "text/javascript";
+    script.src  = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
+    document.getElementsByTagName("head")[0].appendChild(script);
+  })();
+</script>
+
+</body>
+</html>
diff --git a/inst/tests/flatten.R b/inst/tests/flatten.R
new file mode 100644
index 0000000..a310828
--- /dev/null
+++ b/inst/tests/flatten.R
@@ -0,0 +1,9 @@
+context("flatten")
+
+test_that("flattening", {
+  x <- list(test = data.frame(foo=1:3))
+  x$test$bar <- data.frame(x=5:3, y=7:9)
+  expect_that(x, equals(fromJSON(toJSON(x), flatten = FALSE)));
+  expect_that(names(fromJSON(toJSON(x), flatten = TRUE)$test), equals(c("foo", "bar.x", "bar.y")))
+});
+
diff --git a/inst/tests/helper-toJSON.R b/inst/tests/helper-toJSON.R
new file mode 100644
index 0000000..35d2073
--- /dev/null
+++ b/inst/tests/helper-toJSON.R
@@ -0,0 +1,11 @@
+# Test helper: minified toJSON that returns a plain character string
+toJSON <- function(...){
+  unclass(minify(jsonlite::toJSON(...)))
+}
+
+# Variant that keeps names on vectors and auto-unboxes length-1 vectors
+toJSON2 <- function(x) {
+  toJSON(x, keep_vec_names = TRUE, auto_unbox = TRUE)
+}
+
+# Variant that additionally encodes data frames by column, without row names
+toJSON3 <- function(x) {
+  toJSON(x, keep_vec_names = TRUE, auto_unbox = TRUE, dataframe = "columns", rownames = FALSE)
+}
diff --git a/inst/tests/issues.txt b/inst/tests/issues.txt
new file mode 100644
index 0000000..4ab0221
--- /dev/null
+++ b/inst/tests/issues.txt
@@ -0,0 +1,4 @@
+#For timeseries, numeric precision can result in corrupt objects:
+out <- unserializeJSON(serializeJSON(AirPassengers, digits=5))
+all.equal(out, AirPassengers, tolerance=1e-10)
+print(out)
\ No newline at end of file
diff --git a/inst/tests/readme.txt b/inst/tests/readme.txt
new file mode 100644
index 0000000..5b5508e
--- /dev/null
+++ b/inst/tests/readme.txt
@@ -0,0 +1,6 @@
+This directory contains unit tests for use with the testthat package.
+They are intended to be run by a non-root user.
+To run them, install this package and run:
+
+library(testthat)
+test_package("jsonlite")
diff --git a/inst/tests/test-fromJSON-NA-values.R b/inst/tests/test-fromJSON-NA-values.R
new file mode 100644
index 0000000..d5c8637
--- /dev/null
+++ b/inst/tests/test-fromJSON-NA-values.R
@@ -0,0 +1,24 @@
+context("fromJSON NA values")
+
+test_that("fromJSON NA values", {
+  
+  objects <- list(
+    numbers = c(1,2, NA, NaN, Inf, -Inf, 3.14),
+    logical = c(TRUE, FALSE, NA),
+    integers = as.integer(c(1,2,3)),
+    num = 3.14,
+    bool = FALSE,
+    character = c("FOO","NA", NA, "NaN"),
+    integer = 21L,
+    boolNA = as.logical(NA),
+    df = data.frame(foo=c(1,NA))
+  )
+  
+  #test all but list
+  lapply(objects, function(object){
+    expect_that(fromJSON(toJSON(object)), equals(object))   
+  });
+  
+  #test all in list
+  expect_that(fromJSON(toJSON(objects)), equals(objects))
+});
diff --git a/inst/tests/test-fromJSON-array.R b/inst/tests/test-fromJSON-array.R
new file mode 100644
index 0000000..f0c396d
--- /dev/null
+++ b/inst/tests/test-fromJSON-array.R
@@ -0,0 +1,53 @@
+context("fromJSON Array")
+
+test_that("fromJSON Array, row major", {
+
+  # test high dimensional arrays
+  lapply(2:5, function(n){
+    object <- array(1:prod(n), dim=1:n)
+    newobject <- fromJSON(toJSON(object));
+    expect_that(object, equals(newobject));
+  });
+  
+  # adding some flat dimensions
+  lapply(1:5, function(n){
+    object <- array(1:prod(n), dim=c(1:n, 1))
+    newobject <- fromJSON(toJSON(object));
+    expect_that(object, equals(newobject));
+  });  
+});
+
+test_that("fromJSON Array, column major", {
+  
+  # test high dimensional arrays
+  lapply(2:5, function(n){
+    object <- array(1:prod(n), dim=1:n)
+    newobject <- fromJSON(toJSON(object, matrix="columnmajor"), columnmajor=TRUE);
+    expect_that(object, equals(newobject));
+  });
+  
+  # adding some flat dimensions
+  lapply(1:5, function(n){
+    object <- array(1:prod(n), dim=c(1:n, 1))
+    newobject <- fromJSON(toJSON(object, matrix="columnmajor"), columnmajor=TRUE);
+    expect_that(object, equals(newobject));
+  });  
+});
+
+
+test_that("fromJSON Array, character strings", {
+  
+  # test high dimensional arrays
+  lapply(2:5, function(n){
+    object <- array(paste("cell", 1:prod(n)), dim=1:n)
+    newobject <- fromJSON(toJSON(object, matrix="columnmajor"), columnmajor=TRUE);
+    expect_that(object, equals(newobject));
+  });
+  
+  # adding some flat dimensions
+  lapply(1:5, function(n){
+    object <- array(paste("cell", 1:prod(n)), dim=c(1:n, 1))
+    newobject <- fromJSON(toJSON(object, matrix="columnmajor"), columnmajor=TRUE);
+    expect_that(object, equals(newobject));
+  });  
+});
\ No newline at end of file
diff --git a/inst/tests/test-fromJSON-dataframe.R b/inst/tests/test-fromJSON-dataframe.R
new file mode 100644
index 0000000..407a7fd
--- /dev/null
+++ b/inst/tests/test-fromJSON-dataframe.R
@@ -0,0 +1,59 @@
+context("fromJSON dataframes")
+
+options(stringsAsFactors=FALSE);
+
+test_that("recover nested data frames", {
+
+  x1 <- x2 <- x3 <- x4 <- x5 <- x6 <- data.frame(foo=c(1:2));
+  x2$bar <- c("jeroen", "eli");
+  x3$bar <- x4$bar <- x5$bar <- x6$bar <- data.frame(name=c("jeroen", "eli"))
+  x4$bar$age <- x5$bar$age <- c(28, 24);  x6$bar$age <- c(28, NA);
+  x5$bar$food <- data.frame(yum=c("Rice", "Pasta"));
+  x6$bar$food <- data.frame(yum=c(NA, "Pasta"));
+
+  #add to list
+  objects <- list(x1, x2, x3, x4, x5, x6)
+
+  #test all but list
+  lapply(objects, function(object){
+    expect_that(fromJSON(toJSON(object)), equals(object))
+    expect_that(fromJSON(toJSON(object, na="null")), equals(object))
+    expect_that(names(fromJSON(toJSON(object), flatten = TRUE)), equals(names(unlist(object[1,,drop=FALSE]))))
+  });
+
+  #test all in list
+  expect_that(fromJSON(toJSON(objects)), equals(objects))
+});
+
+test_that("recover lists in data frames", {
+  x <- data.frame(author = c("Homer", "Virgil", "Jeroen"));
+  x$poems = list(c("Iliad", "Odyssey"), c("Eclogues", "Georgics", "Aeneid"), character());
+
+  y <- data.frame(author = c("Homer", "Virgil", "Jeroen"));
+  y$poems = list(
+    data.frame(title=c("Iliad", "Odyssey"), year=c(-1194, -800)),
+    data.frame(title=c("Eclogues", "Georgics", "Aeneid"), year=c(-44, -29, -19)),
+    data.frame()
+  );
+
+  z <- list(x=x, y=y);
+  zz <- list(x,y);
+
+  expect_that(fromJSON(toJSON(x)), equals(x))
+  expect_that(fromJSON(toJSON(y)), equals(y))
+  expect_that(fromJSON(toJSON(z)), equals(z))
+  expect_that(fromJSON(toJSON(zz)), equals(zz))
+});
+
+#note: nested matrix does not perfectly restore
+test_that("nested matrix in data frame", {
+  x <- data.frame(foo=1:2)
+  x$bar <- matrix(c(1:5, NA), 2)
+
+  expect_that(validate(toJSON(x)), is_true())
+
+  y <- fromJSON(toJSON(x))
+  expect_that(y, is_a("data.frame"))
+  expect_that(names(x), equals(names(y)))
+  expect_that(length(y[[1,"bar"]]), equals(3))
+});
diff --git a/inst/tests/test-fromJSON-datasets.R b/inst/tests/test-fromJSON-datasets.R
new file mode 100644
index 0000000..188cee9
--- /dev/null
+++ b/inst/tests/test-fromJSON-datasets.R
@@ -0,0 +1,18 @@
+context("fromJSON datasets")
+
+# Note about numeric precision
+# In the unit tests we use digits=10. Lower values will result in problems for some datasets
+test_that("fromJSON datasets", {
+  objects <- Filter(is.data.frame, lapply(ls("package:datasets"), get));
+  
+  #data frames are never identical because:
+  # - attributes 
+  # - factors, times, dates turn into strings
+  # - integers turn into numeric
+  lapply(objects, function(object){
+    newobject <- fromJSON(toJSON(object))
+    expect_that(newobject, is_a("data.frame"));
+    expect_that(names(object), is_identical_to(names(newobject)));
+    expect_that(nrow(object), is_identical_to(nrow(newobject)))
+  });
+});
diff --git a/inst/tests/test-fromJSON-date.R b/inst/tests/test-fromJSON-date.R
new file mode 100644
index 0000000..b38f83f
--- /dev/null
+++ b/inst/tests/test-fromJSON-date.R
@@ -0,0 +1,18 @@
+context("fromJSON date objects")
+
+test_that("fromJSON date objects", {
+  
+  x <- Sys.time() + c(1, 2, NA, 3)
+  mydf <- data.frame(x=x)
+  expect_that(fromJSON(toJSON(x, POSIXt="mongo")), is_a("POSIXct")) 
+  expect_that(fromJSON(toJSON(x, POSIXt="mongo")), equals(x))
+  expect_that(fromJSON(toJSON(x, POSIXt="mongo", na="string")), is_a("POSIXct")) 
+  expect_that(fromJSON(toJSON(x, POSIXt="mongo", na="null")), is_a("POSIXct"))  
+  
+  expect_that(fromJSON(toJSON(mydf, POSIXt="mongo")), is_a("data.frame"))
+  expect_that(fromJSON(toJSON(mydf, POSIXt="mongo"))$x, is_a("POSIXct"))
+  expect_that(fromJSON(toJSON(mydf, POSIXt="mongo", na="string"))$x, is_a("POSIXct"))
+  expect_that(fromJSON(toJSON(mydf, POSIXt="mongo", na="null"))$x, is_a("POSIXct"))  
+  expect_that(fromJSON(toJSON(mydf, POSIXt="mongo"))$x, equals(x))
+
+});
diff --git a/inst/tests/test-fromJSON-matrix.R b/inst/tests/test-fromJSON-matrix.R
new file mode 100644
index 0000000..911bf9e
--- /dev/null
+++ b/inst/tests/test-fromJSON-matrix.R
@@ -0,0 +1,44 @@
+context("fromJSON Matrix")
+
+# Note about numeric precision
+# In the unit tests we use digits=10. Lower values will result in problems for some datasets
+test_that("fromJSON Matrix", {
+  objects <- list(
+    matrix(1),
+    matrix(1:2),
+    matrix(1:2, nrow=1),
+    matrix(round(pi,2)),
+    matrix(c(1,NA,2,NA), 2),
+    volcano,
+    matrix(NA)    
+  );
+  
+  lapply(objects, function(object){
+    newobject <- fromJSON(toJSON(object));
+    expect_that(newobject, is_a("matrix"));
+    expect_that(object, equals(newobject));
+  });
+  
+  expect_that(fromJSON(toJSON(objects)), equals(objects));  
+});
+
+test_that("fromJSON Matrix with simplifyMatrix=FALSE", {
+  expect_that(fromJSON(toJSON(matrix(1)), simplifyMatrix=FALSE), equals(list(1)));
+  expect_that(fromJSON(toJSON(matrix(1)), simplifyVector=FALSE), equals(list(list(1))));
+  expect_that(fromJSON(toJSON(matrix(NA)), simplifyMatrix=FALSE), equals(list(NA)));
+  expect_that(fromJSON(toJSON(matrix(NA)), simplifyVector=FALSE), equals(list(list(NULL))));
+});
+
+
+test_that("fromJSON Matrix datasets", {
+  objects <- Filter(is.matrix, lapply(ls("package:datasets"), get));
+  
+  lapply(objects, function(object){
+    class(object) <- "matrix";
+    newobject <- fromJSON(toJSON(object, digits=4))
+    expect_that(newobject, is_a("matrix"));
+    expect_that(dim(newobject), equals(dim(object)));
+    attributes(newobject) <- attributes(object);
+    expect_that(newobject, equals(round(object,4)));
+  });
+});
diff --git a/inst/tests/test-libjson-escaping.R b/inst/tests/test-libjson-escaping.R
new file mode 100644
index 0000000..9aa475b
--- /dev/null
+++ b/inst/tests/test-libjson-escaping.R
@@ -0,0 +1,29 @@
+context("libjson Escaping")
+
+test_that("escaping and parsing of special characters", {
+
+  #create random strings
+  mychars <- c('a', 'b', " ", '"', "\\", "\t", "\n", "'", "/", "#", "$");
+  createstring <- function(length){
+    paste(mychars[ceiling(runif(length, 0, length(mychars)))], collapse="")
+  }
+
+  #generate 200 random strings
+  for(i in 1:200){
+    x <- createstring(i);
+    expect_that(x, equals(fromJSON(toJSON(x))));
+    expect_that(x, equals(fromJSON(toJSON(x, pretty=TRUE))));
+
+    y <- setNames(list(123), x)
+    expect_that(y, equals(fromJSON(toJSON(y, pretty=TRUE))));
+  }
+
+});
+
+test_that("filter invalid escape characters", {
+
+  #The \v and \a characters are not supported by JSON. This is a common source of bugs.
+  #expect_that(validate(toJSON("foo\v\bar\abaz")), is_true());
+  #Update: yajl doesn't support \v and \a characters at all. Dropping this test.
+
+});
diff --git a/inst/tests/test-libjson-large.R b/inst/tests/test-libjson-large.R
new file mode 100644
index 0000000..9825ad7
--- /dev/null
+++ b/inst/tests/test-libjson-large.R
@@ -0,0 +1,17 @@
+context("libjson Large strings")
+
+test_that("escaping and parsing of special characters", {
+  
+  #create random strings
+  mychars <- c('a', 'b', " ", '"', "\\", "\t", "\n", "'", "/", "#", "$");
+  createstring <- function(length){
+    paste(mychars[ceiling(runif(length, 0, length(mychars)))], collapse="")
+  }  
+
+  #try some very long strings
+  for(i in 1:10){
+    zz <- list(foo=createstring(1e5))
+    expect_that(zz, equals(fromJSON(toJSON(zz))));
+  }
+  
+});
diff --git a/inst/tests/test-libjson-utf8.R b/inst/tests/test-libjson-utf8.R
new file mode 100644
index 0000000..b8f913d
--- /dev/null
+++ b/inst/tests/test-libjson-utf8.R
@@ -0,0 +1,40 @@
+context("libjson UTF-8 characters")
+
+# Some notes: JSON defines UTF-8 as the default charset. Therefore all encoders and
+# decoders are required to support UTF-8. JSON also allows for escaped unicode, e.g.
+# \u00F8; however, this is mostly for legacy purposes. Using actual UTF-8 characters
+# is easier and more efficient.
+
+
+test_that("test that non ascii characters are ok", {
+
+  #create random strings
+  objects <- list(
+    "Zürich",
+    "北京填鴨们",
+    "ผัดไทย",
+    "寿司",
+    c("寿司", "Zürich", "foo")
+  );
+
+  lapply(objects, function(x){
+    Encoding(x) <- "UTF-8"
+    myjson <- toJSON(x, pretty=TRUE);
+    expect_that(validate(myjson), is_true());
+    expect_that(fromJSON(myjson), equals(x));
+
+    #prettify needs to parse + output
+    prettyjson <- prettify(myjson);
+    expect_that(validate(prettyjson), is_true());
+    expect_that(fromJSON(prettyjson), equals(x));
+  });
+
+  #Test escaped unicode characters
+  expect_that(fromJSON('["Z\\u00FCrich"]'), equals("Z\u00fcrich"));
+  expect_that(fromJSON(prettify('["Z\\u00FCrich"]')), equals("Z\u00fcrich"));
+
+  expect_that(length(unique(fromJSON('["Z\\u00FCrich", "Z\u00fcrich"]'))), equals(1L))
+  expect_that(fromJSON('["\\u586B"]'), equals("\u586b"));
+  expect_that(fromJSON(prettify('["\\u586B"]')), equals("\u586B"));
+
+});
diff --git a/inst/tests/test-libjson-validator.R b/inst/tests/test-libjson-validator.R
new file mode 100644
index 0000000..54453d0
--- /dev/null
+++ b/inst/tests/test-libjson-validator.R
@@ -0,0 +1,17 @@
+context("libjson Validator")
+
+test_that("test that the validator properly deals with escaped characters", {
+  
+  #create random strings
+  mychars <- c('a', 'b', " ", '"', "\\", "\t", "\n", "'", "/", "#", "$");
+  createstring <- function(length){
+    paste(mychars[ceiling(runif(length, 0, length(mychars)))], collapse="")
+  }  
+  
+  for(i in 1:200){    
+    #create some random strings to validate
+    x <- createstring(i);
+    expect_that(validate(toJSON(x)), is_true());
+  }
+  
+});
diff --git a/inst/tests/test-network-Github.R b/inst/tests/test-network-Github.R
new file mode 100644
index 0000000..a688e45
--- /dev/null
+++ b/inst/tests/test-network-Github.R
@@ -0,0 +1,66 @@
+context("Github API")
+
+test_that("Non Nested", {  
+  mydata <- fromJSON("https://api.github.com/users/hadley/orgs");
+  expect_that(mydata, is_a("data.frame"));  
+});
+
+test_that("Nested 1 Level", {  
+  mydata <- fromJSON("https://api.github.com/users/hadley/repos");
+  expect_that(mydata, is_a("data.frame"));
+  expect_that(mydata$owner, is_a("data.frame"));
+  expect_that(nrow(mydata), equals(nrow(mydata$owner)));
+});
+
+
+test_that("Nested 1 Level", {  
+  mydata <- fromJSON("https://api.github.com/repos/hadley/ggplot2/issues");
+  expect_that(mydata, is_a("data.frame"));
+  expect_that(mydata$user, is_a("data.frame"));
+  expect_that(mydata$pull_request, is_a("data.frame"));
+  expect_that(nrow(mydata), equals(nrow(mydata$pull_request)));
+});
+
+test_that("Nested 1 Level within list", {  
+  mydata <- fromJSON("https://api.github.com/search/repositories?q=tetris+language:assembly&sort=stars&order=desc");
+  expect_that(mydata, is_a("list"));
+  expect_that(mydata$items, is_a("data.frame"));
+  expect_that(mydata$items$owner, is_a("data.frame"));  
+  expect_that(nrow(mydata$items), equals(nrow(mydata$items$owner)));
+});
+
+test_that("Nested 2 Level", {  
+  mydata <- fromJSON("https://api.github.com/repos/hadley/ggplot2/commits");
+  expect_that(mydata, is_a("data.frame"));
+  expect_that(mydata$commit, is_a("data.frame"));
+  expect_that(mydata$commit$author, is_a("data.frame"));
+  expect_that(mydata$commit$author$name, is_a("character"));
+  expect_that(nrow(mydata), equals(nrow(mydata$commit)));
+  expect_that(nrow(mydata), equals(nrow(mydata$commit$author)));
+});
+
+test_that("Nested inconsistent (payload), one-to-many", {  
+  mydata <- fromJSON("https://api.github.com/users/hadley/events");
+  expect_that(mydata, is_a("data.frame"));
+  expect_that(mydata$actor, is_a("data.frame"));
+  expect_that(mydata$repo, is_a("data.frame"));
+  expect_that(mydata$type, is_a("character"));
+  expect_that(mydata$payload, is_a("data.frame"));
+  
+  #this is dynamic, depends on data
+  if(any(mydata$type == "PushEvent")){
+    expect_that(all(vapply(mydata$payload$commits, function(x){is.null(x) || is.data.frame(x)}, logical(1))), is_true());
+  }
+});
+
+test_that("Nested inconsistent (payload), one-to-many", {  
+  mydata <- fromJSON("https://api.github.com/repos/hadley/ggplot2/events");
+  if(any("ForkEvent" %in% mydata$type)){
+    expect_that(mydata$payload$forkee$owner, is_a("data.frame"))
+  }
+  
+  if(any(mydata$type %in% c("IssuesEvent", "IssueCommentEvent"))){
+    expect_that(mydata$payload$issue, is_a("data.frame"));
+    expect_that(mydata$payload$issue$user, is_a("data.frame"));
+  }
+});
diff --git a/inst/tests/test-serializeJSON-datasets.R b/inst/tests/test-serializeJSON-datasets.R
new file mode 100644
index 0000000..b425fff
--- /dev/null
+++ b/inst/tests/test-serializeJSON-datasets.R
@@ -0,0 +1,18 @@
+#test serializeJSON 
+
+context("Serializing Datasets")
+
+# Note about numeric precision
+# In the unit tests we use digits=10. Lower values will result in problems for some datasets
+test_that("Serializing datasets", {
+  library(datasets);
+  lapply(as.list(ls("package:datasets")), function(x){
+    mycall <- call("expect_that", 
+      call("unserializeJSON", call("serializeJSON", as.name(x), digits=10)),
+      call("equals", as.name(x))
+    );
+    eval(mycall)
+  });
+});
+
+
diff --git a/inst/tests/test-serializeJSON-functions.R b/inst/tests/test-serializeJSON-functions.R
new file mode 100644
index 0000000..5ad81e4
--- /dev/null
+++ b/inst/tests/test-serializeJSON-functions.R
@@ -0,0 +1,26 @@
+#test serializeJSON 
+
+context("Serializing Functions")
+
+# Note about numeric precision
+# In the unit tests we use digits=10. Lower values will result in problems for some datasets
+test_that("Serializing Functions", {
+  
+  options(keep.source=FALSE);
+  
+  objects <- list(
+    function(x = 0) { x + 1 },
+    function(x) { x + 1 },
+    function(x, ...) { x + 1},
+    lm
+  );
+    
+  
+  #test all but list
+  lapply(objects, function(object){
+    expect_that(unserializeJSON(serializeJSON(object)), equals(object))   
+  });
+  
+  #test all in list
+  expect_that(unserializeJSON(serializeJSON(objects)), equals(objects))
+});
diff --git a/inst/tests/test-serializeJSON-types.R b/inst/tests/test-serializeJSON-types.R
new file mode 100644
index 0000000..6cc5747
--- /dev/null
+++ b/inst/tests/test-serializeJSON-types.R
@@ -0,0 +1,38 @@
+#test serializeJSON
+
+context("Serializing Data Types")
+
+# Note about numeric precision
+# In the unit tests we use digits=10. Lower values will result in problems for some datasets
+test_that("Serializing Data Objects", {
+
+  objects <- list(
+    NULL,
+    readBin(system.file(package="base", "Meta/package.rds"), "raw", 999),
+    c(TRUE, FALSE, NA, FALSE),
+    c(1L, NA, 9999999),
+    c(round(pi, 4), NA, NaN, Inf, -Inf),
+    c("foo", NA, "bar"),
+    complex(real=1:10, imaginary=1001:1010),
+    Reaction ~ Days + (1|Subject) + (0+Days|Subject),
+    as.name("cars"),
+    as.pairlist(mtcars),
+    quote(rnorm(10)),
+    expression("to be or not to be"),
+    expression(foo),
+    parse(text="rnorm(10);"),
+    call("rnorm", n=10),
+    emptyenv(),
+    `if`, #builtin
+    `list`, #special
+    getNamespace("graphics") #namespace
+  )
+
+  #test all but list
+  lapply(objects, function(object){
+    expect_that(unserializeJSON(serializeJSON(object)), equals(object))
+  });
+
+  #test all in list
+  expect_that(unserializeJSON(serializeJSON(objects)), equals(objects))
+});
diff --git a/inst/tests/test-toJSON-AsIs.R b/inst/tests/test-toJSON-AsIs.R
new file mode 100644
index 0000000..f476343
--- /dev/null
+++ b/inst/tests/test-toJSON-AsIs.R
@@ -0,0 +1,14 @@
+context("toJSON AsIs")
+
+test_that("Encoding AsIs", {
+  expect_that(toJSON(list(1), auto_unbox=TRUE), equals("[1]"));
+  expect_that(toJSON(list(I(1)), auto_unbox=TRUE), equals("[[1]]"));
+  expect_that(toJSON(I(list(1)), auto_unbox=TRUE), equals("[1]"));
+
+  expect_that(toJSON(list(x=1)), equals("{\"x\":[1]}"));
+  expect_that(toJSON(list(x=1), auto_unbox=TRUE), equals("{\"x\":1}"));
+  expect_that(toJSON(list(x=I(1)), auto_unbox=TRUE), equals("{\"x\":[1]}"));
+
+  expect_that(toJSON(list(x=I(list(1))), auto_unbox=TRUE), equals("{\"x\":[1]}"));
+  expect_that(toJSON(list(x=list(I(1))), auto_unbox=TRUE), equals("{\"x\":[[1]]}"));
+});
diff --git a/inst/tests/test-toJSON-Date.R b/inst/tests/test-toJSON-Date.R
new file mode 100644
index 0000000..5e205f9
--- /dev/null
+++ b/inst/tests/test-toJSON-Date.R
@@ -0,0 +1,23 @@
+context("toJSON Date")
+object <- as.Date("1985-06-18");
+
+test_that("Encoding Date Objects", {
+  expect_that(toJSON(object), equals("[\"1985-06-18\"]"));
+  expect_that(toJSON(object, Date="ISO8601"), equals("[\"1985-06-18\"]"));  
+  expect_that(toJSON(object, Date="epoch"), equals("[5647]"));
+  expect_that(toJSON(object, Date="adsfdsfds"), throws_error("should be one of"));  
+});
+
+test_that("Encoding Date Objects in a list", {
+  expect_that(toJSON(list(foo=object)), equals("{\"foo\":[\"1985-06-18\"]}"));
+  expect_that(toJSON(list(foo=object), Date="ISO8601"), equals("{\"foo\":[\"1985-06-18\"]}"));  
+  expect_that(toJSON(list(foo=object), Date="epoch"), equals("{\"foo\":[5647]}"));
+  expect_that(toJSON(list(foo=object), Date="adsfdsfds"), throws_error("should be one of"));  
+});
+
+test_that("Encoding Date Objects in a Data frame", {
+  expect_that(toJSON(data.frame(foo=object)), equals("[{\"foo\":\"1985-06-18\"}]"));
+  expect_that(toJSON(data.frame(foo=object), Date="ISO8601"), equals("[{\"foo\":\"1985-06-18\"}]"));  
+  expect_that(toJSON(data.frame(foo=object), Date="epoch"), equals("[{\"foo\":5647}]"));
+  expect_that(toJSON(data.frame(foo=object), Date="adsfdsfds"), throws_error("should be one of"));  
+});
diff --git a/inst/tests/test-toJSON-NA-values.R b/inst/tests/test-toJSON-NA-values.R
new file mode 100644
index 0000000..2858777
--- /dev/null
+++ b/inst/tests/test-toJSON-NA-values.R
@@ -0,0 +1,13 @@
+context("toJSON NA values")
+
+test_that("Test NA values", {
+  options(stringsAsFactors=FALSE)
+  x <- list(foo=c(TRUE, NA, FALSE, TRUE), bar=c(3.14,NA, 42, NA), zoo=c(NA, "bla", "boe", NA))
+  x$mydf <- data.frame(col1=c(FALSE, NA, NA, TRUE), col2=c(1.23, NA, 23, NA))
+  x$mydf$mylist <- list(c(TRUE, NA, FALSE, NA), NA, c("blabla", NA), c(NA,12,13,NA,NA,NA,1001))
+
+  expect_that(validate(toJSON(x)), is_true())
+  expect_that(fromJSON(toJSON(x)), equals(x))
+  expect_that(fromJSON(toJSON(x, na="null")), equals(x))
+
+});
diff --git a/inst/tests/test-toJSON-NULL-values.R b/inst/tests/test-toJSON-NULL-values.R
new file mode 100644
index 0000000..58b8030
--- /dev/null
+++ b/inst/tests/test-toJSON-NULL-values.R
@@ -0,0 +1,23 @@
+context("toJSON NULL values")
+
+test_that("Test NULL values", {
+  namedlist <- structure(list(), .Names = character(0));
+  x <- NULL
+  y <- list(a=NULL, b=NA)
+  z <- list(a=1, b=character(0))
+
+  expect_that(validate(toJSON(x)), is_true())
+  expect_that(fromJSON(toJSON(x)), equals(namedlist))
+  expect_that(toJSON(x), equals("{}"))
+  expect_that(toJSON(x, null="list"), equals("{}"))
+
+  expect_that(validate(toJSON(y)), is_true())
+  expect_that(toJSON(y, null="list"), equals("{\"a\":{},\"b\":[null]}"))
+  expect_that(toJSON(y, null="null"), equals("{\"a\":null,\"b\":[null]}"))
+  expect_that(fromJSON(toJSON(y, null="null")), equals(y))
+  expect_that(fromJSON(toJSON(y, null="list")), equals(list(a=namedlist, b=NA)))
+
+  expect_that(validate(toJSON(z)), is_true())
+  expect_that(toJSON(z), equals("{\"a\":[1],\"b\":[]}"))
+  expect_that(fromJSON(toJSON(z)), equals(list(a=1, b=list())))
+});
diff --git a/inst/tests/test-toJSON-POSIXt.R b/inst/tests/test-toJSON-POSIXt.R
new file mode 100644
index 0000000..f221629
--- /dev/null
+++ b/inst/tests/test-toJSON-POSIXt.R
@@ -0,0 +1,74 @@
+context("toJSON POSIXt")
+
+objects <- list(
+  as.POSIXlt("2013-06-17 22:33:44"),
+  as.POSIXct("2013-06-17 22:33:44"),
+  as.POSIXlt("2013-06-17 22:33:44", tz="Australia/Darwin"),
+  as.POSIXct("2013-06-17 22:33:44", tz="Australia/Darwin")
+)
+
+test_that("Encoding POSIXt Objects", {
+
+  #string based formats do not depend on the current local timezone
+  invisible(lapply(objects, function(object){
+    expect_that(toJSON(object), equals("[\"2013-06-17 22:33:44\"]"));  
+    expect_that(toJSON(object, POSIXt="string"), equals("[\"2013-06-17 22:33:44\"]"));
+    expect_that(toJSON(object, POSIXt="ISO8601"), equals("[\"2013-06-17T22:33:44\"]")); 
+    expect_that(toJSON(object, POSIXt="sdfsdsdf"), throws_error("one of")); 
+  }));
+  
+  #objects 1 and 2 would give a location-specific epoch, so only test the fixed-timezone objects
+  invisible(lapply(objects[3:4], function(object){
+    expect_that(toJSON(object, POSIXt="epoch"), equals("[1371474224000]"));   
+    expect_that(toJSON(object, POSIXt="mongo"), equals("[{\"$date\":1371474224000}]")); 
+  }));
+
+});
+
+test_that("Encoding POSIXt object in a list", {  
+  #string based formats do not depend on the current local timezone
+  invisible(lapply(objects, function(object){
+    expect_that(toJSON(list(foo=object)), equals("{\"foo\":[\"2013-06-17 22:33:44\"]}"));  
+    expect_that(toJSON(list(foo=object), POSIXt="string"), equals("{\"foo\":[\"2013-06-17 22:33:44\"]}"));
+    expect_that(toJSON(list(foo=object), POSIXt="ISO8601"), equals("{\"foo\":[\"2013-06-17T22:33:44\"]}")); 
+    expect_that(toJSON(list(foo=object), POSIXt="sdfsdsdf"), throws_error("one of")); 
+  }));
+  
+  #objects 1 and 2 would give a location-specific epoch, so only test the fixed-timezone objects
+  invisible(lapply(objects[3:4], function(object){
+    expect_that(toJSON(list(foo=object), POSIXt="epoch"), equals("{\"foo\":[1371474224000]}"));   
+    expect_that(toJSON(list(foo=object), POSIXt="mongo"), equals("{\"foo\":[{\"$date\":1371474224000}]}")); 
+  }));
+});
+
+test_that("Encoding POSIXt object in a list", {  
+  #string based formats do not depends on the current local timezone
+  invisible(lapply(objects, function(object){
+    expect_that(toJSON(data.frame(foo=object)), equals("[{\"foo\":\"2013-06-17 22:33:44\"}]"));  
+    expect_that(toJSON(data.frame(foo=object), POSIXt="string"), equals("[{\"foo\":\"2013-06-17 22:33:44\"}]"));
+    expect_that(toJSON(data.frame(foo=object), POSIXt="ISO8601"), equals("[{\"foo\":\"2013-06-17T22:33:44\"}]")); 
+    expect_that(toJSON(data.frame(foo=object), POSIXt="sdfsdsdf"), throws_error("one of")); 
+  }));
+  
+  #objects 1 and 2 would give a location-specific epoch, so only test the fixed-timezone objects
+  invisible(lapply(objects[3:4], function(object){
+    expect_that(toJSON(data.frame(foo=object), POSIXt="epoch"), equals("[{\"foo\":1371474224000}]"));   
+    expect_that(toJSON(data.frame(foo=object), POSIXt="mongo"), equals("[{\"foo\":{\"$date\":1371474224000}}]")); 
+  }));
+});
+
+test_that("POSIXt NA values", {
+  newobj <- list(
+    c(objects[[1]], NA),
+    c(objects[[2]], NA)
+  );
+  lapply(newobj, function(object){
+    expect_that(toJSON(object), equals("[\"2013-06-17 22:33:44\",null]"));
+    expect_that(toJSON(object, na="string"), equals("[\"2013-06-17 22:33:44\",\"NA\"]"));
+    expect_that(toJSON(data.frame(foo=object)), equals("[{\"foo\":\"2013-06-17 22:33:44\"},{}]"));
+    expect_that(toJSON(data.frame(foo=object), na="null"), equals("[{\"foo\":\"2013-06-17 22:33:44\"},{\"foo\":null}]"));
+    expect_that(toJSON(data.frame(foo=object), na="string"), equals("[{\"foo\":\"2013-06-17 22:33:44\"},{\"foo\":\"NA\"}]")); 
+  });
+});
+
+
diff --git a/inst/tests/test-toJSON-complex.R b/inst/tests/test-toJSON-complex.R
new file mode 100644
index 0000000..516f1f2
--- /dev/null
+++ b/inst/tests/test-toJSON-complex.R
@@ -0,0 +1,24 @@
+context("toJSON Complex")
+
+test_that("Encoding Complex", {
+  expect_that(toJSON(complex(real=2, imaginary=2)), equals("[\"2+2i\"]"));
+  expect_that(toJSON(complex(real=NA, imaginary=2)), equals("[\"NA\"]"));  
+  expect_that(toJSON(complex(real=1, imaginary=NA)), equals("[\"NA\"]"));   
+  expect_that(toJSON(complex(real=NA, imaginary=2), na="null"), equals("[null]")); 
+});
+
+test_that("Encoding Complex in Data Frame", {
+  expect_that(toJSON(data.frame(foo=complex(real=1, imaginary=2))), equals("[{\"foo\":\"1+2i\"}]"));
+  expect_that(toJSON(data.frame(foo=complex(real=NA, imaginary=2))), equals("[{}]"));
+  expect_that(toJSON(data.frame(foo=complex(real=NA, imaginary=2)), na="string"), equals("[{\"foo\":\"NA\"}]"));
+  expect_that(toJSON(data.frame(foo=complex(real=NA, imaginary=2)), na="null"), equals("[{\"foo\":null}]"));
+});
+
+test_that("Encoding Complex as list", {
+  x <- complex(real=c(1,2,NA), imaginary=3:1);
+  expect_that(toJSON(x), equals("[\"1+3i\",\"2+2i\",\"NA\"]"));
+  expect_that(toJSON(x, complex="list"), equals("{\"real\":[1,2,\"NA\"],\"imaginary\":[3,2,1]}"));
+  expect_that(toJSON(data.frame(foo=x), complex="list"), equals("[{\"foo\":{\"real\":1,\"imaginary\":3}},{\"foo\":{\"real\":2,\"imaginary\":2}},{\"foo\":{\"imaginary\":1}}]"));
+  expect_that(toJSON(data.frame(foo=x), complex="list", na="string"), equals("[{\"foo\":{\"real\":1,\"imaginary\":3}},{\"foo\":{\"real\":2,\"imaginary\":2}},{\"foo\":{\"real\":\"NA\",\"imaginary\":1}}]"));
+  expect_that(toJSON(data.frame(foo=x), complex="list", dataframe="columns"), equals("{\"foo\":{\"real\":[1,2,\"NA\"],\"imaginary\":[3,2,1]}}"))
+});
diff --git a/inst/tests/test-toJSON-dataframe.R b/inst/tests/test-toJSON-dataframe.R
new file mode 100644
index 0000000..7a3af97
--- /dev/null
+++ b/inst/tests/test-toJSON-dataframe.R
@@ -0,0 +1,24 @@
+context("toJSON Data Frame")
+
+test_that("data frame edge cases", {
+  #a named list column gets unnamed in the output
+  test <- data.frame(foo=1:2)
+  test$bar <- list(x=123, y=123)
+  test$baz <- data.frame(z=456:457)
+  expect_that(toJSON(test), equals('[{"foo":1,"bar":[123],"baz":{"z":456}},{"foo":2,"bar":[123],"baz":{"z":457}}]'))
+});
+
+test_that("Nested structures", {
+  
+  mydata <- data.frame(row.names=1:2)
+  mydata$d <- list(
+    data.frame(a1=1:2, a2=3:4, a3=5:6, a4=7:8),
+    data.frame(a1=11:12, a2=13:14, a3=15:16, a4=17:18)
+  )
+  mydata$m <- list(
+    matrix(1:6, nrow=2, ncol=3),
+    matrix(6:1, nrow=2, ncol=3)
+  )
+  
+  expect_that(fromJSON(toJSON(mydata)), equals(mydata));
+});
diff --git a/inst/tests/test-toJSON-factor.R b/inst/tests/test-toJSON-factor.R
new file mode 100644
index 0000000..0e87ea5
--- /dev/null
+++ b/inst/tests/test-toJSON-factor.R
@@ -0,0 +1,7 @@
+context("toJSON Factor")
+
+test_that("Encoding Factor Objects", {
+  expect_that(fromJSON(toJSON(iris$Species)), is_identical_to(as.character(iris$Species)));
+  expect_that(fromJSON(toJSON(iris$Species[1])), is_identical_to(as.character(iris$Species[1])));
+  expect_that(fromJSON(toJSON(iris$Species, factor="integer")), equals(structure(unclass(iris$Species), levels=NULL)));
+});
diff --git a/inst/tests/test-toJSON-keep-vec-names.R b/inst/tests/test-toJSON-keep-vec-names.R
new file mode 100644
index 0000000..a34dcfa
--- /dev/null
+++ b/inst/tests/test-toJSON-keep-vec-names.R
@@ -0,0 +1,32 @@
+context("toJSON keep_vec_names")
+
+test_that("keep_vec_names with named vectors", {
+
+  # Basic types should give messages
+  # Length-1 vectors
+  expect_message(expect_equal(toJSON2(c(a=1)), '{"a":1}'))
+  expect_message(expect_equal(toJSON2(c(a="x")), '{"a":"x"}'))
+  expect_message(expect_equal(toJSON2(c(a=TRUE)), '{"a":true}'))
+
+  # Longer vectors
+  expect_message(expect_equal(toJSON2(c(a=1,b=2)), '{"a":1,"b":2}'))
+  expect_message(expect_equal(toJSON2(c(a="x",b="y")), '{"a":"x","b":"y"}'))
+  expect_message(expect_equal(toJSON2(c(a=FALSE,b=TRUE)), '{"a":false,"b":true}'))
+
+  # Some other types
+  expect_message(expect_equal(toJSON2(factor(c(a="x"))), '{"a":"x"}'))
+  expect_message(expect_equal(toJSON2(c(a=as.Date("2015-01-01"))), '{"a":"2015-01-01"}'))
+  expect_message(expect_equal(toJSON2(c(a=as.POSIXct("2015-01-01 3:00:00"))), '{"a":"2015-01-01 03:00:00"}'))
+  expect_message(expect_equal(toJSON2(c(a=as.POSIXlt("2015-01-01 3:00:00"))), '{"a":"2015-01-01 03:00:00"}'))
+
+  # keep_vec_names shouldn't affect unnamed vectors
+  expect_equal(toJSON2(1), '1')
+  expect_equal(toJSON2(c(1:3)), '[1,2,3]')
+})
+
+
+# Data frames generally don't preserve names within column vectors, except in very unusual cases
+test_that("keep_vec_names with data frames", {
+  expect_equal(toJSON3(data.frame(x=c(a=1), y=2)), '{"x":[1],"y":[2]}')
+  expect_equal(toJSON3(data.frame(x=c(a=1,b=2), y=c(c=3,d=4))), '{"x":[1,2],"y":[3,4]}')
+})
diff --git a/inst/tests/test-toJSON-logical.R b/inst/tests/test-toJSON-logical.R
new file mode 100644
index 0000000..830976b
--- /dev/null
+++ b/inst/tests/test-toJSON-logical.R
@@ -0,0 +1,21 @@
+context("toJSON Logical")
+
+test_that("Encoding Logical", {
+  expect_that(toJSON(TRUE), equals("[true]"));
+  expect_that(toJSON(FALSE), equals("[false]"));
+  expect_that(toJSON(as.logical(NA)), equals("[null]"))
+  expect_that(toJSON(as.logical(NA), na="string"), equals("[\"NA\"]"))
+  expect_that(toJSON(c(TRUE, NA, FALSE)), equals("[true,null,false]"));
+  expect_that(toJSON(c(TRUE, NA, FALSE), na="string"), equals("[true,\"NA\",false]"));
+  expect_that(toJSON(logical()), equals("[]"));
+});
+
+test_that("Encoding Logical in Data Frame", {
+  expect_that(toJSON(data.frame(foo=TRUE)), equals("[{\"foo\":true}]"));
+  expect_that(toJSON(data.frame(foo=FALSE)), equals("[{\"foo\":false}]"));
+  expect_that(toJSON(data.frame(foo=as.logical(NA))), equals("[{}]"));
+  expect_that(toJSON(data.frame(foo=as.logical(NA)), na="null"), equals("[{\"foo\":null}]"));
+  expect_that(toJSON(data.frame(foo=as.logical(NA)), na="string"), equals("[{\"foo\":\"NA\"}]"));
+  expect_that(toJSON(data.frame(foo=c(TRUE, NA, FALSE))), equals("[{\"foo\":true},{},{\"foo\":false}]"));
+  expect_that(toJSON(data.frame(foo=logical())), equals("[]"));
+});
diff --git a/inst/tests/test-toJSON-matrix.R b/inst/tests/test-toJSON-matrix.R
new file mode 100644
index 0000000..8781416
--- /dev/null
+++ b/inst/tests/test-toJSON-matrix.R
@@ -0,0 +1,9 @@
+context("toJSON Matrix")
+
+test_that("Encoding a Matrix", {
+  expect_that(toJSON(matrix(1)), equals("[[1]]"));
+  expect_that(toJSON(matrix(pi), digits=5), equals("[[3.14159]]"));
+  expect_that(toJSON(matrix(1:2)), equals("[[1],[2]]"));  
+  expect_that(toJSON(matrix(1:2, nrow=1)), equals("[[1,2]]")); 
+  expect_that(toJSON(matrix(state.x77[1,1, drop=FALSE])), equals("[[3615]]"));
+});
diff --git a/inst/tests/test-toJSON-numeric.R b/inst/tests/test-toJSON-numeric.R
new file mode 100644
index 0000000..fd81557
--- /dev/null
+++ b/inst/tests/test-toJSON-numeric.R
@@ -0,0 +1,24 @@
+context("toJSON Numeric")
+
+test_that("Encoding Numbers", {
+  expect_that(toJSON(35), equals("[35]"));
+  expect_that(toJSON(35L), equals("[35]"));
+  expect_that(toJSON(c(35, pi), digits=5), equals("[35,3.14159]"));  
+  expect_that(toJSON(pi, digits=0), equals("[3]")); 
+  expect_that(toJSON(pi, digits=2), equals("[3.14]")); 
+  expect_that(toJSON(pi, digits=10), equals("[3.1415926536]"));
+  expect_that(toJSON(c(pi, NA), na="string", digits=5), equals("[3.14159,\"NA\"]"));
+  expect_that(toJSON(c(pi, NA), na="null", digits=5), equals("[3.14159,null]"));
+});
+
+test_that("Encoding Numbers in Data Frame", {
+  expect_that(toJSON(data.frame(foo=35)), equals("[{\"foo\":35}]"));
+  expect_that(toJSON(data.frame(foo=35L)), equals("[{\"foo\":35}]"));
+  expect_that(toJSON(data.frame(foo=c(35, pi)), digits=5), equals("[{\"foo\":35},{\"foo\":3.14159}]"));  
+  expect_that(toJSON(data.frame(foo=pi), digits=0), equals("[{\"foo\":3}]")); 
+  expect_that(toJSON(data.frame(foo=pi), digits=2), equals("[{\"foo\":3.14}]")); 
+  expect_that(toJSON(data.frame(foo=pi), digits=10), equals("[{\"foo\":3.1415926536}]"));
+  expect_that(toJSON(data.frame(foo=c(pi, NA)), digits=5), equals("[{\"foo\":3.14159},{}]"));              
+  expect_that(toJSON(data.frame(foo=c(pi, NA)), na="string", digits=5), equals("[{\"foo\":3.14159},{\"foo\":\"NA\"}]"));
+  expect_that(toJSON(data.frame(foo=c(pi, NA)), na="null", digits=5), equals("[{\"foo\":3.14159},{\"foo\":null}]"));
+});
diff --git a/inst/tests/test-toJSON-raw.R b/inst/tests/test-toJSON-raw.R
new file mode 100644
index 0000000..e9594ab
--- /dev/null
+++ b/inst/tests/test-toJSON-raw.R
@@ -0,0 +1,11 @@
+context("toJSON raw")
+
+test_that("Encoding raw vector", {
+  x <- list(myraw = charToRaw("bla"))
+  x$mydf <- data.frame(foo=1:3)
+  x$mydf$bar <- as.character.hexmode(charToRaw("bla"))
+  
+  y <- fromJSON(toJSON(x))
+  expect_that(x$mydf$bar, is_identical_to(y$mydf$bar))
+  expect_that(y$myraw, is_identical_to("Ymxh"))
+});
diff --git a/inst/tests/test-toJSON-zerovec.R b/inst/tests/test-toJSON-zerovec.R
new file mode 100644
index 0000000..65983c2
--- /dev/null
+++ b/inst/tests/test-toJSON-zerovec.R
@@ -0,0 +1,23 @@
+context("toJSON zerovec")
+
+test_that("Encoding Factor Objects", {
+  expect_that(toJSON(character()), is_identical_to("[]"))
+  expect_that(toJSON(logical()), is_identical_to("[]"))
+  expect_that(toJSON(complex()), is_identical_to("[]"))
+  expect_that(toJSON(complex(), complex="list"), is_identical_to("{\"real\":[],\"imaginary\":[]}"))
+  expect_that(toJSON(double()), is_identical_to("[]"))
+  expect_that(toJSON(integer()), is_identical_to("[]"))
+  expect_that(toJSON(list()), is_identical_to("[]"))
+  expect_that(toJSON(factor()), is_identical_to("[]"))
+  expect_that(toJSON(factor(levels=c("foo", "bar"))), is_identical_to("[]"))
+  expect_that(toJSON(matrix(nrow=0, ncol=0)), is_identical_to("[]"))
+  expect_that(toJSON(as.matrix(numeric())), is_identical_to("[]"))
+  expect_that(toJSON(data.frame()), is_identical_to("[]"))
+  expect_that(toJSON(data.frame(foo=vector())), is_identical_to("[]"))  
+  expect_that(toJSON(data.frame(foo=vector(), bar=logical())), is_identical_to("[]"))
+  expect_that(toJSON(Sys.time()[0], POSIXt="string"), is_identical_to("[]"))
+  expect_that(toJSON(Sys.time()[0], POSIXt="epoch"), is_identical_to("[]")) 
+  expect_that(toJSON(Sys.time()[0], POSIXt="mongo"), is_identical_to("[]")) 
+  expect_that(toJSON(Sys.time()[0], POSIXt="ISO8601"), is_identical_to("[]")) 
+  expect_that(toJSON(as.Date(Sys.time())[0], POSIXt="ISO8601"), is_identical_to("[]"))
+});
diff --git a/inst/tests/testS4.R b/inst/tests/testS4.R
new file mode 100644
index 0000000..0106bb2
--- /dev/null
+++ b/inst/tests/testS4.R
@@ -0,0 +1,14 @@
+# setClass(
+# 	Class="Trajectories",
+# 	representation=representation(
+# 		times = "numeric",
+# 		traj = "matrix"
+# 	)
+# );
+# 
+# t1 = new(Class="Trajectories")
+# t2 = new(Class="Trajectories",times=c(1,3,4))
+# t3 = new(Class="Trajectories",times=c(1,3),traj=matrix(1:4,ncol=2))
+# 
+# cat(asJSON(t3, pretty=T))
+# cat(encode(t3, pretty=T))
diff --git a/man/flatten.Rd b/man/flatten.Rd
new file mode 100644
index 0000000..b98b548
--- /dev/null
+++ b/man/flatten.Rd
@@ -0,0 +1,39 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/flatten.R
+\name{flatten}
+\alias{flatten}
+\title{Flatten nested data frames}
+\usage{
+flatten(x, recursive = TRUE)
+}
+\arguments{
+\item{x}{a data frame}
+
+\item{recursive}{flatten recursively}
+}
+\description{
+In a nested data frame, one or more of the columns consist of another data
+frame. These structures frequently appear when parsing JSON data from the web.
+We can flatten such data frames into a regular 2 dimensional tabular structure.
+}
+\examples{
+options(stringsAsFactors=FALSE)
+x <- data.frame(driver = c("Bowser", "Peach"), occupation = c("Koopa", "Princess"))
+x$vehicle <- data.frame(model = c("Piranha Prowler", "Royal Racer"))
+x$vehicle$stats <- data.frame(speed = c(55, 34), weight = c(67, 24), drift = c(35, 32))
+str(x)
+str(flatten(x))
+str(flatten(x, recursive = FALSE))
+
+\dontrun{
+data1 <- fromJSON("https://api.github.com/users/hadley/repos")
+colnames(data1)
+colnames(data1$owner)
+colnames(flatten(data1))
+
+# or for short:
+data2 <- fromJSON("https://api.github.com/users/hadley/repos", flatten = TRUE)
+colnames(data2)
+}
+}
+
diff --git a/man/fromJSON.Rd b/man/fromJSON.Rd
new file mode 100644
index 0000000..6da565a
--- /dev/null
+++ b/man/fromJSON.Rd
@@ -0,0 +1,117 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/fromJSON.R, R/toJSON.R
+\name{toJSON, fromJSON}
+\alias{fromJSON}
+\alias{jsonlite}
+\alias{toJSON}
+\alias{toJSON, fromJSON}
+\title{Convert \R{} objects to/from JSON}
+\usage{
+fromJSON(txt, simplifyVector = TRUE, simplifyDataFrame = simplifyVector,
+  simplifyMatrix = simplifyVector, flatten = FALSE, ...)
+
+toJSON(x, dataframe = c("rows", "columns", "values"), matrix = c("rowmajor",
+  "columnmajor"), Date = c("ISO8601", "epoch"), POSIXt = c("string",
+  "ISO8601", "epoch", "mongo"), factor = c("string", "integer"),
+  complex = c("string", "list"), raw = c("base64", "hex", "mongo"),
+  null = c("list", "null"), na = c("null", "string"), auto_unbox = FALSE,
+  digits = 4, pretty = FALSE, force = FALSE, ...)
+}
+\arguments{
+\item{txt}{a JSON string, URL or file}
+
+\item{simplifyVector}{coerce JSON arrays containing only primitives into an atomic vector}
+
+\item{simplifyDataFrame}{coerce JSON arrays containing only records (JSON objects) into a data frame}
+
+\item{simplifyMatrix}{coerce JSON arrays containing vectors of equal mode and dimension into matrix or array}
+
+\item{flatten}{automatically \code{\link{flatten}} nested data frames into a single non-nested data frame}
+
+\item{...}{arguments passed on to class specific \code{print} methods}
+
+\item{x}{the object to be encoded}
+
+\item{dataframe}{how to encode data.frame objects: must be one of 'rows', 'columns' or 'values'}
+
+\item{matrix}{how to encode matrices and higher dimensional arrays: must be one of 'rowmajor' or 'columnmajor'.}
+
+\item{Date}{how to encode Date objects: must be one of 'ISO8601' or 'epoch'}
+
+\item{POSIXt}{how to encode POSIXt (datetime) objects: must be one of 'string', 'ISO8601', 'epoch' or 'mongo'}
+
+\item{factor}{how to encode factor objects: must be one of 'string' or 'integer'}
+
+\item{complex}{how to encode complex numbers: must be one of 'string' or 'list'}
+
+\item{raw}{how to encode raw objects: must be one of 'base64', 'hex' or 'mongo'}
+
+\item{null}{how to encode NULL values within a list: must be one of 'null' or 'list'}
+
+\item{na}{how to print NA values: must be one of 'null' or 'string'. Defaults are class specific}
+
+\item{auto_unbox}{automatically \code{\link{unbox}} all atomic vectors of length 1. It is usually safer to avoid this and instead use the \code{\link{unbox}} function to unbox individual elements.
+An exception is that objects of class \code{AsIs} (i.e. wrapped in \code{I()}) are not automatically unboxed. This is a way to mark single values as length-1 arrays.}
+
+\item{digits}{max number of decimal digits to print for numeric values. Use \code{I()} to specify significant digits.}
+
+\item{pretty}{adds indentation whitespace to JSON output. Can be TRUE/FALSE or a number specifying the number of spaces to indent. See \code{\link{prettify}}}
+
+\item{force}{unclass/skip objects of classes with no defined JSON mapping}
+}
+\description{
+These functions are used to convert between JSON data and \R{} objects. The \code{\link{toJSON}} and \code{\link{fromJSON}}
+functions use a class based mapping, which follows conventions outlined in this paper:  \url{http://arxiv.org/abs/1403.2805} (also available as vignette).
+}
+\details{
+The \code{\link{toJSON}} and \code{\link{fromJSON}} functions are drop-in replacements for the identically named functions
+in packages \code{rjson} and \code{RJSONIO}. Our implementation uses an alternative, somewhat more consistent mapping
+between \R{} objects and JSON strings.
+
+The \code{\link{serializeJSON}} and \code{\link{unserializeJSON}} functions in this package use an
+alternative system to convert between \R{} objects and JSON, which supports more classes but is much more verbose.
+
+A JSON string is always Unicode, using \code{UTF-8} by default, hence there is usually no need to escape any characters.
+However, the JSON format does support escaping of Unicode characters, which are encoded using a backslash followed by
+a lower case \code{"u"} and 4 hex characters, for example: \code{"Z\\u00FCrich"}. The \code{fromJSON} function
+will parse such escape sequences, but it is usually preferable to encode Unicode characters in JSON using native
+\code{UTF-8} rather than escape sequences.
+}
+\examples{
+# Stringify some data
+jsoncars <- toJSON(mtcars, pretty=TRUE)
+cat(jsoncars)
+
+# Parse it back
+fromJSON(jsoncars)
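+
+# A quick sketch of the 'dataframe' argument described above:
+# column-based encoding of the same data
+toJSON(head(mtcars, 2), dataframe = "columns")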
+
+# Parse escaped unicode
+fromJSON('{"city" : "Z\\\\u00FCrich"}')
+
+# Decimal vs significant digits
+toJSON(pi, digits=3)
+toJSON(pi, digits=I(3))
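+
+# The AsIs exception noted for auto_unbox: I() keeps a length-one
+# vector encoded as a JSON array (values here are arbitrary)
+toJSON(list(foo = 1, bar = I(2)), auto_unbox = TRUE)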
+
+\dontrun{# Retrieve data frame
+data1 <- fromJSON("https://api.github.com/users/hadley/orgs")
+names(data1)
+data1$login
+
+# Nested data frames:
+data2 <- fromJSON("https://api.github.com/users/hadley/repos")
+names(data2)
+names(data2$owner)
+data2$owner$login
+
+# Flatten the data into a regular non-nested dataframe
+names(flatten(data2))
+
+# Flatten directly (more efficient):
+data3 <- fromJSON("https://api.github.com/users/hadley/repos", flatten = TRUE)
+identical(data3, flatten(data2))
+}
+}
+\references{
+Jeroen Ooms (2014). The \code{jsonlite} Package: A Practical and Consistent Mapping Between JSON Data and \R{} Objects. \emph{arXiv:1403.2805}. \url{http://arxiv.org/abs/1403.2805}
+}
+
diff --git a/man/prettify.Rd b/man/prettify.Rd
new file mode 100644
index 0000000..09d8ad7
--- /dev/null
+++ b/man/prettify.Rd
@@ -0,0 +1,27 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/prettify.R
+\name{prettify, minify}
+\alias{minify}
+\alias{prettify}
+\alias{prettify, minify}
+\title{Prettify or minify a JSON string}
+\usage{
+prettify(txt, indent = 4)
+
+minify(txt)
+}
+\arguments{
+\item{txt}{JSON string}
+
+\item{indent}{number of spaces to indent}
+}
+\description{
+Prettify adds indentation to a JSON string; minify removes all indentation/whitespace.
+}
+\examples{
+myjson <- toJSON(cars)
+cat(myjson)
+prettify(myjson)
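+# The 'indent' argument controls indentation width:
+prettify(myjson, indent = 2)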
+minify(myjson)
+}
+
diff --git a/man/rbind.pages.Rd b/man/rbind.pages.Rd
new file mode 100644
index 0000000..c676901
--- /dev/null
+++ b/man/rbind.pages.Rd
@@ -0,0 +1,45 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/rbind.pages.R
+\name{rbind.pages}
+\alias{rbind.pages}
+\title{Combine pages into a single data frame}
+\usage{
+rbind.pages(pages)
+}
+\arguments{
+\item{pages}{a list of data frames, each representing a \emph{page} of data}
+}
+\description{
+The \code{rbind.pages} function is used to combine a list of data frames into a single
+data frame. This is often needed when working with a JSON API that limits the amount
+of data per request. If we need more data than what fits in a single request, we need to
+perform multiple requests that each retrieve a fragment of data, not unlike pages in a
+book. In practice this is often implemented using a \code{page} parameter in the API. The
+\code{rbind.pages} function can be used to combine these pages back into a single dataset.
+}
+\details{
+The \code{\link{rbind.pages}} function generalizes \code{\link[base:rbind]{base::rbind}} and
+\code{\link[plyr:rbind.fill]{plyr::rbind.fill}} with added support for nested data frames. Not every column
+has to be present in each of the individual data frames; missing columns will be filled
+with \code{NA} values.
+}
+\examples{
+# Basic example
+x <- data.frame(foo = rnorm(3), bar = c(TRUE, FALSE, TRUE))
+y <- data.frame(foo = rnorm(2), col = c("blue", "red"))
+rbind.pages(list(x, y))
+
+\dontrun{
+baseurl <- "http://projects.propublica.org/nonprofits/api/v1/search.json"
+pages <- list()
+for(i in 0:20){
+  mydata <- fromJSON(paste0(baseurl, "?order=revenue&sort_order=desc&page=", i))
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+filings <- rbind.pages(pages)
+nrow(filings)
+colnames(filings)
+}
+}
+
diff --git a/man/serializeJSON.Rd b/man/serializeJSON.Rd
new file mode 100644
index 0000000..f8c5278
--- /dev/null
+++ b/man/serializeJSON.Rd
@@ -0,0 +1,53 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/serializeJSON.R
+\name{serializeJSON}
+\alias{serializeJSON}
+\alias{unserializeJSON}
+\title{serialize R objects to JSON}
+\usage{
+serializeJSON(x, digits = 8, pretty = FALSE)
+
+unserializeJSON(txt)
+}
+\arguments{
+\item{x}{an \R{} object to be serialized}
+
+\item{digits}{max number of digits (after the dot) to print for numeric values}
+
+\item{pretty}{add indentation/whitespace to JSON output. See \code{\link{prettify}}}
+
+\item{txt}{a JSON string which was created using \code{serializeJSON}}
+}
+\description{
+The \code{\link{serializeJSON}} and \code{\link{unserializeJSON}} functions convert between
+\R{} objects and JSON data. Instead of using a class based mapping like
+\code{\link{toJSON}} and \code{\link{fromJSON}}, the serialize functions base the encoding
+schema on the storage type, and capture all data and attributes from any object.
+This way the object can be restored almost perfectly from its JSON representation, but
+the resulting JSON output is very verbose. Apart from environments, all standard storage
+types are supported.
+}
+\note{
+JSON is a text based format which leads to loss of precision when printing numbers.
+}
+\examples{
+jsoncars <- serializeJSON(mtcars)
+mtcars2 <- unserializeJSON(jsoncars)
+identical(mtcars, mtcars2)
+
+set.seed(123)
+myobject <- list(
+  mynull = NULL,
+  mycomplex = lapply(eigen(matrix(-rnorm(9),3)), round, 3),
+  mymatrix = round(matrix(rnorm(9), 3),3),
+  myint = as.integer(c(1,2,3)),
+  mydf = cars,
+  mylist = list(foo='bar', 123, NA, NULL, list('test')),
+  mylogical = c(TRUE,FALSE,NA),
+  mychar = c('foo', NA, 'bar'),
+  somemissings = c(1, 2, NA, NaN, 5, Inf, 7, -Inf, 9, NA),
+  myrawvec = charToRaw('This is a test')
+);
+identical(unserializeJSON(serializeJSON(myobject)), myobject);
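+
+# The precision note above in action: a low 'digits' value gives an
+# inexact round trip (illustrative sketch)
+identical(pi, unserializeJSON(serializeJSON(pi, digits = 3)))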
+}
+
diff --git a/man/stream_in.Rd b/man/stream_in.Rd
new file mode 100644
index 0000000..5070ade
--- /dev/null
+++ b/man/stream_in.Rd
@@ -0,0 +1,140 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/stream.R
+\name{stream_in, stream_out}
+\alias{stream_in}
+\alias{stream_in, stream_out}
+\alias{stream_out}
+\title{Streaming JSON input/output}
+\usage{
+stream_in(con, handler = NULL, pagesize = 500, verbose = TRUE, ...)
+
+stream_out(x, con = stdout(), pagesize = 500, verbose = TRUE, ...)
+}
+\arguments{
+\item{con}{a \code{\link{connection}} object. If the connection is not open,
+\code{stream_in} and \code{stream_out} will automatically open
+and later close (and destroy) the connection. See details.}
+
+\item{handler}{a custom function that is called on each page of JSON data. If not specified,
+the default handler stores all pages and binds them into a single data frame that will be
+returned by \code{stream_in}. See details.}
+
+\item{pagesize}{number of lines to read/write from/to the connection per iteration.}
+
+\item{verbose}{print some information on what is going on.}
+
+\item{...}{arguments for \code{\link{fromJSON}} and \code{\link{toJSON}} that
+control JSON formatting/parsing where applicable. Use with caution.}
+
+\item{x}{object to be streamed out. Currently only data frames are supported.}
+}
+\value{
+The \code{stream_out} function always returns \code{NULL}.
+When no custom handler is specified, \code{stream_in} returns a data frame of all pages bound together.
+When a custom handler function is specified, \code{stream_in} always returns \code{NULL}.
+}
+\description{
+The \code{stream_in} and \code{stream_out} functions implement line-by-line processing
+of JSON data over a \code{\link{connection}}, such as a socket, url, file or pipe. JSON
+streaming requires the \href{http://ndjson.org}{ndjson} format, which slightly differs
+from \code{\link{fromJSON}} and \code{\link{toJSON}}, see details.
+}
+\details{
+Because parsing huge JSON strings is difficult and inefficient, JSON streaming is done
+using \strong{lines of minified JSON records}, a.k.a. \href{http://ndjson.org}{ndjson}.
+This is pretty standard: JSON databases such as \href{https://github.com/maxogden/dat}{dat}
+or MongoDB use the same format to import/export datasets. Note that this means that the
+total stream combined is not valid JSON itself; only the individual lines are. Also note
+that because line-breaks are used as separators, prettified JSON is not permitted: the
+JSON lines \emph{must} be minified. In this respect, the format is a bit different from
+\code{\link{fromJSON}} and \code{\link{toJSON}} where all lines are part of a single JSON
+structure with optional line breaks.
+
+The \code{handler} is a callback function which is called for each page (batch) of
+JSON data with exactly one argument (usually a data frame with \code{pagesize} rows).
+If \code{handler} is missing or \code{NULL}, a default handler is used which stores all
+intermediate pages of data, and at the very end binds all pages together into one single
+data frame that is returned by \code{stream_in}. When a custom \code{handler} function
+is specified, \code{stream_in} does not store any intermediate results and always returns
+\code{NULL}. It is then up to the \code{handler} to process or store data pages.
+A \code{handler} function that does not store intermediate results in memory (for
+example by writing output to another connection) results in a pipeline that can process an
+unlimited amount of data. See example.
+
+If a connection is not opened yet, \code{stream_in} and \code{stream_out}
+will automatically open and later close the connection. Because R destroys connections
+when they are closed, they cannot be reused. To use a single connection for multiple
+calls to \code{stream_in} or \code{stream_out}, it needs to be opened
+beforehand. See example.
+}
+\examples{
+# compare formats
+x <- iris[1:3,]
+toJSON(x)
+stream_out(x)
+
+# Trivial example
+mydata <- stream_in(url("http://httpbin.org/stream/100"))
+
+\dontrun{# Stream large dataset to file and back
+library(nycflights13)
+stream_out(flights, file(tmp <- tempfile()))
+flights2 <- stream_in(file(tmp))
+unlink(tmp)
+all.equal(flights2, as.data.frame(flights))
+
+# stream over HTTP
+diamonds2 <- stream_in(url("http://jeroenooms.github.io/data/diamonds.json"))
+
+# stream over HTTP with gzip compression
+flights3 <- stream_in(gzcon(url("http://jeroenooms.github.io/data/nycflights13.json.gz")))
+all.equal(flights3, as.data.frame(flights))
+
+# stream over HTTPS (HTTP+SSL) via curl
+library(curl)
+flights4 <- stream_in(gzcon(curl("https://jeroenooms.github.io/data/nycflights13.json.gz")))
+all.equal(flights4, as.data.frame(flights))
+
+# or alternatively:
+flights5 <- stream_in(gzcon(pipe("curl https://jeroenooms.github.io/data/nycflights13.json.gz")))
+all.equal(flights5, as.data.frame(flights))
+
+# Full JSON IO stream from URL to file connection.
+# Calculate delays for flights over 1000 miles in batches of 5k
+library(dplyr)
+con_in <- gzcon(url("http://jeroenooms.github.io/data/nycflights13.json.gz"))
+con_out <- file(tmp <- tempfile(), open = "wb")
+stream_in(con_in, handler = function(df){
+  df <- dplyr::filter(df, distance > 1000)
+  df <- dplyr::mutate(df, delta = dep_delay - arr_delay)
+  stream_out(df, con_out, pagesize = 1000)
+}, pagesize = 5000)
+close(con_out)
+
+# stream it back in
+mydata <- stream_in(file(tmp))
+nrow(mydata)
+unlink(tmp)
+
+# Data from http://openweathermap.org/current#bulk
+# Each row contains a nested data frame.
+daily14 <- stream_in(gzcon(url("http://78.46.48.103/sample/daily_14.json.gz")), pagesize=50)
+subset(daily14, city$name == "Berlin")$data[[1]]
+
+# Or with dplyr:
+library(dplyr)
+daily14f <- flatten(daily14)
+filter(daily14f, city.name == "Berlin")$data[[1]]
+
+# Stream import large data from zip file
+tmp <- tempfile()
+download.file("http://jsonstudio.com/wp-content/uploads/2014/02/companies.zip", tmp)
+companies <- stream_in(unz(tmp, "companies.json"))
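+
+# Sketch of reusing one connection (see details above): a connection
+# opened beforehand is not closed/destroyed by stream_out, so multiple
+# calls can append to the same temp file
+con <- file(tmp <- tempfile(), open = "w")
+stream_out(iris[1:75,], con)
+stream_out(iris[76:150,], con)
+close(con)
+iris2 <- stream_in(file(tmp))
+nrow(iris2)
+unlink(tmp)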
+}
+}
+\references{
+MongoDB export format: \url{http://docs.mongodb.org/manual/reference/program/mongoexport/#cmdoption--query}
+
+Documentation for the JSON Lines text file format: \url{http://jsonlines.org/}
+}
+
diff --git a/man/unbox.Rd b/man/unbox.Rd
new file mode 100644
index 0000000..32a08a4
--- /dev/null
+++ b/man/unbox.Rd
@@ -0,0 +1,48 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/unbox.R
+\name{unbox}
+\alias{unbox}
+\title{Unbox a vector or data frame}
+\usage{
+unbox(x)
+}
+\arguments{
+\item{x}{atomic vector of length 1, or data frame with 1 row.}
+}
+\value{
+Returns a singleton version of \code{x}.
+}
+\description{
+This function marks an atomic vector or data frame as a
+\href{http://en.wikipedia.org/wiki/Singleton_(mathematics)}{singleton}, i.e.
+a set with exactly 1 element. As a result, the value will not turn into an
+\code{array} when encoded into JSON. This can only be done for
+atomic vectors of length 1, or data frames with exactly 1 row. To automatically
+unbox all vectors of length 1 within an object, use the \code{auto_unbox} argument
+in \code{\link{toJSON}}.
+}
+\details{
+It is usually recommended to avoid this function and stick with the default
+encoding schema for the various \R{} classes. The only use case for this function
+is if you are bound to some specific predefined JSON structure (e.g. to
+submit to an API), which has no natural \R{} representation. Note that the default
+encoding for data frames naturally results in a collection of key-value pairs,
+without using \code{unbox}.
+}
+\examples{
+toJSON(list(foo=123))
+toJSON(list(foo=unbox(123)))
+
+# Auto unbox vectors of length one:
+x = list(x=1:3, y = 4, z = "foo", k = NULL)
+toJSON(x)
+toJSON(x, auto_unbox = TRUE)
+
+x <- iris[1,]
+toJSON(list(rec=x))
+toJSON(list(rec=unbox(x)))
+}
+\references{
+\url{http://en.wikipedia.org/wiki/Singleton_(mathematics)}
+}
+
diff --git a/man/validate.Rd b/man/validate.Rd
new file mode 100644
index 0000000..0c2806e
--- /dev/null
+++ b/man/validate.Rd
@@ -0,0 +1,24 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/validate.R
+\name{validate}
+\alias{validate}
+\title{Validate JSON}
+\usage{
+validate(txt)
+}
+\arguments{
+\item{txt}{JSON string}
+}
+\description{
+Test if a string contains valid JSON. Character vectors will be collapsed into a single string.
+}
+\examples{
+#Output from toJSON and serializeJSON should pass validation
+myjson <- toJSON(mtcars)
+validate(myjson) #TRUE
+
+#Something bad happened
+truncated <- substring(myjson, 1, 100)
+validate(truncated) #FALSE
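+
+#Character vectors are collapsed into one string before validating,
+#as noted above (a small sketch):
+validate(c("[1, 2,", "3, 4]")) #TRUE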
+}
+
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000..8758ee1
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,19 @@
+PKG_CPPFLAGS = -Iyajl -Iyajl/api -I.
+
+SOURCES = yajl/yajl.c yajl/yajl_alloc.c yajl/yajl_buf.c yajl/yajl_encode.c \
+          yajl/yajl_gen.c yajl/yajl_lex.c yajl/yajl_parser.c yajl/yajl_tree.c \
+          yajl/yajl_version.c base64.c collapse_array.c collapse_object.c \
+          escape_chars.c is_recordlist.c is_scalarlist.c null_to_na.c parse.c \
+          prettify.c validate.c modp_numtoa.c num_to_char.c integer64_to_na.c \
+          push_parser.c collapse_pretty.c row_collapse.c
+
+# For development only (GNU/GCC specific not allowed on CRAN):
+# SOURCES = $(wildcard yajl/*.c *.c)
+# PKG_CFLAGS= -Wall -pedantic
+
+OBJECTS = $(SOURCES:.c=.o)
+
+all: clean $(OBJECTS)
+
+clean:
+	rm -f $(OBJECTS)
diff --git a/src/base64.c b/src/base64.c
new file mode 100644
index 0000000..24369bc
--- /dev/null
+++ b/src/base64.c
@@ -0,0 +1,225 @@
+
+#include "base64.h"
+
+/*
+** Translation Table as described in RFC1113
+*/
+static const char cb64[]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+/*
+** Translation Table to decode (created by author)
+*/
+static const char cd64[]="|$$$}rstuvwxyz{$$$$$$$>?@ABCDEFGHIJKLMNOPQRSTUVW$$$$$$XYZ[\\]^_`abcdefghijklmnopq";
+
+/*
+** encodeblock
+**
+** encode 3 8-bit binary bytes as 4 '6-bit' characters
+*/
+void encodeblock( unsigned char in[3], unsigned char out[4], int len )
+{
+    out[0] = cb64[ in[0] >> 2 ];
+    out[1] = cb64[ ((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4) ];
+    out[2] = (unsigned char) (len > 1 ? cb64[ ((in[1] & 0x0f) << 2) | ((in[2] & 0xc0) >> 6) ] : '=');
+    out[3] = (unsigned char) (len > 2 ? cb64[ in[2] & 0x3f ] : '=');
+}
+
+/*
+** encode
+**
+** base64 encode a stream adding padding and line breaks as per spec.
+*/
+void encode( FILE *infile, FILE *outfile, int linesize )
+{
+    unsigned char in[3], out[4];
+    int i, len, blocksout = 0;
+
+    while( !feof( infile ) ) {
+        len = 0;
+        for( i = 0; i < 3; i++ ) {
+            in[i] = (unsigned char) getc( infile );
+            if( !feof( infile ) ) {
+                len++;
+            }
+            else {
+                in[i] = 0;
+            }
+        }
+        if( len ) {
+            encodeblock( in, out, len );
+            for( i = 0; i < 4; i++ ) {
+                putc( out[i], outfile );
+            }
+            blocksout++;
+        }
+        if( blocksout >= (linesize/4) || feof( infile ) ) {
+            if( blocksout ) {
+                fprintf( outfile, "\r\n" );
+            }
+            blocksout = 0;
+        }
+    }
+}
+
+/*
+** decodeblock
+**
+** decode 4 '6-bit' characters into 3 8-bit binary bytes
+*/
+void decodeblock( unsigned char in[4], unsigned char out[3] )
+{   
+    out[ 0 ] = (unsigned char ) (in[0] << 2 | in[1] >> 4);
+    out[ 1 ] = (unsigned char ) (in[1] << 4 | in[2] >> 2);
+    out[ 2 ] = (unsigned char ) (((in[2] << 6) & 0xc0) | in[3]);
+}
+
+/*
+** decode
+**
+** decode a base64 encoded stream discarding padding, line breaks and noise
+*/
+void decode( FILE *infile, FILE *outfile )
+{
+    unsigned char in[4], out[3], v;
+    int i, len;
+
+    while( !feof( infile ) ) {
+        for( len = 0, i = 0; i < 4 && !feof( infile ); i++ ) {
+            v = 0;
+            while( !feof( infile ) && v == 0 ) {
+                v = (unsigned char) getc( infile );
+                v = (unsigned char) ((v < 43 || v > 122) ? 0 : cd64[ v - 43 ]);
+                if( v ) {
+                    v = (unsigned char) ((v == '$') ? 0 : v - 61);
+                }
+            }
+            if( !feof( infile ) ) {
+                len++;
+                if( v ) {
+                    in[ i ] = (unsigned char) (v - 1);
+                }
+            }
+            else {
+                in[i] = 0;
+            }
+        }
+        if( len ) {
+            decodeblock( in, out );
+            for( i = 0; i < len - 1; i++ ) {
+                putc( out[i], outfile );
+            }
+        }
+    }
+}
+
+
+/*
+** b64_message
+**
+** Gather text messages in one place.
+**
+*/
+char *b64_message( int errcode )
+{
+    char *msgs[ B64_MAX_MESSAGES ] = {
+            "b64:000:Invalid Message Code.",
+            "b64:001:Syntax Error -- check help for usage.",
+            "b64:002:File Error Opening/Creating Files.",
+            "b64:003:File I/O Error -- Note: output file not removed.",
+            "b64:004:Error on output file close.",
+            "b64:004:linesize set to minimum."
+    };
+    char *msg = msgs[ 0 ];
+
+    if( errcode > 0 && errcode < B64_MAX_MESSAGES ) {
+        msg = msgs[ errcode ];
+    }
+
+    return( msg );
+}
+
+/*
+** b64
+**
+** 'engine' that opens streams and calls encode/decode
+*/
+
+int b64( int opt, char *infilename, char *outfilename, int linesize )
+{
+    FILE *infile;
+    int retcode = B64_FILE_ERROR;
+
+    if( !infilename ) {
+        infile = stdin;
+    }
+    else {
+        infile = fopen( infilename, "rb" );
+    }
+    if( !infile ) {
+        perror( infilename );
+    }
+    else {
+        FILE *outfile;
+        // if( !outfilename ) {
+        //     outfile = stdout;
+        // }
+        // else {
+            outfile = fopen( outfilename, "wb" );
+        // }
+        if( !outfile ) {
+            perror( outfilename );
+        }
+        else {
+            if( opt == 'e' ) {
+                encode( infile, outfile, linesize );
+            }
+            else {
+                decode( infile, outfile );
+            }
+            if (ferror( infile ) || ferror( outfile )) {
+                retcode = B64_FILE_IO_ERROR;
+            }
+            else {
+                 retcode = 0;
+            }
+            // if( outfile != stdout ) {
+                if( fclose( outfile ) != 0 ) {
+                    perror( b64_message( B64_ERROR_OUT_CLOSE ) );
+                    retcode = B64_FILE_IO_ERROR;
+                }
+            // }
+        }
+        if( infile != stdin ) {
+            fclose( infile );
+        }
+    }
+
+    return( retcode );
+}
+
+
+/* R functions */
+
+SEXP base64_encode_(SEXP input, SEXP output, SEXP line_size){
+	int res = b64( 'e', 
+		(char*)CHAR(STRING_ELT(input,0)), 
+		(char*)CHAR(STRING_ELT(output,0)), 
+		INTEGER(line_size)[0] ) ;
+	if( res ){
+		error( "%s\n", b64_message( res ) ) ;	
+	}
+	return R_NilValue ;
+}
+
+SEXP base64_decode_(SEXP input, SEXP output){
+	int res = b64( 'd', 
+		(char*)CHAR(STRING_ELT(input,0)), 
+		(char*)CHAR(STRING_ELT(output,0)), 
+		0 ) ;
+	if( res ){
+		error( "%s\n", b64_message( res ) ) ;	
+	}
+	return R_NilValue ;
+}
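+
+/*
+** Hypothetical usage from R via .Call (file paths are placeholders):
+**
+**   .Call("base64_encode_", "input.bin", "output.txt", 72L)
+**   .Call("base64_decode_", "output.txt", "roundtrip.bin")
+*/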
+
+
diff --git a/src/base64.h b/src/base64.h
new file mode 100644
index 0000000..5281f58
--- /dev/null
+++ b/src/base64.h
@@ -0,0 +1,33 @@
+#ifndef BASE64__BASE64_H
+#define BASE64__BASE64_H
+
+#include <R.h> 
+#include <Rinternals.h> 
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/*
+** returnable errors
+**
+** Error codes returned to the operating system.
+**
+*/
+#define B64_SYNTAX_ERROR        1
+#define B64_FILE_ERROR          2
+#define B64_FILE_IO_ERROR       3
+#define B64_ERROR_OUT_CLOSE     4
+#define B64_LINE_SIZE_TO_MIN    5
+
+#define B64_DEF_LINE_SIZE   72                                                         
+#define B64_MIN_LINE_SIZE    4
+
+#define THIS_OPT(ac, av) (ac > 1 ? av[1][0] == '-' ? av[1][1] : 0 : 0)
+
+#define B64_MAX_MESSAGES 6
+
+SEXP base64_encode_(SEXP input, SEXP output, SEXP line_size) ;
+SEXP base64_decode_(SEXP input, SEXP output) ;
+
+
+#endif                                                                         
diff --git a/src/collapse_array.c b/src/collapse_array.c
new file mode 100644
index 0000000..af523a5
--- /dev/null
+++ b/src/collapse_array.c
@@ -0,0 +1,37 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+
+SEXP C_collapse_array(SEXP x) {
+  if (!isString(x))
+    error("x must be a character vector.");
+
+  int len = length(x);
+  size_t nchar_total = 0;
+
+  for (int i=0; i<len; i++) {
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(x, i)));
+  }
+
+  char *s = malloc(nchar_total+len+3); //if len is 0, we need at least: '[]\0'
+  char *olds = s;
+  size_t size;
+
+  for (int i=0; i<len; i++) {
+    s[0] = ',';
+    size = strlen(translateCharUTF8(STRING_ELT(x, i)));
+    memcpy(++s, translateCharUTF8(STRING_ELT(x, i)), size);
+    s += size;
+  }
+  if(olds == s) s++;
+  olds[0] = '[';
+  s[0] = ']';
+  s[1] = '\0';
+
+  //encode result as UTF-8 string
+  SEXP out = PROTECT(allocVector(STRSXP, 1));
+  SET_STRING_ELT(out, 0, mkCharCE(olds,  CE_UTF8));
+  UNPROTECT(1);
+  free(olds);
+  return out;
+}
diff --git a/src/collapse_object.c b/src/collapse_object.c
new file mode 100644
index 0000000..3c212d0
--- /dev/null
+++ b/src/collapse_object.c
@@ -0,0 +1,53 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+
+SEXP C_collapse_object(SEXP x, SEXP y) {
+  if (!isString(x) || !isString(y))
+    error("x and y must character vectors.");
+
+  int len = length(x);
+  if (len != length(y))
+    error("x and y must same length.");
+
+  size_t nchar_total = 0;
+
+  for (int i=0; i<len; i++) {
+    if(STRING_ELT(y, i) == NA_STRING) continue;
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(x, i)));
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(y, i)));
+    nchar_total += 2;
+  }
+
+  char *s = malloc(nchar_total + 3); //if len is 0, we need at least: '{}\0'
+  char *olds = s;
+  size_t size;
+
+  for (int i=0; i<len; i++) {
+    if(STRING_ELT(y, i) == NA_STRING) continue;
+    s[0] = ',';
+    //add x
+    size = strlen(translateCharUTF8(STRING_ELT(x, i)));
+    memcpy(++s, translateCharUTF8(STRING_ELT(x, i)), size);
+    s += size;
+
+    //add :
+    s[0] = ':';
+
+    //add y
+    size = strlen(translateCharUTF8(STRING_ELT(y, i)));
+    memcpy(++s, translateCharUTF8(STRING_ELT(y, i)), size);
+    s += size;
+  }
+  if(olds == s) s++;
+  olds[0] = '{';
+  s[0] = '}';
+  s[1] = '\0';
+
+  //encode result as UTF-8 string
+  SEXP out = PROTECT(allocVector(STRSXP, 1));
+  SET_STRING_ELT(out, 0, mkCharCE(olds,  CE_UTF8));
+  UNPROTECT(1);
+  free(olds);
+  return out;
+}
diff --git a/src/collapse_pretty.c b/src/collapse_pretty.c
new file mode 100644
index 0000000..e8b1a38
--- /dev/null
+++ b/src/collapse_pretty.c
@@ -0,0 +1,179 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+
+/* a function to insert n spaces */
+void append_whitespace(char** cur, size_t n){
+  memset(*cur, ' ', n);
+  *cur += n;
+}
+
+/* add and increment */
+void append_text(char **cur, const char* val, int n){
+  if(n < 0)
+    n = strlen(val);
+  memcpy(*cur, val, n);
+  *cur += n;
+}
+
+/* collapse a json object with n spaces */
+SEXP C_collapse_object_pretty(SEXP x, SEXP y, SEXP indent) {
+  if (!isString(x) || !isString(y))
+    error("x and y must character vectors.");
+
+  int ni = asInteger(indent);
+  if(ni == NA_INTEGER)
+    error("indent must not be NA");
+
+  int len = length(x);
+  if (len != length(y))
+    error("x and y must have same length.");
+
+  //calculate required space
+  size_t nchar_total = 0;
+  for (int i=0; i<len; i++) {
+    if(STRING_ELT(y, i) == NA_STRING) continue;
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(x, i)));
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(y, i)));
+    nchar_total += ni + 6; //indent plus two extra spaces plus ": " and ",\n"
+  }
+
+  //final indent plus curly braces and linebreak and terminator
+  nchar_total += (ni + 2 + 2);
+
+  //allocate memory and create a cursor
+  char *str = malloc(nchar_total);
+  char *cursor = str;
+  char **cur = &cursor;
+
+  //init object
+  append_text(cur, "{", 1);
+  const char *start = *cur;
+
+  //copy everything
+  for (int i=0; i<len; i++) {
+    if(STRING_ELT(y, i) == NA_STRING) continue;
+    append_text(cur, "\n", 1);
+    append_whitespace(cur, ni + 2);
+    append_text(cur, translateCharUTF8(STRING_ELT(x, i)), -1);
+    append_text(cur, ": ", 2);
+    append_text(cur, translateCharUTF8(STRING_ELT(y, i)), -1);
+    append_text(cur, ",", 1);
+  }
+
+  //finalize object
+  if(cursor != start){
+    cursor[-1] = '\n';
+    append_whitespace(cur, ni);
+  }
+  append_text(cur, "}\0", 2);
+
+  //encode as UTF8 string
+  SEXP out = PROTECT(allocVector(STRSXP, 1));
+  SET_STRING_ELT(out, 0, mkCharCE(str,  CE_UTF8));
+  UNPROTECT(1);
+  free(str);
+  return out;
+}
+
+SEXP C_collapse_array_pretty_inner(SEXP x) {
+  if (!isString(x))
+    error("x must character vector.");
+
+  //calculate required space
+  int len = length(x);
+  size_t nchar_total = 0;
+  for (int i=0; i<len; i++) {
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(x, i)));
+  }
+
+  // n-1 ", " separators
+  nchar_total += (len-1)*2;
+
+  //outer brackets plus terminator
+  nchar_total += 3;
+
+  //allocate memory and create a cursor
+  char *str = malloc(nchar_total);
+  char *cursor = str;
+  char **cur = &cursor;
+
+  //init object
+  append_text(cur, "[", 1);
+
+  //copy everything
+  for (int i=0; i<len; i++) {
+    append_text(cur, translateCharUTF8(STRING_ELT(x, i)), -1);
+    append_text(cur, ", ", 2);
+  }
+
+  //remove trailing ", "
+  if(len) {
+    cursor -= 2;
+  }
+
+  //finish up
+  append_text(cur, "]\0", 2);
+
+  //encode as UTF8 string
+  SEXP out = PROTECT(allocVector(STRSXP, 1));
+  SET_STRING_ELT(out, 0, mkCharCE(str,  CE_UTF8));
+  UNPROTECT(1);
+  free(str);
+  return out;
+}
+
+SEXP C_collapse_array_pretty_outer(SEXP x, SEXP indent) {
+  if (!isString(x))
+    error("x must character vector.");
+
+  int len = length(x);
+  int ni = asInteger(indent);
+  if(ni == NA_INTEGER)
+    error("indent must not be NA");
+
+  //calculate required space
+  size_t nchar_total = 0;
+  for (int i=0; i<len; i++) {
+    nchar_total += strlen(translateCharUTF8(STRING_ELT(x, i)));
+  }
+
+  //for indent plus two extra spaces plus ",\n"
+  nchar_total += len * (ni + 4);
+
+  //outer brackets plus final indent, linebreak and terminator
+  nchar_total += ni + 4;
+
+  //allocate memory and create a cursor
+  char *str = malloc(nchar_total);
+  char *cursor = str;
+  char **cur = &cursor;
+
+  //init object
+  append_text(cur, "[", 1);
+  const char *start = *cur;
+
+  //copy everything
+  for (int i=0; i<len; i++) {
+    append_text(cur, "\n", 1);
+    append_whitespace(cur, ni + 2);
+    append_text(cur, translateCharUTF8(STRING_ELT(x, i)), -1);
+    append_text(cur, ",", 1);
+  }
+
+  //replace trailing ',' with a linebreak
+  if(cursor != start){
+    cursor[-1] = '\n';
+    append_whitespace(cur, ni);
+  }
+
+  //finish up
+  append_text(cur, "]\0", 2);
+
+  //encode as UTF8 string
+  SEXP out = PROTECT(allocVector(STRSXP, 1));
+  SET_STRING_ELT(out, 0, mkCharCE(str,  CE_UTF8));
+  UNPROTECT(1);
+  free(str);
+  return out;
+}
diff --git a/src/escape_chars.c b/src/escape_chars.c
new file mode 100644
index 0000000..0778297
--- /dev/null
+++ b/src/escape_chars.c
@@ -0,0 +1,115 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+
+/*
+Fast escaping of character vectors (Winston Chang)
+https://gist.github.com/wch/e3ec5b20eb712f1b22b2
+http://stackoverflow.com/questions/25609174/fast-escaping-deparsing-of-character-vectors-in-r/25613834#25613834
+*/
+
+SEXP C_escape_chars_one(SEXP x) {
+  if (TYPEOF(x) != CHARSXP)
+    error("x must be a CHARSXP");
+
+  const char* old = CHAR(x);
+  char* old_p = (char*)old;
+
+  // Count up the number of matches
+  int matches = 0;
+  char oldc;
+  do {
+    oldc = *old_p;
+    switch(oldc) {
+      case '\\':
+      case '"':
+      case '\n':
+      case '\r':
+      case '\t':
+      case '\b':
+      case '\f':
+        matches++;
+    }
+    old_p++;
+  } while(oldc != '\0');
+
+  // Copy old string to new string, replacing where necessary.
+  old_p = (char*)old;
+  // Allocate string memory; add 2 for start and end quotes.
+  char* newstr = (char*)malloc(strlen(old) + matches + 3);
+  char* new_p = newstr;
+  *new_p = '"';
+  new_p++;
+
+  do {
+    oldc = *old_p;
+    switch(oldc) {
+      case '\\':
+        *new_p = '\\';
+        new_p++;
+        *new_p = '\\';
+        break;
+      case '"':
+        *new_p = '\\';
+        new_p++;
+        *new_p = '"';
+        break;
+      case '\n':
+        *new_p = '\\';
+        new_p++;
+        *new_p = 'n';
+        break;
+      case '\r':
+        *new_p = '\\';
+        new_p++;
+        *new_p = 'r';
+        break;
+      case '\t':
+        *new_p = '\\';
+        new_p++;
+        *new_p = 't';
+        break;
+      case '\b':
+        *new_p = '\\';
+        new_p++;
+        *new_p = 'b';
+        break;
+      case '\f':
+        *new_p = '\\';
+        new_p++;
+        *new_p = 'f';
+        break;
+      case '\0':
+        // End with a quote char
+        *new_p = '"';
+        new_p++;
+        *new_p = '\0';
+        break;
+      default:
+        *new_p = oldc;
+    }
+
+    old_p++;
+    new_p++;
+  } while(oldc != '\0');
+
+  SEXP val = mkCharCE(newstr, getCharCE(x));
+  free(newstr);
+  return val;
+}
+
+SEXP C_escape_chars(SEXP x) {
+  if (!isString(x))
+    error("x must be a character vector.");
+  if (x == R_NilValue || length(x) == 0)
+    return x;
+
+  int len = length(x);
+  SEXP out = PROTECT(allocVector(STRSXP, len));
+
+  for (int i=0; i<len; i++) {
+    SET_STRING_ELT(out, i, C_escape_chars_one(STRING_ELT(x, i)));
+  }
+  UNPROTECT(1);
+  return out;
+}
diff --git a/src/integer64_to_na.c b/src/integer64_to_na.c
new file mode 100644
index 0000000..850d62d
--- /dev/null
+++ b/src/integer64_to_na.c
@@ -0,0 +1,33 @@
+#include <Rinternals.h>
+#include <modp_numtoa.h>
+#define NA_INTEGER64 LLONG_MIN
+
+SEXP R_integer64_to_char(SEXP x, SEXP na_as_string){
+  int len = length(x);
+  int na_string = asLogical(na_as_string);
+  long long * xint = (long long *) REAL(x);
+  char buf[32];
+  SEXP out = PROTECT(allocVector(STRSXP, len));
+  for (int i = 0; i < len; i++) {
+    if(xint[i] == NA_INTEGER64){
+      if(na_string == NA_LOGICAL){
+        SET_STRING_ELT(out, i, NA_STRING);
+      } else if(na_string){
+        SET_STRING_ELT(out, i, mkChar("\"NA\""));
+      } else {
+        SET_STRING_ELT(out, i, mkChar("null"));
+      }
+    } else {
+      #ifdef _WIN32
+        snprintf(buf, 32, "%I64d", xint[i]);
+      #else
+        //snprintf(buf, 32, "%lld", xint[i]);
+        //modp is faster (but does not work on windows)
+        modp_litoa10(xint[i], buf);
+      #endif
+      SET_STRING_ELT(out, i, mkChar(buf));
+    }
+  }
+  UNPROTECT(1);
+  return out;
+}
diff --git a/src/is_recordlist.c b/src/is_recordlist.c
new file mode 100644
index 0000000..2b12a9c
--- /dev/null
+++ b/src/is_recordlist.c
@@ -0,0 +1,45 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+// .Call("C_is_namedlist", PACKAGE = "jsonlite", 123)
+bool is_namedlist(SEXP x) {
+  if(TYPEOF(x) == VECSXP && getAttrib(x, R_NamesSymbol) != R_NilValue){
+    return true;
+  }
+  return false;
+}
+
+bool is_unnamedlist(SEXP x) {
+  if(TYPEOF(x) == VECSXP && getAttrib(x, R_NamesSymbol) == R_NilValue){
+    return true;
+  }
+  return false;
+}
+
+bool is_namedlist_or_null(SEXP x){
+  return (is_namedlist(x) || (x == R_NilValue));
+}
+
+bool is_recordlist(SEXP x){
+  bool at_least_one_object = false;
+  if(!is_unnamedlist(x)){
+    return false;
+  }
+  int len = length(x);
+  if(len < 1){
+    return false;
+  }
+  for (int i=0; i<len; i++) {
+    if(!is_namedlist_or_null(VECTOR_ELT(x, i))) return false;
+    if(!at_least_one_object && is_namedlist(VECTOR_ELT(x, i))) {
+      at_least_one_object = true;
+    }
+  }
+  return at_least_one_object;
+}
+
+SEXP C_is_recordlist(SEXP x){
+  return ScalarLogical(is_recordlist(x));
+}
diff --git a/src/is_scalarlist.c b/src/is_scalarlist.c
new file mode 100644
index 0000000..8c29149
--- /dev/null
+++ b/src/is_scalarlist.c
@@ -0,0 +1,35 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+SEXP C_is_scalarlist(SEXP x) {
+
+  bool is_scalarlist = true;
+  if (TYPEOF(x) != VECSXP){
+    is_scalarlist = false;
+  } else {
+    SEXP el;
+    int len = length(x);
+    for (int i=0; i<len; i++) {
+      el = VECTOR_ELT(x, i);
+      switch(TYPEOF(el)) {
+        case LGLSXP:
+        case INTSXP:
+        case REALSXP:
+        case STRSXP:
+        case NILSXP:
+        case RAWSXP: //not used but for compatibility with is.atomic
+        case CPLXSXP: //not used but for compatibility with is.atomic
+          if(length(el) < 2) continue;
+          //else fall through
+        default:
+          is_scalarlist = false;
+          break;
+      }
+    }
+  }
+
+  //return result as an R logical scalar
+  return ScalarLogical(is_scalarlist);
+}
diff --git a/src/modp_numtoa.c b/src/modp_numtoa.c
new file mode 100644
index 0000000..acc2e17
--- /dev/null
+++ b/src/modp_numtoa.c
@@ -0,0 +1,291 @@
+/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set expandtab shiftwidth=4 tabstop=4: */
+
+#include "modp_numtoa.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <math.h>
+
+// other interesting references on num to string conversion
+// http://www.jb.man.ac.uk/~slowe/cpp/itoa.html
+// and http://www.ddj.com/dept/cpp/184401596?pgno=6
+
+// Version 19-Nov-2007
+// Fixed round-to-even rules to match printf
+//   thanks to Johannes Otepka
+
+/**
+ * Powers of 10
+ * 10^0 to 10^9
+ */
+static const double poww10[] = {1, 10, 100, 1000, 10000, 100000, 1000000,
+                               10000000, 100000000, 1000000000};
+
+static void strreverse(char* begin, char* end)
+{
+    char aux;
+    while (end > begin)
+        aux = *end, *end-- = *begin, *begin++ = aux;
+}
+
+void modp_itoa10(int32_t value, char* str)
+{
+    char* wstr=str;
+    // Take care of sign
+    unsigned int uvalue = (value < 0) ? -value : value;
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10);
+    if (value < 0) *wstr++ = '-';
+    *wstr='\0';
+
+    // Reverse string
+    strreverse(str,wstr-1);
+}
+
+void modp_uitoa10(uint32_t value, char* str)
+{
+    char* wstr=str;
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (value % 10)); while (value /= 10);
+    *wstr='\0';
+    // Reverse string
+    strreverse(str, wstr-1);
+}
+
+void modp_litoa10(int64_t value, char* str)
+{
+    char* wstr=str;
+    unsigned long uvalue = (value < 0) ? -value : value;
+
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (uvalue % 10)); while(uvalue /= 10);
+    if (value < 0) *wstr++ = '-';
+    *wstr='\0';
+
+    // Reverse string
+    strreverse(str,wstr-1);
+}
+
+void modp_ulitoa10(uint64_t value, char* str)
+{
+    char* wstr=str;
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (value % 10)); while (value /= 10);
+    *wstr='\0';
+    // Reverse string
+    strreverse(str, wstr-1);
+}
+
+void modp_dtoa(double value, char* str, int prec)
+{
+    /* Hacky test for NaN
+     * under -fast-math this won't work, but then you also won't
+     * have correct nan values anyways.  The alternative is
+     * to link with libmath (bad) or hack IEEE double bits (bad)
+     */
+    if (! (value == value)) {
+        str[0] = 'n'; str[1] = 'a'; str[2] = 'n'; str[3] = '\0';
+        return;
+    }
+    /* if input is larger than thres_max, revert to exponential */
+    const double thres_max = (double)(0x7FFFFFFF);
+
+    double diff = 0.0;
+    char* wstr = str;
+
+    if (prec < 0) {
+        prec = 0;
+    } else if (prec > 9) {
+        /* precision of >= 10 can lead to overflow errors */
+        prec = 9;
+    }
+
+
+    /* we'll work in positive values and deal with the
+       negative sign issue later */
+    int neg = 0;
+    if (value < 0) {
+        neg = 1;
+        value = -value;
+    }
+
+
+    int whole = (int) value;
+    double tmp = (value - whole) * poww10[prec];
+    uint32_t frac = (uint32_t)(tmp);
+    diff = tmp - frac;
+
+    if (diff > 0.5) {
+        ++frac;
+        /* handle rollover, e.g.  case 0.99 with prec 1 is 1.0  */
+        if (frac >= poww10[prec]) {
+            frac = 0;
+            ++whole;
+        }
+    } else if (diff == 0.5 && ((frac == 0) || (frac & 1))) {
+        /* if halfway, round up if odd, OR
+           if last digit is 0.  That last part is strange */
+        ++frac;
+    }
+
+    /* for very large numbers switch back to native sprintf for exponentials.
+       anyone want to write code to replace this? */
+    /*
+      normal printf behavior is to print EVERY whole number digit
+      which can be 100s of characters overflowing your buffers == bad
+    */
+    if (value > thres_max) {
+        sprintf(str, "%e", neg ? -value : value);
+        return;
+    }
+
+    if (prec == 0) {
+        diff = value - whole;
+        if (diff > 0.5) {
+            /* greater than 0.5, round up, e.g. 1.6 -> 2 */
+            ++whole;
+        } else if (diff == 0.5 && (whole & 1)) {
+            /* exactly 0.5 and ODD, then round up */
+            /* 1.5 -> 2, but 2.5 -> 2 */
+            ++whole;
+        }
+    } else {
+        int count = prec;
+        // now do fractional part, as an unsigned number
+        do {
+            --count;
+            *wstr++ = (char)(48 + (frac % 10));
+        } while (frac /= 10);
+        // add extra 0s
+        while (count-- > 0) *wstr++ = '0';
+        // add decimal
+        *wstr++ = '.';
+    }
+
+    // do whole part
+    // Take care of sign
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (whole % 10)); while (whole /= 10);
+    if (neg) {
+        *wstr++ = '-';
+    }
+    *wstr='\0';
+    strreverse(str, wstr-1);
+}
+
+
+// This is near identical to modp_dtoa above
+//   The difference is noted below
+void modp_dtoa2(double value, char* str, int prec)
+{
+    /* Hacky test for NaN
+     * under -fast-math this won't work, but then you also won't
+     * have correct nan values anyways.  The alternative is
+     * to link with libmath (bad) or hack IEEE double bits (bad)
+     */
+    if (! (value == value)) {
+        str[0] = 'n'; str[1] = 'a'; str[2] = 'n'; str[3] = '\0';
+        return;
+    }
+
+    /* if input is larger than thres_max, revert to exponential */
+    const double thres_max = (double)(0x7FFFFFFF);
+
+    int count;
+    double diff = 0.0;
+    char* wstr = str;
+
+    if (prec < 0) {
+        prec = 0;
+    } else if (prec > 9) {
+        /* precision of >= 10 can lead to overflow errors */
+        prec = 9;
+    }
+
+
+    /* we'll work in positive values and deal with the
+       negative sign issue later */
+    int neg = 0;
+    if (value < 0) {
+        neg = 1;
+        value = -value;
+    }
+
+
+    int whole = (int) value;
+    double tmp = (value - whole) * poww10[prec];
+    uint32_t frac = (uint32_t)(tmp);
+    diff = tmp - frac;
+
+    if (diff > 0.5) {
+        ++frac;
+        /* handle rollover, e.g.  case 0.99 with prec 1 is 1.0  */
+        if (frac >= poww10[prec]) {
+            frac = 0;
+            ++whole;
+        }
+    } else if (diff == 0.5 && ((frac == 0) || (frac & 1))) {
+        /* if halfway, round up if odd, OR
+           if last digit is 0.  That last part is strange */
+        ++frac;
+    }
+
+    /* for very large numbers switch back to native sprintf for exponentials.
+       anyone want to write code to replace this? */
+    /*
+      normal printf behavior is to print EVERY whole number digit
+      which can be 100s of characters overflowing your buffers == bad
+    */
+    if (value > thres_max) {
+        sprintf(str, "%e", neg ? -value : value);
+        return;
+    }
+
+    if (prec == 0) {
+        diff = value - whole;
+        if (diff > 0.5) {
+            /* greater than 0.5, round up, e.g. 1.6 -> 2 */
+            ++whole;
+        } else if (diff == 0.5 && (whole & 1)) {
+            /* exactly 0.5 and ODD, then round up */
+            /* 1.5 -> 2, but 2.5 -> 2 */
+            ++whole;
+        }
+
+        //vvvvvvvvvvvvvvvvvvv  Diff from modp_dtoa
+    } else if (frac) {
+        count = prec;
+        // now do fractional part, as an unsigned number
+        // we know it is not 0, but it can have trailing zeros which
+        // should be removed
+        while (!(frac % 10)) {
+            --count;
+            frac /= 10;
+        }
+        //^^^^^^^^^^^^^^^^^^^  Diff from modp_dtoa
+
+        // now do fractional part, as an unsigned number
+        do {
+            --count;
+            *wstr++ = (char)(48 + (frac % 10));
+        } while (frac /= 10);
+        // add extra 0s
+        while (count-- > 0) *wstr++ = '0';
+        // add decimal
+        *wstr++ = '.';
+    }
+
+    // do whole part
+    // Take care of sign
+    // Conversion. Number is reversed.
+    do *wstr++ = (char)(48 + (whole % 10)); while (whole /= 10);
+    if (neg) {
+        *wstr++ = '-';
+    }
+    *wstr='\0';
+    strreverse(str, wstr-1);
+}
+
+
+
diff --git a/src/modp_numtoa.h b/src/modp_numtoa.h
new file mode 100644
index 0000000..b848163
--- /dev/null
+++ b/src/modp_numtoa.h
@@ -0,0 +1,102 @@
+/* -*- mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set expandtab shiftwidth=4 tabstop=4: */
+
+/**
+ * \file
+ *
+ * <pre>
+ * Copyright © 2007, Nick Galbreath -- nickg [at] modp [dot] com
+ * All rights reserved.
+ * http://code.google.com/p/stringencoders/
+ * Released under the bsd license.
+ * </pre>
+ *
+ * This defines signed/unsigned integer, and 'double' to char buffer
+ * converters.  The standard way of doing this is with "sprintf", however
+ * these functions are
+ *   * guaranteed maximum-size output
+ *   * 5-20x faster!
+ *   * core-dump safe
+ *
+ *
+ */
+
+#ifndef COM_MODP_STRINGENCODERS_NUMTOA_H
+#define COM_MODP_STRINGENCODERS_NUMTOA_H
+
+#ifdef __cplusplus
+#define BEGIN_C extern "C" {
+#define END_C }
+#else
+#define BEGIN_C
+#define END_C
+#endif
+
+BEGIN_C
+
+#include <stdint.h>
+
+/** \brief convert a signed integer to char buffer
+ *
+ * \param[in] value
+ * \param[out] buf the output buffer.  Should be 16 chars or more.
+ */
+void modp_itoa10(int32_t value, char* buf);
+
+/** \brief convert an unsigned integer to char buffer
+ *
+ * \param[in] value
+ * \param[out] buf The output buffer, should be 16 chars or more.
+ */
+void modp_uitoa10(uint32_t value, char* buf);
+
+/** \brief convert a signed long integer to char buffer
+ *
+ * \param[in] value
+ * \param[out] buf the output buffer.  Should be 24 chars or more.
+ */
+void modp_litoa10(int64_t value, char* buf);
+
+/** \brief convert an unsigned long integer to char buffer
+ *
+ * \param[in] value
+ * \param[out] buf The output buffer, should be 24 chars or more.
+ */
+void modp_ulitoa10(uint64_t value, char* buf);
+
+/** \brief convert a floating point number to char buffer with
+ *         fixed-precision format
+ *
+ * This is similar to "%.[0-9]f" in the printf style.  It will include
+ * trailing zeros
+ *
+ * If the input value is greater than 1<<31, then the output format
+ * will be switched to exponential format.
+ *
+ * \param[in] value
+ * \param[out] buf  The allocated output buffer.  Should be 32 chars or more.
+ * \param[in] precision  Number of digits to the right of the decimal point.
+ *    Can only be 0-9.
+ */
+void modp_dtoa(double value, char* buf, int precision);
+
+/** \brief convert a floating point number to char buffer with a
+ *         variable-precision format, and no trailing zeros
+ *
+ * This is similar to "%.[0-9]f" in the printf style, except it will
+ * NOT include trailing zeros after the decimal point.  This type
+ * of format oddly does not exist in printf.
+ *
+ * If the input value is greater than 1<<31, then the output format
+ * will be switched to exponential format.
+ *
+ * \param[in] value
+ * \param[out] buf  The allocated output buffer.  Should be 32 chars or more.
+ * \param[in] precision  Number of digits to the right of the decimal point.
+ *    Can only be 0-9.
+ */
+void modp_dtoa2(double value, char* buf, int precision);
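+
+/* A usage sketch (buffer sizes follow the notes above; outputs assume
+ * the documented rounding and trailing-zero behavior):
+ *
+ *   char buf[32];
+ *   modp_dtoa(3.14159, buf, 2);   // buf == "3.14" (fixed precision)
+ *   modp_dtoa2(2.5000, buf, 4);   // buf == "2.5"  (no trailing zeros)
+ */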
+
+END_C
+
+#endif
diff --git a/src/null_to_na.c b/src/null_to_na.c
new file mode 100644
index 0000000..d3eb307
--- /dev/null
+++ b/src/null_to_na.c
@@ -0,0 +1,59 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+/*
+This function takes a list and replaces all NULL values by NA.
+In addition, it will parse the strings "NA", "NaN", "Inf" and "-Inf",
+unless there is at least one non-NA string element in the list.
+In that case converting to real values is pointless, because
+unlist() will coerce them back into a string anyway.
+*/
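+
+/*
+Illustrative sketch of that rule (R notation; hypothetical inputs):
+  list(1, NULL, "NA")     -> list(1, NA, NA)       ("NA" gets parsed)
+  list("foo", NULL, "NA") -> list("foo", NA, "NA") (looks like character)
+*/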
+
+SEXP C_null_to_na(SEXP x) {
+  int len = length(x);
+  if(len == 0) return x;
+
+  //null always turns into NA
+  bool looks_like_character_vector = false;
+  for (int i=0; i<len; i++) {
+    if(VECTOR_ELT(x, i) == R_NilValue) {
+      SET_VECTOR_ELT(x, i, ScalarLogical(NA_LOGICAL));
+    } else if(!looks_like_character_vector && TYPEOF(VECTOR_ELT(x, i)) == STRSXP){
+      if((strcmp("NA", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) ||
+         (strcmp("NaN", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) ||
+         (strcmp("Inf", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) ||
+         (strcmp("-Inf", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0)) continue;
+      looks_like_character_vector = true;
+    }
+  }
+
+  // if this is a character vector, do not parse NA strings.
+  if(looks_like_character_vector) return(x);
+
+  //parse NA strings
+  for (int i=0; i<len; i++) {
+    if(TYPEOF(VECTOR_ELT(x, i)) == STRSXP){
+      if(strcmp("NA", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) {
+        SET_VECTOR_ELT(x, i, ScalarLogical(NA_LOGICAL));
+        continue;
+      }
+      if(strcmp("NaN", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) {
+        SET_VECTOR_ELT(x, i, ScalarReal(R_NaN));
+        continue;
+      }
+      if(strcmp("Inf", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) {
+        SET_VECTOR_ELT(x, i, ScalarReal(R_PosInf));
+        continue;
+      }
+      if(strcmp("-Inf", CHAR(STRING_ELT(VECTOR_ELT(x, i), 0))) == 0) {
+        SET_VECTOR_ELT(x, i, ScalarReal(R_NegInf));
+        continue;
+      }
+    }
+  }
+
+  //return updated list
+  return x;
+}
diff --git a/src/num_to_char.c b/src/num_to_char.c
new file mode 100644
index 0000000..8aa9b44
--- /dev/null
+++ b/src/num_to_char.c
@@ -0,0 +1,78 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+#include <modp_numtoa.h>
+
+SEXP R_num_to_char(SEXP x, SEXP digits, SEXP na_as_string, SEXP use_signif) {
+  int len = length(x);
+  int na_string = asLogical(na_as_string);
+  int signif = asLogical(use_signif);
+  char buf[32];
+  SEXP out = PROTECT(allocVector(STRSXP, len));
+  if(isInteger(x)){
+    for (int i=0; i<len; i++) {
+      if(INTEGER(x)[i] == NA_INTEGER){
+        if(na_string == NA_LOGICAL){
+          SET_STRING_ELT(out, i, NA_STRING);
+        } else if(na_string){
+          SET_STRING_ELT(out, i, mkChar("\"NA\""));
+        } else {
+          SET_STRING_ELT(out, i, mkChar("null"));
+        }
+      } else {
+        modp_itoa10(INTEGER(x)[i], buf);
+        SET_STRING_ELT(out, i, mkChar(buf));
+      }
+    }
+  } else if(isReal(x)) {
+    int precision = asInteger(digits);
+    double * xreal = REAL(x);
+    for (int i=0; i<len; i++) {
+      double val = xreal[i];
+      if(!R_FINITE(val)){
+        if(na_string == NA_LOGICAL){
+          SET_STRING_ELT(out, i, NA_STRING);
+        } else if(na_string){
+          if(ISNA(val)){
+            SET_STRING_ELT(out, i, mkChar("\"NA\""));
+          } else if(ISNAN(val)){
+            SET_STRING_ELT(out, i, mkChar("\"NaN\""));
+          } else if(val == R_PosInf){
+            SET_STRING_ELT(out, i, mkChar("\"Inf\""));
+          } else if(val == R_NegInf){
+            SET_STRING_ELT(out, i, mkChar("\"-Inf\""));
+          } else {
+            error("Unrecognized non finite value.");
+          }
+        } else {
+          SET_STRING_ELT(out, i, mkChar("null"));
+        }
+      } else if(precision == NA_INTEGER){
+        snprintf(buf, 32, "%.15g", val);
+        SET_STRING_ELT(out, i, mkChar(buf));
+      } else if(signif){
+        //use significant digits rather than decimal digits
+        snprintf(buf, 32, "%.*g", (int) ceil(fmin(15, precision)), val);
+        SET_STRING_ELT(out, i, mkChar(buf));
+      } else if(precision > -1 && precision < 10 && fabs(val) < 2147483647 && fabs(val) > 1e-5) {
+        //preferred method: fast with fixed decimal digits
+        //does not support large numbers or scientific notation
+        modp_dtoa2(val, buf, precision);
+        SET_STRING_ELT(out, i, mkChar(buf));
+        //Rprintf("Using modp_dtoa2\n");
+      } else {
+        //fall back on sprintf (includes scientific notation)
+        //limit total precision to 15 significant digits to avoid noise
+        //funky formula is mostly to convert decimal digits into significant digits
+        snprintf(buf, 32, "%.*g", (int) ceil(fmin(15, fmax(1, log10(val)) + precision)), val);
+        SET_STRING_ELT(out, i, mkChar(buf));
+        //Rprintf("Using sprintf with precision %d digits\n",(int) ceil(fmin(15, fmax(1, log10(val)) + precision)));
+      }
+    }
+  } else {
+    error("num_to_char called with invalid object type.");
+  }
+
+  UNPROTECT(1);
+  return out;
+}
diff --git a/src/parse.c b/src/parse.c
new file mode 100644
index 0000000..c9192f3
--- /dev/null
+++ b/src/parse.c
@@ -0,0 +1,112 @@
+/*
+   This function uses the YAJL tree parser to parse the entire document
+   before converting it to an R list. It might be faster to use the YAJL
+   callback mechanism instead to construct the R list immediately while
+   parsing the JSON. But that looks very complicated.
+*/
+
+#include <Rinternals.h>
+#include <yajl_tree.h>
+
+SEXP ParseObject(yajl_val node, int bigint);
+SEXP ParseArray(yajl_val node, int bigint);
+SEXP ParseValue(yajl_val node, int bigint);
+
+SEXP R_parse(SEXP x, SEXP bigint_as_char) {
+    /* get data from R */
+    const char* json = translateCharUTF8(asChar(x));
+    const int bigint = asLogical(bigint_as_char);
+
+    /* ignore BOM as suggested by RFC */
+    if(json[0] == '\xEF' && json[1] == '\xBB' && json[2] == '\xBF'){
+      warningcall(R_NilValue, "JSON string contains (illegal) UTF8 byte-order-mark!");
+      json = json + 3;
+    }
+
+    /* parse json */
+    char errbuf[1024];
+    yajl_val node = yajl_tree_parse(json, errbuf, sizeof(errbuf));
+
+    /* parser error */
+    if (!node) {
+      Rf_errorcall(R_NilValue, "%s", errbuf);
+    }
+    SEXP out = ParseValue(node, bigint);
+    yajl_tree_free(node);
+    return(out);
+}
+
+SEXP ParseValue(yajl_val node, int bigint){
+  if(YAJL_IS_NULL(node)){
+    return R_NilValue;
+  }
+  if(YAJL_IS_STRING(node)){
+    SEXP tmp = PROTECT(allocVector(STRSXP, 1));
+    SET_STRING_ELT(tmp, 0, mkCharCE(YAJL_GET_STRING(node), CE_UTF8));
+    UNPROTECT(1);
+    return tmp;
+  }
+  if(YAJL_IS_INTEGER(node)){
+    long long int val = YAJL_GET_INTEGER(node);
+    /* 2^53 is highest int stored as double without loss */
+    if(bigint && (val > 9007199254740992 || val < -9007199254740992)){
+      char buf[32];
+      #ifdef _WIN32
+      snprintf(buf, 32, "%I64d", val);
+      #else
+      snprintf(buf, 32, "%lld", val);
+      #endif
+      return mkString(buf);
+    /* see .Machine$integer.max in R */
+    } else if(val > 2147483647 || val < -2147483647){
+      return ScalarReal(val);
+    } else {
+      return ScalarInteger(val);
+    }
+  }
+  if(YAJL_IS_DOUBLE(node)){
+    return(ScalarReal(YAJL_GET_DOUBLE(node)));
+  }
+  if(YAJL_IS_NUMBER(node)){
+    /* A number that is not int or double (very rare) */
+    /* This seems to correctly round to Inf/0/-Inf */
+    return(ScalarReal(YAJL_GET_DOUBLE(node)));
+  }
+  if(YAJL_IS_TRUE(node)){
+    return(ScalarLogical(1));
+  }
+  if(YAJL_IS_FALSE(node)){
+    return(ScalarLogical(0));
+  }
+  if(YAJL_IS_OBJECT(node)){
+    return(ParseObject(node, bigint));
+  }
+  if(YAJL_IS_ARRAY(node)){
+    return(ParseArray(node, bigint));
+  }
+  error("Invalid YAJL node type.");
+}
+
+SEXP ParseObject(yajl_val node, int bigint){
+  int len = YAJL_GET_OBJECT(node)->len;
+  SEXP keys = PROTECT(allocVector(STRSXP, len));
+  SEXP vec = PROTECT(allocVector(VECSXP, len));
+  for (int i = 0; i < len; ++i) {
+    SET_STRING_ELT(keys, i, mkCharCE(YAJL_GET_OBJECT(node)->keys[i], CE_UTF8));
+    SET_VECTOR_ELT(vec, i, ParseValue(YAJL_GET_OBJECT(node)->values[i], bigint));
+  }
+  setAttrib(vec, R_NamesSymbol, keys);
+  UNPROTECT(2);
+  return vec;
+}
+
+SEXP ParseArray(yajl_val node, int bigint){
+  int len = YAJL_GET_ARRAY(node)->len;
+  SEXP vec = PROTECT(allocVector(VECSXP, len));
+  for (int i = 0; i < len; ++i) {
+    SET_VECTOR_ELT(vec, i, ParseValue(YAJL_GET_ARRAY(node)->values[i], bigint));
+  }
+  UNPROTECT(1);
+  return vec;
+}
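
The recursive dispatch in ParseValue() mirrors the generic way a YAJL parse tree is walked. A minimal standalone sketch of the same pattern (not part of this diff; assumes a system-wide YAJL 2.x install, link with -lyajl, and adjust the include path for the vendored copy):

    #include <stdio.h>
    #include <yajl/yajl_tree.h>

    static void walk(yajl_val node, int depth) {
      if (YAJL_IS_OBJECT(node)) {
        for (size_t i = 0; i < YAJL_GET_OBJECT(node)->len; i++) {
          printf("%*s%s:\n", depth * 2, "", YAJL_GET_OBJECT(node)->keys[i]);
          walk(YAJL_GET_OBJECT(node)->values[i], depth + 1);
        }
      } else if (YAJL_IS_ARRAY(node)) {
        for (size_t i = 0; i < YAJL_GET_ARRAY(node)->len; i++)
          walk(YAJL_GET_ARRAY(node)->values[i], depth + 1);
      } else if (YAJL_IS_NUMBER(node)) {
        printf("%*snumber %s\n", depth * 2, "", YAJL_GET_NUMBER(node)); /* unparsed form */
      } else if (YAJL_IS_STRING(node)) {
        printf("%*sstring %s\n", depth * 2, "", YAJL_GET_STRING(node));
      }
    }

    int main(void) {
      char errbuf[1024];
      yajl_val root = yajl_tree_parse("{\"a\": [1, 2.5, \"x\"]}", errbuf, sizeof(errbuf));
      if (!root) { fprintf(stderr, "%s\n", errbuf); return 1; }
      walk(root, 0);
      yajl_tree_free(root);
      return 0;
    }
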
diff --git a/src/prettify.c b/src/prettify.c
new file mode 100644
index 0000000..b940c2d
--- /dev/null
+++ b/src/prettify.c
@@ -0,0 +1,148 @@
+#include <Rinternals.h>
+#include <string.h>
+#include <yajl_parse.h>
+#include <yajl_gen.h>
+
+static int s_streamReformat = 0;
+
+#define GEN_AND_RETURN(func){\
+    yajl_gen_status __stat = func;\
+    if (__stat == yajl_gen_generation_complete && s_streamReformat) {\
+      yajl_gen_reset(g, "\n");\
+      __stat = func;\
+    }\
+    return __stat == yajl_gen_status_ok;\
+}
+
+static int reformat_null(void * ctx)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_null(g));
+}
+
+static int reformat_boolean(void * ctx, int boolean)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_bool(g, boolean));
+}
+
+static int reformat_number(void * ctx, const char * s, size_t l)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_number(g, s, l));
+}
+
+static int reformat_string(void * ctx, const unsigned char * stringVal,
+                           size_t stringLen)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_string(g, stringVal, stringLen));
+}
+
+static int reformat_map_key(void * ctx, const unsigned char * stringVal,
+                            size_t stringLen)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_string(g, stringVal, stringLen));
+}
+
+static int reformat_start_map(void * ctx)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_map_open(g));
+}
+
+static int reformat_end_map(void * ctx)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_map_close(g));
+}
+
+static int reformat_start_array(void * ctx)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_array_open(g));
+}
+
+static int reformat_end_array(void * ctx)
+{
+    yajl_gen g = (yajl_gen) ctx;
+    GEN_AND_RETURN(yajl_gen_array_close(g));
+}
+
+static yajl_callbacks callbacks = {
+    reformat_null,
+    reformat_boolean,
+    NULL,
+    NULL,
+    reformat_number,
+    reformat_string,
+    reformat_start_map,
+    reformat_map_key,
+    reformat_end_map,
+    reformat_start_array,
+    reformat_end_array
+};
+
+SEXP R_reformat(SEXP x, SEXP pretty, SEXP indent_string) {
+    yajl_status stat;
+    yajl_handle hand;
+    yajl_gen g;
+    SEXP output;
+
+    /* init generator */
+    g = yajl_gen_alloc(NULL);
+    yajl_gen_config(g, yajl_gen_beautify, asInteger(pretty));
+    yajl_gen_config(g, yajl_gen_indent_string, translateCharUTF8(asChar(indent_string)));
+    yajl_gen_config(g, yajl_gen_validate_utf8, 0);
+
+    /* init parser */
+    hand = yajl_alloc(&callbacks, NULL, (void *) g);
+
+    /* get data from R */
+    const char* json = translateCharUTF8(asChar(x));
+
+    /* ignore BOM */
+    if(json[0] == '\xEF' && json[1] == '\xBB' && json[2] == '\xBF'){
+      json = json + 3;
+    }
+
+    /* Get length (after removing bom) */
+    const size_t rd = strlen(json);
+
+    /* parse */
+    stat = yajl_parse(hand, (const unsigned char*) json, rd);
+    if(stat == yajl_status_ok) {
+      stat = yajl_complete_parse(hand);
+    }
+
+    //error message
+    if (stat != yajl_status_ok) {
+      unsigned char* str = yajl_get_error(hand, 1, (const unsigned char*) json, rd);
+      output = PROTECT(mkString((const char*) str));
+      yajl_free_error(hand, str);
+    } else {
+      //create R object
+      const unsigned char* buf;
+      size_t len;
+      yajl_gen_get_buf(g, &buf, &len);
+
+      //force as UTF8 string
+      output = PROTECT(allocVector(STRSXP, 1));
+      SET_STRING_ELT(output, 0, mkCharCE((const char*) buf, CE_UTF8));
+      setAttrib(output, R_ClassSymbol, mkString("json"));
+    }
+
+    /* clean up */
+    yajl_gen_clear(g);
+    yajl_gen_free(g);
+    yajl_free(hand);
+
+    /* return a list: parser status (0 means ok, i.e. valid) plus output string */
+    SEXP vec = PROTECT(allocVector(VECSXP, 2));
+    SET_VECTOR_ELT(vec, 0, ScalarInteger(stat));
+    SET_VECTOR_ELT(vec, 1, output);
+    UNPROTECT(2);
+    return vec;
+}
diff --git a/src/push_parser.c b/src/push_parser.c
new file mode 100644
index 0000000..e05df9d
--- /dev/null
+++ b/src/push_parser.c
@@ -0,0 +1,64 @@
+#include <Rinternals.h>
+#include <yajl_tree.h>
+#include <yajl_parse.h>
+#include <push_parser.h>
+
+/* finalizer */
+yajl_handle push_parser;
+
+void reset_parser(){
+  if(push_parser != NULL){
+    yajl_free(push_parser);
+    push_parser = NULL;
+  }
+}
+
+SEXP R_feed_push_parser(SEXP x, SEXP reset){
+
+  /* raw pointer */
+  const unsigned char *json = RAW(x);
+  int len = LENGTH(x);
+
+  /* init new push parser */
+  if(asLogical(reset)) {
+    reset_parser();
+    push_parser = push_parser_new();
+
+    /* ignore BOM as suggested by RFC */
+    if(len > 3 && json[0] == 239 && json[1] == 187 && json[2] == 191){
+      warningcall(R_NilValue, "JSON string contains (illegal) UTF8 byte-order-mark!");
+      json += 3;
+      len -= 3;
+    }
+  }
+
+  /* check for errors */
+  if (yajl_parse(push_parser, json, len) != yajl_status_ok) {
+    unsigned char* errstr = yajl_get_error(push_parser, 1, RAW(x), length(x));
+    SEXP tmp = mkChar((const char*) errstr);
+    yajl_free_error(push_parser, errstr);
+    reset_parser();
+    error("%s", CHAR(tmp));
+  }
+
+  /* return OK */
+  return ScalarLogical(1);
+}
+
+SEXP R_finalize_push_parser(SEXP bigint_as_char){
+  /* check for errors */
+  if (yajl_complete_parse(push_parser) != yajl_status_ok) {
+    unsigned char* errstr = yajl_get_error(push_parser, 1, NULL, 0);
+    SEXP tmp = mkChar((const char*) errstr);
+    yajl_free_error(push_parser, errstr);
+    reset_parser();
+    error("%s", CHAR(tmp));
+  }
+
+  /* get value */
+  yajl_val tree = push_parser_get(push_parser);
+  SEXP out = ParseValue(tree, asLogical(bigint_as_char));
+  yajl_tree_free(tree);
+  reset_parser();
+  return out;
+}
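
R_feed_push_parser() wraps the standard YAJL push pattern: hand the parser arbitrary byte slices as they arrive, then finish with yajl_complete_parse(). A minimal standalone sketch (not part of this diff; assumes YAJL 2.x and -lyajl; NULL callbacks make the handle validate only):

    #include <stdio.h>
    #include <string.h>
    #include <yajl/yajl_parse.h>

    int main(void) {
      const char *chunks[] = {"{\"a\": [1, ", "2, 3]", "}"};
      yajl_handle h = yajl_alloc(NULL, NULL, NULL);
      yajl_status stat = yajl_status_ok;
      for (int i = 0; i < 3 && stat == yajl_status_ok; i++)
        stat = yajl_parse(h, (const unsigned char*) chunks[i], strlen(chunks[i]));
      if (stat == yajl_status_ok)
        stat = yajl_complete_parse(h);   /* flush any buffered token */
      printf("status: %s\n", yajl_status_to_string(stat));
      yajl_free(h);
      return 0;
    }
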
diff --git a/src/push_parser.h b/src/push_parser.h
new file mode 100644
index 0000000..2c768c3
--- /dev/null
+++ b/src/push_parser.h
@@ -0,0 +1,3 @@
+yajl_handle push_parser_new();
+yajl_val push_parser_get(yajl_handle handle);
+SEXP ParseValue(yajl_val node, int bigint_as_char);
diff --git a/src/row_collapse.c b/src/row_collapse.c
new file mode 100644
index 0000000..d576eee
--- /dev/null
+++ b/src/row_collapse.c
@@ -0,0 +1,55 @@
+#include <Rdefines.h>
+#include <Rinternals.h>
+#include <stdlib.h>
+
+SEXP C_collapse_object(SEXP x, SEXP y);
+SEXP C_collapse_array(SEXP x);
+SEXP C_collapse_array_pretty_inner(SEXP x, SEXP indent);
+SEXP C_collapse_object_pretty(SEXP x, SEXP y, SEXP indent);
+
+SEXP C_row_collapse_object(SEXP names, SEXP m, SEXP indent){
+  //get matrix dimensions
+  int *dims = INTEGER(getAttrib(m, install("dim")));
+  int x = dims[0];
+  int y = dims[1];
+
+  //allocate the output vector
+  SEXP out = PROTECT(allocVector(STRSXP, x));
+  SEXP vec = PROTECT(allocVector(STRSXP, y));
+  for(int i = 0; i < x; i++) {
+    for(int j = 0; j < y; j++) {
+      SET_STRING_ELT(vec, j, STRING_ELT(m, j*x + i));
+    }
+    if(asInteger(indent) == NA_INTEGER){
+      SET_STRING_ELT(out, i, asChar(C_collapse_object(names, vec)));
+    } else {
+      SET_STRING_ELT(out, i, asChar(C_collapse_object_pretty(names, vec, indent)));
+    }
+  }
+  UNPROTECT(2);
+  return out;
+}
+
+
+SEXP C_row_collapse_array(SEXP m, SEXP indent){
+  //get matrix dimensions
+  int *dims = INTEGER(getAttrib(m, install("dim")));
+  int x = dims[0];
+  int y = dims[1];
+
+  //allocate the output vector
+  SEXP out = PROTECT(allocVector(STRSXP, x));
+  SEXP vec = PROTECT(allocVector(STRSXP, y));
+  for(int i = 0; i < x; i++) {
+    for(int j = 0; j < y; j++) {
+      SET_STRING_ELT(vec, j, STRING_ELT(m, j*x + i));
+    }
+    if(asInteger(indent) == NA_INTEGER){
+      SET_STRING_ELT(out, i, asChar(C_collapse_array(vec)));
+    } else {
+      SET_STRING_ELT(out, i, asChar(C_collapse_array_pretty_inner(vec, indent)));
+    }
+  }
+  UNPROTECT(2);
+  return out;
+}
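
Both collapse functions rely on R's column-major matrix layout: element (row i, column j) of an x-by-y matrix sits at flat offset j*x + i. A tiny standalone sketch (not part of this diff) of that indexing:

    #include <stdio.h>

    int main(void) {
      /* a 2x3 matrix stored by column, as R stores it */
      const char *m[] = {"a1", "a2", "b1", "b2", "c1", "c2"};
      int x = 2, y = 3;
      for (int i = 0; i < x; i++) {        /* emit one row at a time */
        for (int j = 0; j < y; j++)
          printf("%s ", m[j * x + i]);
        printf("\n");                      /* a1 b1 c1, then a2 b2 c2 */
      }
      return 0;
    }
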
diff --git a/src/validate.c b/src/validate.c
new file mode 100644
index 0000000..3a2da33
--- /dev/null
+++ b/src/validate.c
@@ -0,0 +1,43 @@
+#include <Rinternals.h>
+#include <string.h>
+#include <yajl_parse.h>
+
+SEXP R_validate(SEXP x) {
+    /* get data from R */
+    const char* json = translateCharUTF8(asChar(x));
+
+    /* test for BOM */
+    if(json[0] == '\xEF' && json[1] == '\xBB' && json[2] == '\xBF'){
+      SEXP output = PROTECT(duplicate(ScalarLogical(0)));
+      setAttrib(output, install("err"), mkString("JSON string contains UTF8 byte-order-mark."));
+      UNPROTECT(1);
+      return(output);
+    }
+
+    /* allocate a parser */
+    yajl_handle hand = yajl_alloc(NULL, NULL, NULL);
+
+    /* parser options */
+    //yajl_config(hand, yajl_dont_validate_strings, 1);
+
+    /* go parse */
+    const size_t rd = strlen(json);
+    yajl_status stat = yajl_parse(hand, (const unsigned char*) json, rd);
+    if(stat == yajl_status_ok) {
+      stat = yajl_complete_parse(hand);
+    }
+
+    SEXP output = PROTECT(duplicate(ScalarLogical(!stat)));
+
+    //error message
+    if (stat != yajl_status_ok) {
+        unsigned char* str = yajl_get_error(hand, 1, (const unsigned char*) json, rd);
+        SEXP errstr = mkString((const char *) str);
+        yajl_free_error(hand, str);
+        setAttrib(output, install("err"), errstr);
+    }
+
+    /* return logical: TRUE means no errors, i.e. valid JSON */
+    yajl_free(hand);
+    UNPROTECT(1);
+    return output;
+}
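
The "err" attribute above carries the verbose message from yajl_get_error(), which quotes the offending text with a caret under the error position. A minimal standalone sketch of that error path (not part of this diff; assumes YAJL 2.x, -lyajl):

    #include <stdio.h>
    #include <string.h>
    #include <yajl/yajl_parse.h>

    int main(void) {
      const char *json = "{\"a\": tru}";   /* deliberately invalid */
      yajl_handle h = yajl_alloc(NULL, NULL, NULL);
      size_t len = strlen(json);
      if (yajl_parse(h, (const unsigned char*) json, len) != yajl_status_ok) {
        unsigned char *err = yajl_get_error(h, 1, (const unsigned char*) json, len);
        fprintf(stderr, "%s", err);        /* verbose: includes context line */
        yajl_free_error(h, err);
      }
      yajl_free(h);
      return 0;
    }
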
diff --git a/src/yajl/api/yajl_common.h b/src/yajl/api/yajl_common.h
new file mode 100644
index 0000000..9596ef9
--- /dev/null
+++ b/src/yajl/api/yajl_common.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __YAJL_COMMON_H__
+#define __YAJL_COMMON_H__
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define YAJL_MAX_DEPTH 128
+
+/* msft dll export gunk.  To build a DLL on windows, you
+ * must define WIN32, YAJL_SHARED, and YAJL_BUILD.  To use a shared
+ * DLL, you must define YAJL_SHARED and WIN32 */
+#if (defined(_WIN32) || defined(WIN32)) && defined(YAJL_SHARED)
+#  ifdef YAJL_BUILD
+#    define YAJL_API __declspec(dllexport)
+#  else
+#    define YAJL_API __declspec(dllimport)
+#  endif
+#else
+#  if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 303
+#    define YAJL_API __attribute__ ((visibility("default")))
+#  else
+#    define YAJL_API
+#  endif
+#endif
+
+/** pointer to a malloc function, supporting client overriding memory
+ *  allocation routines */
+typedef void * (*yajl_malloc_func)(void *ctx, size_t sz);
+
+/** pointer to a free function, supporting client overriding memory
+ *  allocation routines */
+typedef void (*yajl_free_func)(void *ctx, void * ptr);
+
+/** pointer to a realloc function which can resize an allocation. */
+typedef void * (*yajl_realloc_func)(void *ctx, void * ptr, size_t sz);
+
+/** A structure which can be passed to yajl_*_alloc routines to allow the
+ *  client to specify memory allocation functions to be used. */
+typedef struct
+{
+    /** pointer to a function that can allocate uninitialized memory */
+    yajl_malloc_func malloc;
+    /** pointer to a function that can resize memory allocations */
+    yajl_realloc_func realloc;
+    /** pointer to a function that can free memory allocated using
+     *  reallocFunction or mallocFunction */
+    yajl_free_func free;
+    /** a context pointer that will be passed to above allocation routines */
+    void * ctx;
+} yajl_alloc_funcs;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/yajl/api/yajl_gen.h b/src/yajl/api/yajl_gen.h
new file mode 100644
index 0000000..caef55e
--- /dev/null
+++ b/src/yajl/api/yajl_gen.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * \file yajl_gen.h
+ * Interface to YAJL's JSON generation facilities.
+ */
+
+#include <yajl_common.h>
+
+#ifndef __YAJL_GEN_H__
+#define __YAJL_GEN_H__
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+    /** generator status codes */
+    typedef enum {
+        /** no error */
+        yajl_gen_status_ok = 0,
+        /** at a point where a map key is generated, a function other than
+         *  yajl_gen_string was called */
+        yajl_gen_keys_must_be_strings,
+        /** YAJL's maximum generation depth was exceeded.  see
+         *  YAJL_MAX_DEPTH */
+        yajl_max_depth_exceeded,
+        /** A generator function (yajl_gen_XXX) was called while in an error
+         *  state */
+        yajl_gen_in_error_state,
+        /** A complete JSON document has been generated */
+        yajl_gen_generation_complete,
+        /** yajl_gen_double was passed an invalid floating point value
+         *  (infinity or NaN). */
+        yajl_gen_invalid_number,
+        /** A print callback was passed in, so there is no internal
+         * buffer to get from */
+        yajl_gen_no_buf,
+        /** returned from yajl_gen_string() when the yajl_gen_validate_utf8
+         *  option is enabled and an invalid string was passed by client code.
+         */
+        yajl_gen_invalid_string
+    } yajl_gen_status;
+
+    /** an opaque handle to a generator */
+    typedef struct yajl_gen_t * yajl_gen;
+
+    /** a callback used for "printing" the results. */
+    typedef void (*yajl_print_t)(void * ctx,
+                                 const char * str,
+                                 size_t len);
+
+    /** configuration parameters for the parser, these may be passed to
+     *  yajl_gen_config() along with option specific argument(s).  In general,
+     *  all configuration parameters default to *off*. */
+    typedef enum {
+        /** generate indented (beautiful) output */
+        yajl_gen_beautify = 0x01,
+        /**
+         * Set an indent string which is used when yajl_gen_beautify
+         * is enabled.  Maybe something like \\t or some number of
+         * spaces.  The default is four spaces '    '.
+         */
+        yajl_gen_indent_string = 0x02,
+        /**
+         * Set a function and context argument that should be used to
+         * output generated json.  the function should conform to the
+         * yajl_print_t prototype while the context argument is a
+         * void * of your choosing.
+         *
+         * example:
+         *   yajl_gen_config(g, yajl_gen_print_callback, myFunc, myVoidPtr);
+         */
+        yajl_gen_print_callback = 0x04,
+        /**
+         * Normally the generator does not validate that strings you
+         * pass to it via yajl_gen_string() are valid UTF8.  Enabling
+         * this option will cause it to do so.
+         */
+        yajl_gen_validate_utf8 = 0x08,
+        /**
+         * the forward solidus (slash or '/' in human) is not required to be
+         * escaped in json text.  By default, YAJL will not escape it in the
+         * interest of saving bytes.  Setting this flag will cause YAJL to
+         * always escape '/' in generated JSON strings.
+         */
+        yajl_gen_escape_solidus = 0x10
+    } yajl_gen_option;
+
+    /** allow the modification of generator options subsequent to handle
+     *  allocation (via yajl_gen_alloc)
+     *  \returns zero in case of errors, non-zero otherwise
+     */
+    YAJL_API int yajl_gen_config(yajl_gen g, yajl_gen_option opt, ...);
+
+    /** allocate a generator handle
+     *  \param allocFuncs an optional pointer to a structure which allows
+     *                    the client to override the memory allocation
+     *                    used by yajl.  May be NULL, in which case
+     *                    malloc/free/realloc will be used.
+     *
+     *  \returns an allocated handle on success, NULL on failure (bad params)
+     */
+    YAJL_API yajl_gen yajl_gen_alloc(const yajl_alloc_funcs * allocFuncs);
+
+    /** free a generator handle */
+    YAJL_API void yajl_gen_free(yajl_gen handle);
+
+    YAJL_API yajl_gen_status yajl_gen_integer(yajl_gen hand, long long int number);
+    /** generate a floating point number.  number may not be infinity or
+     *  NaN, as these have no representation in JSON.  In these cases the
+     *  generator will return 'yajl_gen_invalid_number' */
+    YAJL_API yajl_gen_status yajl_gen_double(yajl_gen hand, double number);
+    YAJL_API yajl_gen_status yajl_gen_number(yajl_gen hand,
+                                             const char * num,
+                                             size_t len);
+    YAJL_API yajl_gen_status yajl_gen_string(yajl_gen hand,
+                                             const unsigned char * str,
+                                             size_t len);
+    YAJL_API yajl_gen_status yajl_gen_null(yajl_gen hand);
+    YAJL_API yajl_gen_status yajl_gen_bool(yajl_gen hand, int boolean);
+    YAJL_API yajl_gen_status yajl_gen_map_open(yajl_gen hand);
+    YAJL_API yajl_gen_status yajl_gen_map_close(yajl_gen hand);
+    YAJL_API yajl_gen_status yajl_gen_array_open(yajl_gen hand);
+    YAJL_API yajl_gen_status yajl_gen_array_close(yajl_gen hand);
+
+    /** access the null terminated generator buffer.  If incrementally
+     *  outputting JSON, one should call yajl_gen_clear to clear the
+     *  buffer.  This allows stream generation. */
+    YAJL_API yajl_gen_status yajl_gen_get_buf(yajl_gen hand,
+                                              const unsigned char ** buf,
+                                              size_t * len);
+
+    /** clear yajl's output buffer, but maintain all internal generation
+     *  state.  This function will not "reset" the generator state, and is
+     *  intended to enable incremental JSON outputting. */
+    YAJL_API void yajl_gen_clear(yajl_gen hand);
+
+    /** Reset the generator state.  Allows a client to generate multiple
+     *  json entities in a stream. The "sep" string will be inserted to
+     *  separate the previously generated entity from the current,
+     *  NULL means *no separation* of entities (clients beware, generating
+     *  multiple JSON numbers without a separator, for instance, will result in ambiguous output)
+     *
+     *  Note: this call will not clear yajl's output buffer.  This
+     *  may be accomplished explicitly by calling yajl_gen_clear() */
+    YAJL_API void yajl_gen_reset(yajl_gen hand, const char * sep);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
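
A minimal use of the generator API declared above (not part of this diff; assumes YAJL 2.x, -lyajl): build {"x": [1, true]} and print the internal buffer.

    #include <stdio.h>
    #include <yajl/yajl_gen.h>

    int main(void) {
      yajl_gen g = yajl_gen_alloc(NULL);           /* default allocators */
      yajl_gen_config(g, yajl_gen_beautify, 1);
      yajl_gen_map_open(g);
      yajl_gen_string(g, (const unsigned char*) "x", 1);
      yajl_gen_array_open(g);
      yajl_gen_integer(g, 1);
      yajl_gen_bool(g, 1);
      yajl_gen_array_close(g);
      yajl_gen_map_close(g);
      const unsigned char *buf; size_t len;
      yajl_gen_get_buf(g, &buf, &len);
      fwrite(buf, 1, len, stdout);
      yajl_gen_free(g);
      return 0;
    }
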
diff --git a/src/yajl/api/yajl_parse.h b/src/yajl/api/yajl_parse.h
new file mode 100644
index 0000000..7cccb69
--- /dev/null
+++ b/src/yajl/api/yajl_parse.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * \file yajl_parse.h
+ * Interface to YAJL's JSON stream parsing facilities.
+ */
+
+#include <yajl_common.h>
+
+#ifndef __YAJL_PARSE_H__
+#define __YAJL_PARSE_H__
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+    /** error codes returned from this interface */
+    typedef enum {
+        /** no error was encountered */
+        yajl_status_ok,
+        /** a client callback returned zero, stopping the parse */
+        yajl_status_client_canceled,
+        /** An error occurred during the parse.  Call yajl_get_error for
+         *  more information about the encountered error */
+        yajl_status_error
+    } yajl_status;
+
+    /** obtain a human readable, English string for an error */
+    YAJL_API const char * yajl_status_to_string(yajl_status code);
+
+    /** an opaque handle to a parser */
+    typedef struct yajl_handle_t * yajl_handle;
+
+    /** yajl is an event driven parser.  this means as json elements are
+     *  parsed, you are called back to do something with the data.  The
+     *  functions in this table indicate the various events for which
+     *  you will be called back.  Each callback accepts a "context"
+     *  pointer, this is a void * that is passed into the yajl_parse
+     *  function which the client code may use to pass around context.
+     *
+     *  All callbacks return an integer.  If non-zero, the parse will
+     *  continue.  If zero, the parse will be canceled and
+     *  yajl_status_client_canceled will be returned from the parse.
+     *
+     *  \attention {
+     *    A note about the handling of numbers:
+     *
+     *    yajl will only convert numbers that can be represented in a
+     *    double or a 64 bit (long long) int.  All other numbers will
+     *    be passed to the client in string form using the yajl_number
+     *    callback.  Furthermore, if yajl_number is not NULL, it will
+     *    always be used to return numbers, that is yajl_integer and
+     *    yajl_double will be ignored.  If yajl_number is NULL but one
+     *    of yajl_integer or yajl_double are defined, parsing of a
+     *    number larger than is representable in a double or 64 bit
+     *    integer will result in a parse error.
+     *  }
+     */
+    typedef struct {
+        int (* yajl_null)(void * ctx);
+        int (* yajl_boolean)(void * ctx, int boolVal);
+        int (* yajl_integer)(void * ctx, long long integerVal);
+        int (* yajl_double)(void * ctx, double doubleVal);
+        /** A callback which passes the string representation of the number
+         *  back to the client.  Will be used for all numbers when present */
+        int (* yajl_number)(void * ctx, const char * numberVal,
+                            size_t numberLen);
+
+        /** strings are returned as pointers into the JSON text when
+         * possible; as a result, they are _not_ null padded */
+        int (* yajl_string)(void * ctx, const unsigned char * stringVal,
+                            size_t stringLen);
+
+        int (* yajl_start_map)(void * ctx);
+        int (* yajl_map_key)(void * ctx, const unsigned char * key,
+                             size_t stringLen);
+        int (* yajl_end_map)(void * ctx);
+
+        int (* yajl_start_array)(void * ctx);
+        int (* yajl_end_array)(void * ctx);
+    } yajl_callbacks;
+
+    /** allocate a parser handle
+     *  \param callbacks  a yajl callbacks structure specifying the
+     *                    functions to call when different JSON entities
+     *                    are encountered in the input text.  May be NULL,
+     *                    which is only useful for validation.
+     *  \param afs        memory allocation functions, may be NULL to use
+     *                    C runtime library routines (malloc and friends)
+     *  \param ctx        a context pointer that will be passed to callbacks.
+     */
+    YAJL_API yajl_handle yajl_alloc(const yajl_callbacks * callbacks,
+                                    yajl_alloc_funcs * afs,
+                                    void * ctx);
+
+
+    /** configuration parameters for the parser, these may be passed to
+     *  yajl_config() along with option specific argument(s).  In general,
+     *  all configuration parameters default to *off*. */
+    typedef enum {
+        /** Ignore javascript style comments present in
+         *  JSON input.  Non-standard, but rather fun.
+         *  Argument: toggled off with integer zero, on otherwise.
+         *
+         *  example:
+         *    yajl_config(h, yajl_allow_comments, 1); // turn comment support on
+         */
+        yajl_allow_comments = 0x01,
+        /**
+         * When set the parser will verify that all strings in JSON input are
+         * valid UTF8 and will emit a parse error if this is not so.  When set,
+         * this option makes parsing slightly more expensive (~7% depending
+         * on processor and compiler in use)
+         *
+         * example:
+         *   yajl_config(h, yajl_dont_validate_strings, 1); // disable utf8 checking
+         */
+        yajl_dont_validate_strings     = 0x02,
+        /**
+         * By default, upon calls to yajl_complete_parse(), yajl will
+         * ensure the entire input text was consumed and will raise an error
+         * otherwise.  Enabling this flag will cause yajl to disable this
+         * check.  This can be useful when parsing json out of a stream that contains more
+         * than a single JSON document.
+         */
+        yajl_allow_trailing_garbage = 0x04,
+        /**
+         * Allow multiple values to be parsed by a single handle.  The
+         * entire text must be valid JSON, and values can be separated
+         * by any kind of whitespace.  This flag will change the
+         * behavior of the parser, and cause it continue parsing after
+         * a value is parsed, rather than transitioning into a
+         * complete state.  This option can be useful when parsing multiple
+         * values from an input stream.
+         */
+        yajl_allow_multiple_values = 0x08,
+        /**
+         * When yajl_complete_parse() is called the parser will
+         * check that the top level value was completely consumed.  I.E.,
+         * if called whilst in the middle of parsing a value
+         * yajl will enter an error state (premature EOF).  Setting this
+         * flag suppresses that check and the corresponding error.
+         */
+        yajl_allow_partial_values = 0x10
+    } yajl_option;
+
+    /** allow the modification of parser options subsequent to handle
+     *  allocation (via yajl_alloc)
+     *  \returns zero in case of errors, non-zero otherwise
+     */
+    YAJL_API int yajl_config(yajl_handle h, yajl_option opt, ...);
+
+    /** free a parser handle */
+    YAJL_API void yajl_free(yajl_handle handle);
+
+    /** Parse some json!
+     *  \param hand - a handle to the json parser allocated with yajl_alloc
+     *  \param jsonText - a pointer to the UTF8 json text to be parsed
+     *  \param jsonTextLength - the length, in bytes, of input text
+     */
+    YAJL_API yajl_status yajl_parse(yajl_handle hand,
+                                    const unsigned char * jsonText,
+                                    size_t jsonTextLength);
+
+    /** Parse any remaining buffered json.
+     *  Since yajl is a stream-based parser, without an explicit end of
+     *  input, yajl sometimes can't decide if content at the end of the
+     *  stream is valid or not.  For example, if "1" has been fed in,
+     *  yajl can't know whether another digit is next or some character
+     *  that would terminate the integer token.
+     *
+     *  \param hand - a handle to the json parser allocated with yajl_alloc
+     */
+    YAJL_API yajl_status yajl_complete_parse(yajl_handle hand);
+
+    /** get an error string describing the state of the
+     *  parse.
+     *
+     *  If verbose is non-zero, the message will include the JSON
+     *  text where the error occurred, along with an arrow pointing to
+     *  the specific char.
+     *
+     *  \returns A dynamically allocated string will be returned which should
+     *  be freed with yajl_free_error
+     */
+    YAJL_API unsigned char * yajl_get_error(yajl_handle hand, int verbose,
+                                            const unsigned char * jsonText,
+                                            size_t jsonTextLength);
+
+    /**
+     * get the amount of data consumed from the last chunk passed to YAJL.
+     *
+     * In the case of a successful parse this can help you understand if
+     * the entire buffer was consumed (which will allow you to handle
+     * "junk at end of input").
+     *
+     * In the event an error is encountered during parsing, this function
+     * affords the client a way to get the offset into the most recent
+     * chunk where the error occurred.  0 will be returned if no error
+     * was encountered.
+     */
+    YAJL_API size_t yajl_get_bytes_consumed(yajl_handle hand);
+
+    /** free an error returned from yajl_get_error */
+    YAJL_API void yajl_free_error(yajl_handle hand, unsigned char * str);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
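
A minimal event-driven use of the parser API declared above (not part of this diff; assumes YAJL 2.x, -lyajl): count object keys with a single callback, leaving the others NULL, which the parser permits.

    #include <stdio.h>
    #include <string.h>
    #include <yajl/yajl_parse.h>

    static int count_key(void *ctx, const unsigned char *key, size_t len) {
      (void) key; (void) len;
      ++*(int *) ctx;
      return 1;                /* non-zero: continue parsing */
    }

    int main(void) {
      static yajl_callbacks cb;          /* zero-initialized: only map_key set */
      cb.yajl_map_key = count_key;
      int n = 0;
      yajl_handle h = yajl_alloc(&cb, NULL, &n);
      const char *json = "{\"a\":1,\"b\":{\"c\":2}}";
      yajl_parse(h, (const unsigned char*) json, strlen(json));
      yajl_complete_parse(h);
      printf("keys: %d\n", n);           /* 3 */
      yajl_free(h);
      return 0;
    }
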
diff --git a/src/yajl/api/yajl_tree.h b/src/yajl/api/yajl_tree.h
new file mode 100644
index 0000000..08c55eb
--- /dev/null
+++ b/src/yajl/api/yajl_tree.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010-2011  Florian Forster  <ff at octo.it>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * \file yajl_tree.h
+ *
+ * Parses JSON data and returns the data in tree form.
+ *
+ * \author Florian Forster
+ * \date August 2010
+ *
+ * This interface makes quick parsing and extraction of
+ * smallish JSON docs trivial:
+ *
+ * \include example/parse_config.c
+ */
+
+#ifndef YAJL_TREE_H
+#define YAJL_TREE_H 1
+
+#include <yajl_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** possible data types that a yajl_val_s can hold */
+typedef enum {
+    yajl_t_string = 1,
+    yajl_t_number = 2,
+    yajl_t_object = 3,
+    yajl_t_array = 4,
+    yajl_t_true = 5,
+    yajl_t_false = 6,
+    yajl_t_null = 7,
+    /** The any type isn't valid for yajl_val_s.type, but can be
+     *  used as an argument to routines like yajl_tree_get().
+     */
+    yajl_t_any = 8
+} yajl_type;
+
+#define YAJL_NUMBER_INT_VALID    0x01
+#define YAJL_NUMBER_DOUBLE_VALID 0x02
+
+/** A pointer to a node in the parse tree */
+typedef struct yajl_val_s * yajl_val;
+
+/**
+ * A JSON value representation capable of holding one of the seven
+ * types above. For "string", "number", "object", and "array"
+ * additional data is available in the union.  The "YAJL_IS_*"
+ * and "YAJL_GET_*" macros below allow type checking and convenient
+ * value extraction.
+ */
+struct yajl_val_s
+{
+    /** Type of the value contained. Use the "YAJL_IS_*" macros to check for a
+     * specific type. */
+    yajl_type type;
+    /** Type-specific data. You may use the "YAJL_GET_*" macros to access these
+     * members. */
+    union
+    {
+        char * string;
+        struct {
+            long long i; /*< integer value, if representable. */
+            double  d;   /*< double value, if representable. */
+            char   *r;   /*< unparsed number in string form. */
+            /** Signals whether the \em i and \em d members are
+             * valid. See \c YAJL_NUMBER_INT_VALID and
+             * \c YAJL_NUMBER_DOUBLE_VALID. */
+            unsigned int flags;
+        } number;
+        struct {
+            const char **keys; /*< Array of keys */
+            yajl_val *values; /*< Array of values. */
+            size_t len; /*< Number of key-value-pairs. */
+        } object;
+        struct {
+            yajl_val *values; /*< Array of elements. */
+            size_t len; /*< Number of elements. */
+        } array;
+    } u;
+};
+
+/**
+ * Parse a string.
+ *
+ * Parses a null-terminated string containing JSON data and returns a pointer
+ * to the top-level value (root of the parse tree).
+ *
+ * \param input              Pointer to a null-terminated utf8 string containing
+ *                           JSON data.
+ * \param error_buffer       Pointer to a buffer in which an error message will
+ *                           be stored if \em yajl_tree_parse fails, or
+ *                           \c NULL. The buffer will be initialized before
+ *                           parsing, so its content will be destroyed even if
+ *                           \em yajl_tree_parse succeeds.
+ * \param error_buffer_size  Size of the memory area pointed to by
+ *                           \em error_buffer. If \em error_buffer is
+ *                           \c NULL, this argument is ignored.
+ *
+ * \returns Pointer to the top-level value or \c NULL on error. The memory
+ * pointed to must be freed using \em yajl_tree_free. In case of an error, a
+ * null terminated message describing the error in more detail is stored in
+ * \em error_buffer if it is not \c NULL.
+ */
+YAJL_API yajl_val yajl_tree_parse (const char *input,
+                                   char *error_buffer, size_t error_buffer_size);
+
+
+/**
+ * Free a parse tree returned by "yajl_tree_parse".
+ *
+ * \param v Pointer to a JSON value returned by "yajl_tree_parse". Passing NULL
+ * is valid and results in a no-op.
+ */
+YAJL_API void yajl_tree_free (yajl_val v);
+
+/**
+ * Access a nested value inside a tree.
+ *
+ * \param parent the node under which you'd like to extract values.
+ * \param path A null terminated array of strings, each the name of an object key
+ * \param type the yajl_type of the object you seek, or yajl_t_any if any will do.
+ *
+ * \returns a pointer to the found value, or NULL if we came up empty.
+ *
+ * Future Ideas:  it'd be nice to move path to a string and implement support for
+ * a teeny tiny micro language here, so you can extract array elements, do things
+ * like .first and .last, even .length.  Inspiration from JSONPath and css selectors?
+ * No it wouldn't be fast, but that's not what this API is about.
+ */
+YAJL_API yajl_val yajl_tree_get(yajl_val parent, const char ** path, yajl_type type);
+
+/* Various convenience macros to check the type of a `yajl_val` */
+#define YAJL_IS_STRING(v) (((v) != NULL) && ((v)->type == yajl_t_string))
+#define YAJL_IS_NUMBER(v) (((v) != NULL) && ((v)->type == yajl_t_number))
+#define YAJL_IS_INTEGER(v) (YAJL_IS_NUMBER(v) && ((v)->u.number.flags & YAJL_NUMBER_INT_VALID))
+#define YAJL_IS_DOUBLE(v) (YAJL_IS_NUMBER(v) && ((v)->u.number.flags & YAJL_NUMBER_DOUBLE_VALID))
+#define YAJL_IS_OBJECT(v) (((v) != NULL) && ((v)->type == yajl_t_object))
+#define YAJL_IS_ARRAY(v)  (((v) != NULL) && ((v)->type == yajl_t_array ))
+#define YAJL_IS_TRUE(v)   (((v) != NULL) && ((v)->type == yajl_t_true  ))
+#define YAJL_IS_FALSE(v)  (((v) != NULL) && ((v)->type == yajl_t_false ))
+#define YAJL_IS_NULL(v)   (((v) != NULL) && ((v)->type == yajl_t_null  ))
+
+/** Given a yajl_val_string return a ptr to the bare string it contains,
+ *  or NULL if the value is not a string. */
+#define YAJL_GET_STRING(v) (YAJL_IS_STRING(v) ? (v)->u.string : NULL)
+
+/** Get the string representation of a number.  You should check type first,
+ *  perhaps using YAJL_IS_NUMBER */
+#define YAJL_GET_NUMBER(v) ((v)->u.number.r)
+
+/** Get the double representation of a number.  You should check type first,
+ *  perhaps using YAJL_IS_DOUBLE */
+#define YAJL_GET_DOUBLE(v) ((v)->u.number.d)
+
+/** Get the 64bit (long long) integer representation of a number.  You should
+ *  check type first, perhaps using YAJL_IS_INTEGER */
+#define YAJL_GET_INTEGER(v) ((v)->u.number.i)
+
+/** Get a pointer to a yajl_val_object or NULL if the value is not an object. */
+#define YAJL_GET_OBJECT(v) (YAJL_IS_OBJECT(v) ? &(v)->u.object : NULL)
+
+/** Get a pointer to a yajl_val_array or NULL if the value is not an array. */
+#define YAJL_GET_ARRAY(v)  (YAJL_IS_ARRAY(v)  ? &(v)->u.array  : NULL)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* YAJL_TREE_H */
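
A minimal use of yajl_tree_get() declared above (not part of this diff; assumes YAJL 2.x, -lyajl): pull a nested number out by key path.

    #include <stdio.h>
    #include <yajl/yajl_tree.h>

    int main(void) {
      char errbuf[1024];
      yajl_val root = yajl_tree_parse("{\"a\": {\"b\": 42}}", errbuf, sizeof(errbuf));
      if (!root) return 1;
      const char *path[] = {"a", "b", NULL};   /* null-terminated key path */
      yajl_val v = yajl_tree_get(root, path, yajl_t_number);
      if (v) printf("a.b = %lld\n", YAJL_GET_INTEGER(v));
      yajl_tree_free(root);
      return 0;
    }
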
diff --git a/src/yajl/api/yajl_version.h b/src/yajl/api/yajl_version.h
new file mode 100644
index 0000000..ab1fc12
--- /dev/null
+++ b/src/yajl/api/yajl_version.h
@@ -0,0 +1,23 @@
+#ifndef YAJL_VERSION_H_
+#define YAJL_VERSION_H_
+
+#include <yajl_common.h>
+
+#define YAJL_MAJOR 2
+#define YAJL_MINOR 1
+#define YAJL_MICRO 1
+
+#define YAJL_VERSION ((YAJL_MAJOR * 10000) + (YAJL_MINOR * 100) + YAJL_MICRO)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int YAJL_API yajl_version(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* YAJL_VERSION_H_ */
+
diff --git a/src/yajl/readme.txt b/src/yajl/readme.txt
new file mode 100644
index 0000000..5d240c1
--- /dev/null
+++ b/src/yajl/readme.txt
@@ -0,0 +1,9 @@
+Changes in yajl code by Jeroen:
+
+ - Manually changed the header include paths in some c/h files to avoid the cmake dependency.
+ - Commented out the call to abort() in src/yajl/yajl_parser.c (for CMD check)
+ - Manually generated yajl.version.h from yajl.version.h.in (by running cmake)
+ - Patched CMD check warnings on Windows: https://github.com/lloyd/yajl/issues/143
+ - Patched error messages in yajl_tree_parse: https://github.com/lloyd/yajl/issues/144
+ - Fixed the build for Windows XP: https://rt.cpan.org/Public/Bug/Display.html?id=69113
+ - Added functions push_parser_new and push_parser_get in yajl_tree.c
diff --git a/src/yajl/yajl.c b/src/yajl/yajl.c
new file mode 100644
index 0000000..d477893
--- /dev/null
+++ b/src/yajl/yajl.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "api/yajl_parse.h"
+#include "yajl_lex.h"
+#include "yajl_parser.h"
+#include "yajl_alloc.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+const char *
+yajl_status_to_string(yajl_status stat)
+{
+    const char * statStr = "unknown";
+    switch (stat) {
+        case yajl_status_ok:
+            statStr = "ok, no error";
+            break;
+        case yajl_status_client_canceled:
+            statStr = "client canceled parse";
+            break;
+        case yajl_status_error:
+            statStr = "parse error";
+            break;
+    }
+    return statStr;
+}
+
+yajl_handle
+yajl_alloc(const yajl_callbacks * callbacks,
+           yajl_alloc_funcs * afs,
+           void * ctx)
+{
+    yajl_handle hand = NULL;
+    yajl_alloc_funcs afsBuffer;
+
+    /* first order of business is to set up memory allocation routines */
+    if (afs != NULL) {
+        if (afs->malloc == NULL || afs->realloc == NULL || afs->free == NULL)
+        {
+            return NULL;
+        }
+    } else {
+        yajl_set_default_alloc_funcs(&afsBuffer);
+        afs = &afsBuffer;
+    }
+
+    hand = (yajl_handle) YA_MALLOC(afs, sizeof(struct yajl_handle_t));
+
+    /* copy in pointers to allocation routines */
+    memcpy((void *) &(hand->alloc), (void *) afs, sizeof(yajl_alloc_funcs));
+
+    hand->callbacks = callbacks;
+    hand->ctx = ctx;
+    hand->lexer = NULL; 
+    hand->bytesConsumed = 0;
+    hand->decodeBuf = yajl_buf_alloc(&(hand->alloc));
+    hand->flags	    = 0;
+    yajl_bs_init(hand->stateStack, &(hand->alloc));
+    yajl_bs_push(hand->stateStack, yajl_state_start);
+
+    return hand;
+}
+
+int
+yajl_config(yajl_handle h, yajl_option opt, ...)
+{
+    int rv = 1;
+    va_list ap;
+    va_start(ap, opt);
+
+    switch(opt) {
+        case yajl_allow_comments:
+        case yajl_dont_validate_strings:
+        case yajl_allow_trailing_garbage:
+        case yajl_allow_multiple_values:
+        case yajl_allow_partial_values:
+            if (va_arg(ap, int)) h->flags |= opt;
+            else h->flags &= ~opt;
+            break;
+        default:
+            rv = 0;
+    }
+    va_end(ap);
+
+    return rv;
+}
+
+void
+yajl_free(yajl_handle handle)
+{
+    yajl_bs_free(handle->stateStack);
+    yajl_buf_free(handle->decodeBuf);
+    if (handle->lexer) {
+        yajl_lex_free(handle->lexer);
+        handle->lexer = NULL;
+    }
+    YA_FREE(&(handle->alloc), handle);
+}
+
+yajl_status
+yajl_parse(yajl_handle hand, const unsigned char * jsonText,
+           size_t jsonTextLen)
+{
+    yajl_status status;
+
+    /* lazy allocation of the lexer */
+    if (hand->lexer == NULL) {
+        hand->lexer = yajl_lex_alloc(&(hand->alloc),
+                                     hand->flags & yajl_allow_comments,
+                                     !(hand->flags & yajl_dont_validate_strings));
+    }
+
+    status = yajl_do_parse(hand, jsonText, jsonTextLen);
+    return status;
+}
+
+
+yajl_status
+yajl_complete_parse(yajl_handle hand)
+{
+    /* The lexer is lazy allocated in the first call to parse.  if parse is
+     * never called, then no data was provided to parse at all.  This is a
+     * "premature EOF" error unless yajl_allow_partial_values is specified.
+     * allocating the lexer now is the simplest possible way to handle this
+     * case while preserving all the other semantics of the parser
+     * (multiple values, partial values, etc). */
+    if (hand->lexer == NULL) {
+        hand->lexer = yajl_lex_alloc(&(hand->alloc),
+                                     hand->flags & yajl_allow_comments,
+                                     !(hand->flags & yajl_dont_validate_strings));
+    }
+
+    return yajl_do_finish(hand);
+}
+
+unsigned char *
+yajl_get_error(yajl_handle hand, int verbose,
+               const unsigned char * jsonText, size_t jsonTextLen)
+{
+    return yajl_render_error_string(hand, jsonText, jsonTextLen, verbose);
+}
+
+size_t
+yajl_get_bytes_consumed(yajl_handle hand)
+{
+    if (!hand) return 0;
+    else return hand->bytesConsumed;
+}
+
+
+void
+yajl_free_error(yajl_handle hand, unsigned char * str)
+{
+    /* use memory allocation functions if set */
+    YA_FREE(&(hand->alloc), str);
+}
+
+/* XXX: add utility routines to parse from file */
diff --git a/src/yajl/yajl_alloc.c b/src/yajl/yajl_alloc.c
new file mode 100644
index 0000000..96ad1d3
--- /dev/null
+++ b/src/yajl/yajl_alloc.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * \file yajl_alloc.c
+ * default memory allocation routines for yajl which use malloc/realloc and
+ * free
+ */
+
+#include "yajl_alloc.h"
+#include <stdlib.h>
+
+static void * yajl_internal_malloc(void *ctx, size_t sz)
+{
+    (void)ctx;
+    return malloc(sz);
+}
+
+static void * yajl_internal_realloc(void *ctx, void * previous,
+                                    size_t sz)
+{
+    (void)ctx;
+    return realloc(previous, sz);
+}
+
+static void yajl_internal_free(void *ctx, void * ptr)
+{
+    (void)ctx;
+    free(ptr);
+}
+
+void yajl_set_default_alloc_funcs(yajl_alloc_funcs * yaf)
+{
+    yaf->malloc = yajl_internal_malloc;
+    yaf->free = yajl_internal_free;
+    yaf->realloc = yajl_internal_realloc;
+    yaf->ctx = NULL;
+}
+
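
These defaults exist so clients can swap in their own allocators via yajl_alloc_funcs. A standalone sketch (not part of this diff; assumes YAJL 2.x, -lyajl) that counts allocations with a wrapper around malloc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <yajl/yajl_parse.h>

    static int allocs = 0;

    static void *counting_malloc(void *ctx, size_t sz) { (void) ctx; allocs++; return malloc(sz); }
    static void *counting_realloc(void *ctx, void *p, size_t sz) { (void) ctx; return realloc(p, sz); }
    static void counting_free(void *ctx, void *p) { (void) ctx; free(p); }

    int main(void) {
      /* field order matches yajl_alloc_funcs: malloc, realloc, free, ctx */
      yajl_alloc_funcs afs = {counting_malloc, counting_realloc, counting_free, NULL};
      yajl_handle h = yajl_alloc(NULL, &afs, NULL);
      const char *json = "[1,2,3]";
      yajl_parse(h, (const unsigned char*) json, strlen(json));
      yajl_complete_parse(h);
      yajl_free(h);
      printf("mallocs: %d\n", allocs);
      return 0;
    }
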
diff --git a/src/yajl/yajl_alloc.h b/src/yajl/yajl_alloc.h
new file mode 100644
index 0000000..203c2f9
--- /dev/null
+++ b/src/yajl/yajl_alloc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * \file yajl_alloc.h
+ * default memory allocation routines for yajl which use malloc/realloc and
+ * free
+ */
+
+#ifndef __YAJL_ALLOC_H__
+#define __YAJL_ALLOC_H__
+
+#include "api/yajl_common.h"
+
+#define YA_MALLOC(afs, sz) (afs)->malloc((afs)->ctx, (sz))
+#define YA_FREE(afs, ptr) (afs)->free((afs)->ctx, (ptr))
+#define YA_REALLOC(afs, ptr, sz) (afs)->realloc((afs)->ctx, (ptr), (sz))
+
+void yajl_set_default_alloc_funcs(yajl_alloc_funcs * yaf);
+
+#endif
diff --git a/src/yajl/yajl_buf.c b/src/yajl/yajl_buf.c
new file mode 100644
index 0000000..1aeafde
--- /dev/null
+++ b/src/yajl/yajl_buf.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "yajl_buf.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define YAJL_BUF_INIT_SIZE 2048
+
+struct yajl_buf_t {
+    size_t len;
+    size_t used;
+    unsigned char * data;
+    yajl_alloc_funcs * alloc;
+};
+
+static
+void yajl_buf_ensure_available(yajl_buf buf, size_t want)
+{
+    size_t need;
+    
+    assert(buf != NULL);
+
+    /* first call */
+    if (buf->data == NULL) {
+        buf->len = YAJL_BUF_INIT_SIZE;
+        buf->data = (unsigned char *) YA_MALLOC(buf->alloc, buf->len);
+        buf->data[0] = 0;
+    }
+
+    need = buf->len;
+
+    while (want >= (need - buf->used)) need <<= 1;
+
+    if (need != buf->len) {
+        buf->data = (unsigned char *) YA_REALLOC(buf->alloc, buf->data, need);
+        buf->len = need;
+    }
+}
+
+yajl_buf yajl_buf_alloc(yajl_alloc_funcs * alloc)
+{
+    yajl_buf b = YA_MALLOC(alloc, sizeof(struct yajl_buf_t));
+    memset((void *) b, 0, sizeof(struct yajl_buf_t));
+    b->alloc = alloc;
+    return b;
+}
+
+void yajl_buf_free(yajl_buf buf)
+{
+    assert(buf != NULL);
+    if (buf->data) YA_FREE(buf->alloc, buf->data);
+    YA_FREE(buf->alloc, buf);
+}
+
+void yajl_buf_append(yajl_buf buf, const void * data, size_t len)
+{
+    yajl_buf_ensure_available(buf, len);
+    if (len > 0) {
+        assert(data != NULL);
+        memcpy(buf->data + buf->used, data, len);
+        buf->used += len;
+        buf->data[buf->used] = 0;
+    }
+}
+
+void yajl_buf_clear(yajl_buf buf)
+{
+    buf->used = 0;
+    if (buf->data) buf->data[buf->used] = 0;
+}
+
+const unsigned char * yajl_buf_data(yajl_buf buf)
+{
+    return buf->data;
+}
+
+size_t yajl_buf_len(yajl_buf buf)
+{
+    return buf->used;
+}
+
+void
+yajl_buf_truncate(yajl_buf buf, size_t len)
+{
+    assert(len <= buf->used);
+    buf->used = len;
+}
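
The growth rule in yajl_buf_ensure_available() doubles the capacity until the pending append fits, keeping amortized appends cheap. A tiny standalone sketch of just that loop (not part of this diff; sizes are illustrative):

    #include <stdio.h>

    int main(void) {
      size_t need = 2048, used = 2000, want = 5000;
      while (want >= (need - used)) need <<= 1;   /* >= leaves room for the null byte */
      printf("grow to %zu bytes\n", need);        /* prints 8192 */
      return 0;
    }
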
diff --git a/src/yajl/yajl_buf.h b/src/yajl/yajl_buf.h
new file mode 100644
index 0000000..a358246
--- /dev/null
+++ b/src/yajl/yajl_buf.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __YAJL_BUF_H__
+#define __YAJL_BUF_H__
+
+#include "api/yajl_common.h"
+#include "yajl_alloc.h"
+
+/*
+ * Implementation/performance notes.  If this were moved to a header
+ * only implementation using #define's where possible we might be 
+ * able to sqeeze a little performance out of the guy by killing function
+ * call overhead.  YMMV.
+ */
+
+/**
+ * yajl_buf is a buffer with exponential growth.  the buffer ensures that
+ * you are always null padded.
+ */
+typedef struct yajl_buf_t * yajl_buf;
+
+/* allocate a new buffer */
+yajl_buf yajl_buf_alloc(yajl_alloc_funcs * alloc);
+
+/* free the buffer */
+void yajl_buf_free(yajl_buf buf);
+
+/* append a number of bytes to the buffer */
+void yajl_buf_append(yajl_buf buf, const void * data, size_t len);
+
+/* empty the buffer */
+void yajl_buf_clear(yajl_buf buf);
+
+/* get a pointer to the beginning of the buffer */
+const unsigned char * yajl_buf_data(yajl_buf buf);
+
+/* get the length of the buffer */
+size_t yajl_buf_len(yajl_buf buf);
+
+/* truncate the buffer */
+void yajl_buf_truncate(yajl_buf buf, size_t len);
+
+#endif
diff --git a/src/yajl/yajl_bytestack.h b/src/yajl/yajl_bytestack.h
new file mode 100644
index 0000000..9ea7d15
--- /dev/null
+++ b/src/yajl/yajl_bytestack.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * A header only implementation of a simple stack of bytes, used in YAJL
+ * to maintain parse state.
+ */
+
+#ifndef __YAJL_BYTESTACK_H__
+#define __YAJL_BYTESTACK_H__
+
+#include "api/yajl_common.h"
+
+#define YAJL_BS_INC 128
+
+typedef struct yajl_bytestack_t
+{
+    unsigned char * stack;
+    size_t size;
+    size_t used;
+    yajl_alloc_funcs * yaf;
+} yajl_bytestack;
+
+/* initialize a bytestack */
+#define yajl_bs_init(obs, _yaf) {               \
+        (obs).stack = NULL;                     \
+        (obs).size = 0;                         \
+        (obs).used = 0;                         \
+        (obs).yaf = (_yaf);                     \
+    }                                           \
+
+
+/* free a bytestack */
+#define yajl_bs_free(obs)                 \
+    if ((obs).stack) (obs).yaf->free((obs).yaf->ctx, (obs).stack);
+
+#define yajl_bs_current(obs)               \
+    (assert((obs).used > 0), (obs).stack[(obs).used - 1])
+
+#define yajl_bs_push(obs, byte) {                       \
+    if (((obs).size - (obs).used) == 0) {               \
+        (obs).size += YAJL_BS_INC;                      \
+        (obs).stack = (obs).yaf->realloc((obs).yaf->ctx,\
+                                         (void *) (obs).stack, (obs).size);\
+    }                                                   \
+    (obs).stack[((obs).used)++] = (byte);               \
+}
+
+/* removes the top item of the stack, returns nothing */
+#define yajl_bs_pop(obs) { ((obs).used)--; }
+
+#define yajl_bs_set(obs, byte)                          \
+    (obs).stack[((obs).used) - 1] = (byte);
+
+
+#endif
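
Because everything here is a macro over a struct used by name, the stack involves no pointer indirection at the call site. A minimal sketch of the push/pop lifecycle (not from the diff), again assuming the default allocators from api/yajl_common.h:

    #include <assert.h>
    #include "yajl_bytestack.h"

    static void bytestack_demo(void)
    {
        yajl_alloc_funcs afs;
        yajl_bytestack bs;

        yajl_set_default_alloc_funcs(&afs);
        yajl_bs_init(bs, &afs);

        yajl_bs_push(bs, 1);                 /* grows by YAJL_BS_INC as needed */
        yajl_bs_push(bs, 2);
        assert(yajl_bs_current(bs) == 2);

        yajl_bs_set(bs, 3);                  /* overwrite the top in place */
        assert(yajl_bs_current(bs) == 3);

        yajl_bs_pop(bs);
        assert(yajl_bs_current(bs) == 1);

        yajl_bs_free(bs);
    }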
diff --git a/src/yajl/yajl_encode.c b/src/yajl/yajl_encode.c
new file mode 100644
index 0000000..fd08258
--- /dev/null
+++ b/src/yajl/yajl_encode.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "yajl_encode.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+static void CharToHex(unsigned char c, char * hexBuf)
+{
+    const char * hexchar = "0123456789ABCDEF";
+    hexBuf[0] = hexchar[c >> 4];
+    hexBuf[1] = hexchar[c & 0x0F];
+}
+
+void
+yajl_string_encode(const yajl_print_t print,
+                   void * ctx,
+                   const unsigned char * str,
+                   size_t len,
+                   int escape_solidus)
+{
+    size_t beg = 0;
+    size_t end = 0;
+    char hexBuf[7];
+    hexBuf[0] = '\\'; hexBuf[1] = 'u'; hexBuf[2] = '0'; hexBuf[3] = '0';
+    hexBuf[6] = 0;
+
+    while (end < len) {
+        const char * escaped = NULL;
+        switch (str[end]) {
+            case '\r': escaped = "\\r"; break;
+            case '\n': escaped = "\\n"; break;
+            case '\\': escaped = "\\\\"; break;
+            /* it is not required to escape a solidus in JSON:
+             * read sec. 2.5: http://www.ietf.org/rfc/rfc4627.txt
+             * specifically, this production from the grammar:
+             *   unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
+             */
+            case '/': if (escape_solidus) escaped = "\\/"; break;
+            case '"': escaped = "\\\""; break;
+            case '\f': escaped = "\\f"; break;
+            case '\b': escaped = "\\b"; break;
+            case '\t': escaped = "\\t"; break;
+            default:
+                if ((unsigned char) str[end] < 32) {
+                    CharToHex(str[end], hexBuf + 4);
+                    escaped = hexBuf;
+                }
+                break;
+        }
+        if (escaped != NULL) {
+            print(ctx, (const char *) (str + beg), end - beg);
+            print(ctx, escaped, (unsigned int)strlen(escaped));
+            beg = ++end;
+        } else {
+            ++end;
+        }
+    }
+    print(ctx, (const char *) (str + beg), end - beg);
+}
+
+static void hexToDigit(unsigned int * val, const unsigned char * hex)
+{
+    unsigned int i;
+    for (i=0;i<4;i++) {
+        unsigned char c = hex[i];
+        if (c >= 'A') c = (c & ~0x20) - 7;
+        c -= '0';
+        assert(!(c & 0xF0));
+        *val = (*val << 4) | c;
+    }
+}
+
+static void Utf32toUtf8(unsigned int codepoint, char * utf8Buf) 
+{
+    if (codepoint < 0x80) {
+        utf8Buf[0] = (char) codepoint;
+        utf8Buf[1] = 0;
+    } else if (codepoint < 0x0800) {
+        utf8Buf[0] = (char) ((codepoint >> 6) | 0xC0);
+        utf8Buf[1] = (char) ((codepoint & 0x3F) | 0x80);
+        utf8Buf[2] = 0;
+    } else if (codepoint < 0x10000) {
+        utf8Buf[0] = (char) ((codepoint >> 12) | 0xE0);
+        utf8Buf[1] = (char) (((codepoint >> 6) & 0x3F) | 0x80);
+        utf8Buf[2] = (char) ((codepoint & 0x3F) | 0x80);
+        utf8Buf[3] = 0;
+    } else if (codepoint < 0x200000) {
+        utf8Buf[0] =(char)((codepoint >> 18) | 0xF0);
+        utf8Buf[1] =(char)(((codepoint >> 12) & 0x3F) | 0x80);
+        utf8Buf[2] =(char)(((codepoint >> 6) & 0x3F) | 0x80);
+        utf8Buf[3] =(char)((codepoint & 0x3F) | 0x80);
+        utf8Buf[4] = 0;
+    } else {
+        utf8Buf[0] = '?';
+        utf8Buf[1] = 0;
+    }
+}
+
+void yajl_string_decode(yajl_buf buf, const unsigned char * str,
+                        size_t len)
+{
+    size_t beg = 0;
+    size_t end = 0;    
+
+    while (end < len) {
+        if (str[end] == '\\') {
+            char utf8Buf[5];
+            const char * unescaped = "?";
+            yajl_buf_append(buf, str + beg, end - beg);
+            switch (str[++end]) {
+                case 'r': unescaped = "\r"; break;
+                case 'n': unescaped = "\n"; break;
+                case '\\': unescaped = "\\"; break;
+                case '/': unescaped = "/"; break;
+                case '"': unescaped = "\""; break;
+                case 'f': unescaped = "\f"; break;
+                case 'b': unescaped = "\b"; break;
+                case 't': unescaped = "\t"; break;
+                case 'u': {
+                    unsigned int codepoint = 0;
+                    hexToDigit(&codepoint, str + ++end);
+                    end+=3;
+                    /* check if this is a surrogate */
+                    if ((codepoint & 0xFC00) == 0xD800) {
+                        end++;
+                        if (str[end] == '\\' && str[end + 1] == 'u') {
+                            unsigned int surrogate = 0;
+                            hexToDigit(&surrogate, str + end + 2);
+                            codepoint =
+                                (((codepoint & 0x3F) << 10) | 
+                                 ((((codepoint >> 6) & 0xF) + 1) << 16) | 
+                                 (surrogate & 0x3FF));
+                            end += 5;
+                        } else {
+                            unescaped = "?";
+                            break;
+                        }
+                    }
+                    
+                    Utf32toUtf8(codepoint, utf8Buf);
+                    unescaped = utf8Buf;
+
+                    if (codepoint == 0) {
+                        yajl_buf_append(buf, unescaped, 1);
+                        beg = ++end;
+                        continue;
+                    }
+
+                    break;
+                }
+                default:
+                    assert("this should never happen" == NULL);
+            }
+            yajl_buf_append(buf, unescaped, (unsigned int)strlen(unescaped));
+            beg = ++end;
+        } else {
+            end++;
+        }
+    }
+    yajl_buf_append(buf, str + beg, end - beg);
+}
+
+#define ADV_PTR s++; if (!(len--)) return 0;
+
+int yajl_string_validate_utf8(const unsigned char * s, size_t len)
+{
+    if (!len) return 1;
+    if (!s) return 0;
+    
+    while (len--) {
+        /* single byte */
+        if (*s <= 0x7f) {
+            /* noop */
+        }
+        /* two byte */ 
+        else if ((*s >> 5) == 0x6) {
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+        }
+        /* three byte */
+        else if ((*s >> 4) == 0x0e) {
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+        }
+        /* four byte */        
+        else if ((*s >> 3) == 0x1e) {
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+            ADV_PTR;
+            if (!((*s >> 6) == 0x2)) return 0;
+        } else {
+            return 0;
+        }
+        
+        s++;
+    }
+    
+    return 1;
+}
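
These two routines are inverses over the JSON escaping rules: encode maps raw bytes to escape sequences through a print callback, and decode folds escapes (including surrogate pairs) back into UTF-8. A minimal sketch, not part of the diff; to_stdout is a hypothetical yajl_print_t callback:

    #include <stdio.h>
    #include "yajl_buf.h"
    #include "yajl_encode.h"

    /* hypothetical print callback: write the encoded output to stdout */
    static void to_stdout(void *ctx, const char *str, size_t len)
    {
        fwrite(str, 1, len, stdout);
    }

    static void encode_decode_demo(yajl_alloc_funcs *afs)
    {
        /* encode: '\n', '\t' and '"' come out as \n, \t and \" */
        const unsigned char raw[] = "line1\nline2\t\"quoted\"";
        yajl_string_encode(to_stdout, NULL, raw, sizeof(raw) - 1, 0);

        /* decode: the surrogate pair \uD83D\uDE00 folds back into the
         * 4-byte UTF-8 sequence F0 9F 98 80 for U+1F600 */
        const unsigned char escaped[] = "\\uD83D\\uDE00";
        yajl_buf buf = yajl_buf_alloc(afs);
        yajl_string_decode(buf, escaped, sizeof(escaped) - 1);
        fwrite(yajl_buf_data(buf), 1, yajl_buf_len(buf), stdout);
        yajl_buf_free(buf);
    }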
diff --git a/src/yajl/yajl_encode.h b/src/yajl/yajl_encode.h
new file mode 100644
index 0000000..853a1a7
--- /dev/null
+++ b/src/yajl/yajl_encode.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __YAJL_ENCODE_H__
+#define __YAJL_ENCODE_H__
+
+#include "yajl_buf.h"
+#include "api/yajl_gen.h"
+
+void yajl_string_encode(const yajl_print_t printer,
+                        void * ctx,
+                        const unsigned char * str,
+                        size_t length,
+                        int escape_solidus);
+
+void yajl_string_decode(yajl_buf buf, const unsigned char * str,
+                        size_t length);
+
+int yajl_string_validate_utf8(const unsigned char * s, size_t len);
+
+#endif
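
A small sketch (not from the diff) of the validator's contract as implemented above: truncated multi-byte sequences fail, well-formed ones pass, and the empty string is vacuously valid:

    #include <assert.h>
    #include "yajl_encode.h"

    static void utf8_demo(void)
    {
        const unsigned char ok[]  = { 0xE2, 0x82, 0xAC };  /* U+20AC, 3 bytes */
        const unsigned char bad[] = { 0xE2, 0x82 };        /* truncated */

        assert(yajl_string_validate_utf8(ok, sizeof(ok)) == 1);
        assert(yajl_string_validate_utf8(bad, sizeof(bad)) == 0);
        assert(yajl_string_validate_utf8(NULL, 0) == 1);   /* empty is valid */
    }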
diff --git a/src/yajl/yajl_gen.c b/src/yajl/yajl_gen.c
new file mode 100644
index 0000000..bf7629a
--- /dev/null
+++ b/src/yajl/yajl_gen.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "api/yajl_gen.h"
+#include "yajl_buf.h"
+#include "yajl_encode.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+
+typedef enum {
+    yajl_gen_start,
+    yajl_gen_map_start,
+    yajl_gen_map_key,
+    yajl_gen_map_val,
+    yajl_gen_array_start,
+    yajl_gen_in_array,
+    yajl_gen_complete,
+    yajl_gen_error
+} yajl_gen_state;
+
+struct yajl_gen_t
+{
+    unsigned int flags;
+    unsigned int depth;
+    const char * indentString;
+    yajl_gen_state state[YAJL_MAX_DEPTH];
+    yajl_print_t print;
+    void * ctx; /* yajl_buf */
+    /* memory allocation routines */
+    yajl_alloc_funcs alloc;
+};
+
+int
+yajl_gen_config(yajl_gen g, yajl_gen_option opt, ...)
+{
+    int rv = 1;
+    va_list ap;
+    va_start(ap, opt);
+
+    switch(opt) {
+        case yajl_gen_beautify:
+        case yajl_gen_validate_utf8:
+        case yajl_gen_escape_solidus:
+            if (va_arg(ap, int)) g->flags |= opt;
+            else g->flags &= ~opt;
+            break;
+        case yajl_gen_indent_string: {
+            const char *indent = va_arg(ap, const char *);
+            g->indentString = indent;
+            for (; *indent; indent++) {
+                if (*indent != '\n'
+                    && *indent != '\v'
+                    && *indent != '\f'
+                    && *indent != '\t'
+                    && *indent != '\r'
+                    && *indent != ' ')
+                {
+                    g->indentString = NULL;
+                    rv = 0;
+                }
+            }
+            break;
+        }
+        case yajl_gen_print_callback:
+            yajl_buf_free(g->ctx);
+            g->print = va_arg(ap, const yajl_print_t);
+            g->ctx = va_arg(ap, void *);
+            break;
+        default:
+            rv = 0;
+    }
+
+    va_end(ap);
+
+    return rv;
+}
+
+
+
+yajl_gen
+yajl_gen_alloc(const yajl_alloc_funcs * afs)
+{
+    yajl_gen g = NULL;
+    yajl_alloc_funcs afsBuffer;
+
+    /* first order of business is to set up memory allocation routines */
+    if (afs != NULL) {
+        if (afs->malloc == NULL || afs->realloc == NULL || afs->free == NULL)
+        {
+            return NULL;
+        }
+    } else {
+        yajl_set_default_alloc_funcs(&afsBuffer);
+        afs = &afsBuffer;
+    }
+
+    g = (yajl_gen) YA_MALLOC(afs, sizeof(struct yajl_gen_t));
+    if (!g) return NULL;
+
+    memset((void *) g, 0, sizeof(struct yajl_gen_t));
+    /* copy in pointers to allocation routines */
+    memcpy((void *) &(g->alloc), (void *) afs, sizeof(yajl_alloc_funcs));
+
+    g->print = (yajl_print_t)&yajl_buf_append;
+    g->ctx = yajl_buf_alloc(&(g->alloc));
+    g->indentString = "    ";
+
+    return g;
+}
+
+void
+yajl_gen_reset(yajl_gen g, const char * sep)
+{
+    g->depth = 0;
+    memset((void *) &(g->state), 0, sizeof(g->state));
+    if (sep != NULL) g->print(g->ctx, sep, strlen(sep));
+}
+
+void
+yajl_gen_free(yajl_gen g)
+{
+    if (g->print == (yajl_print_t)&yajl_buf_append) yajl_buf_free((yajl_buf)g->ctx);
+    YA_FREE(&(g->alloc), g);
+}
+
+#define INSERT_SEP \
+    if (g->state[g->depth] == yajl_gen_map_key ||               \
+        g->state[g->depth] == yajl_gen_in_array) {              \
+        g->print(g->ctx, ",", 1);                               \
+        if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, "\n", 1);               \
+    } else if (g->state[g->depth] == yajl_gen_map_val) {        \
+        g->print(g->ctx, ":", 1);                               \
+        if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, " ", 1);                \
+   }
+
+#define INSERT_WHITESPACE                                               \
+    if ((g->flags & yajl_gen_beautify)) {                                                    \
+        if (g->state[g->depth] != yajl_gen_map_val) {                   \
+            unsigned int _i;                                            \
+            for (_i=0;_i<g->depth;_i++)                                 \
+                g->print(g->ctx,                                        \
+                         g->indentString,                               \
+                         (unsigned int)strlen(g->indentString));        \
+        }                                                               \
+    }
+
+#define ENSURE_NOT_KEY \
+    if (g->state[g->depth] == yajl_gen_map_key ||       \
+        g->state[g->depth] == yajl_gen_map_start)  {    \
+        return yajl_gen_keys_must_be_strings;           \
+    }                                                   \
+
+/* check that we're not complete or in an error state, i.e. that we are
+ * in a valid state to continue generating */
+#define ENSURE_VALID_STATE \
+    if (g->state[g->depth] == yajl_gen_error) {   \
+        return yajl_gen_in_error_state;\
+    } else if (g->state[g->depth] == yajl_gen_complete) {   \
+        return yajl_gen_generation_complete;                \
+    }
+
+#define INCREMENT_DEPTH \
+    if (++(g->depth) >= YAJL_MAX_DEPTH) return yajl_max_depth_exceeded;
+
+#define DECREMENT_DEPTH \
+    if (--(g->depth) >= YAJL_MAX_DEPTH) return yajl_gen_generation_complete;
+
+#define APPENDED_ATOM \
+    switch (g->state[g->depth]) {                   \
+        case yajl_gen_start:                        \
+            g->state[g->depth] = yajl_gen_complete; \
+            break;                                  \
+        case yajl_gen_map_start:                    \
+        case yajl_gen_map_key:                      \
+            g->state[g->depth] = yajl_gen_map_val;  \
+            break;                                  \
+        case yajl_gen_array_start:                  \
+            g->state[g->depth] = yajl_gen_in_array; \
+            break;                                  \
+        case yajl_gen_map_val:                      \
+            g->state[g->depth] = yajl_gen_map_key;  \
+            break;                                  \
+        default:                                    \
+            break;                                  \
+    }                                               \
+
+#define FINAL_NEWLINE                                        \
+    if ((g->flags & yajl_gen_beautify) && g->state[g->depth] == yajl_gen_complete) \
+        g->print(g->ctx, "\n", 1);
+
+yajl_gen_status
+yajl_gen_integer(yajl_gen g, long long int number)
+{
+    char i[32];
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+#ifdef _WIN32
+    sprintf(i, "%I64d", number);
+#else
+    sprintf(i, "%lld", number);
+#endif
+    g->print(g->ctx, i, (unsigned int)strlen(i));
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+#if defined(_WIN32) || defined(WIN32)
+#include <float.h>
+#define isnan _isnan
+#define isinf !_finite
+#endif
+
+yajl_gen_status
+yajl_gen_double(yajl_gen g, double number)
+{
+    char i[32];
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY;
+    if (isnan(number) || isinf(number)) return yajl_gen_invalid_number;
+    INSERT_SEP; INSERT_WHITESPACE;
+    sprintf(i, "%.20g", number);
+    if (strspn(i, "0123456789-") == strlen(i)) {
+        strcat(i, ".0");
+    }
+    g->print(g->ctx, i, (unsigned int)strlen(i));
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_number(yajl_gen g, const char * s, size_t l)
+{
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+    g->print(g->ctx, s, l);
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_string(yajl_gen g, const unsigned char * str,
+                size_t len)
+{
+    // if validation is enabled, check that the string is valid utf8
+    // XXX: This checking could be done a little faster, in the same pass as
+    // the string encoding
+    if (g->flags & yajl_gen_validate_utf8) {
+        if (!yajl_string_validate_utf8(str, len)) {
+            return yajl_gen_invalid_string;
+        }
+    }
+    ENSURE_VALID_STATE; INSERT_SEP; INSERT_WHITESPACE;
+    g->print(g->ctx, "\"", 1);
+    yajl_string_encode(g->print, g->ctx, str, len, g->flags & yajl_gen_escape_solidus);
+    g->print(g->ctx, "\"", 1);
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_null(yajl_gen g)
+{
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+    g->print(g->ctx, "null", strlen("null"));
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_bool(yajl_gen g, int boolean)
+{
+    const char * val = boolean ? "true" : "false";
+
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+    g->print(g->ctx, val, (unsigned int)strlen(val));
+    APPENDED_ATOM;
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_map_open(yajl_gen g)
+{
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+    INCREMENT_DEPTH;
+
+    g->state[g->depth] = yajl_gen_map_start;
+    g->print(g->ctx, "{", 1);
+    if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, "\n", 1);
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_map_close(yajl_gen g)
+{
+    ENSURE_VALID_STATE;
+    DECREMENT_DEPTH;
+
+    if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, "\n", 1);
+    APPENDED_ATOM;
+    INSERT_WHITESPACE;
+    g->print(g->ctx, "}", 1);
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_array_open(yajl_gen g)
+{
+    ENSURE_VALID_STATE; ENSURE_NOT_KEY; INSERT_SEP; INSERT_WHITESPACE;
+    INCREMENT_DEPTH;
+    g->state[g->depth] = yajl_gen_array_start;
+    g->print(g->ctx, "[", 1);
+    if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, "\n", 1);
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_array_close(yajl_gen g)
+{
+    ENSURE_VALID_STATE;
+    DECREMENT_DEPTH;
+    if ((g->flags & yajl_gen_beautify)) g->print(g->ctx, "\n", 1);
+    APPENDED_ATOM;
+    INSERT_WHITESPACE;
+    g->print(g->ctx, "]", 1);
+    FINAL_NEWLINE;
+    return yajl_gen_status_ok;
+}
+
+yajl_gen_status
+yajl_gen_get_buf(yajl_gen g, const unsigned char ** buf,
+                 size_t * len)
+{
+    if (g->print != (yajl_print_t)&yajl_buf_append) return yajl_gen_no_buf;
+    *buf = yajl_buf_data((yajl_buf)g->ctx);
+    *len = yajl_buf_len((yajl_buf)g->ctx);
+    return yajl_gen_status_ok;
+}
+
+void
+yajl_gen_clear(yajl_gen g)
+{
+    if (g->print == (yajl_print_t)&yajl_buf_append) yajl_buf_clear((yajl_buf)g->ctx);
+}
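
The generator is a small state machine: the state[depth] array drives separator insertion and key/value alternation, and the default print callback simply appends to an internal yajl_buf. A minimal lifecycle sketch (not part of the diff), using only entry points defined above:

    #include <stdio.h>
    #include "api/yajl_gen.h"

    static void gen_demo(void)
    {
        const unsigned char *out;
        size_t outlen;

        yajl_gen g = yajl_gen_alloc(NULL);          /* NULL = default allocators */
        yajl_gen_config(g, yajl_gen_beautify, 1);   /* four-space indent by default */

        yajl_gen_map_open(g);                       /* pushes yajl_gen_map_start */
        yajl_gen_string(g, (const unsigned char *) "answer", 6);
        yajl_gen_integer(g, 42);
        yajl_gen_map_close(g);

        /* prints the beautified {"answer": 42} object */
        if (yajl_gen_get_buf(g, &out, &outlen) == yajl_gen_status_ok)
            fwrite(out, 1, outlen, stdout);

        yajl_gen_free(g);
    }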
diff --git a/src/yajl/yajl_lex.c b/src/yajl/yajl_lex.c
new file mode 100644
index 0000000..0b6f7cc
--- /dev/null
+++ b/src/yajl/yajl_lex.c
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "yajl_lex.h"
+#include "yajl_buf.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#ifdef YAJL_LEXER_DEBUG
+static const char *
+tokToStr(yajl_tok tok)
+{
+    switch (tok) {
+        case yajl_tok_bool: return "bool";
+        case yajl_tok_colon: return "colon";
+        case yajl_tok_comma: return "comma";
+        case yajl_tok_eof: return "eof";
+        case yajl_tok_error: return "error";
+        case yajl_tok_left_brace: return "brace";
+        case yajl_tok_left_bracket: return "bracket";
+        case yajl_tok_null: return "null";
+        case yajl_tok_integer: return "integer";
+        case yajl_tok_double: return "double";
+        case yajl_tok_right_brace: return "brace";
+        case yajl_tok_right_bracket: return "bracket";
+        case yajl_tok_string: return "string";
+        case yajl_tok_string_with_escapes: return "string_with_escapes";
+    }
+    return "unknown";
+}
+#endif
+
+/* Impact of the stream parsing feature on the lexer:
+ *
+ * YAJL supports stream parsing.  That is, the ability to parse the first
+ * bits of a chunk of JSON before the last bits are available (still on
+ * the network or disk).  This makes the lexer more complex.  The
+ * responsibility of the lexer is to handle transparently the case where
+ * a chunk boundary falls in the middle of a token.  This is
+ * accomplished via a buffer and a character-reading abstraction.
+ *
+ * Overview of implementation
+ *
+ * When we lex to end of input string before end of token is hit, we
+ * copy all of the input text composing the token into our lexBuf.
+ *
+ * Every time we read a character, we do so through the readChar function.
+ * readChar's responsibility is to handle pulling all chars from the buffer
+ * before pulling chars from the input text.
+ */
+
+struct yajl_lexer_t {
+    /* the overall line and char offset into the data */
+    size_t lineOff;
+    size_t charOff;
+
+    /* error */
+    yajl_lex_error error;
+
+    /* an input buffer to handle the case where a token is spread over
+     * multiple chunks */
+    yajl_buf buf;
+
+    /* in the case where we have data in the lexBuf, bufOff holds
+     * the current offset into the lexBuf. */
+    size_t bufOff;
+
+    /* are we using the lex buf? */
+    unsigned int bufInUse;
+
+    /* shall we allow comments? */
+    unsigned int allowComments;
+
+    /* shall we validate utf8 inside strings? */
+    unsigned int validateUTF8;
+
+    yajl_alloc_funcs * alloc;
+};
+
+#define readChar(lxr, txt, off)                      \
+    (((lxr)->bufInUse && yajl_buf_len((lxr)->buf) && lxr->bufOff < yajl_buf_len((lxr)->buf)) ? \
+     (*((const unsigned char *) yajl_buf_data((lxr)->buf) + ((lxr)->bufOff)++)) : \
+     ((txt)[(*(off))++]))
+
+#define unreadChar(lxr, off) ((*(off) > 0) ? (*(off))-- : ((lxr)->bufOff--))
+
+yajl_lexer
+yajl_lex_alloc(yajl_alloc_funcs * alloc,
+               unsigned int allowComments, unsigned int validateUTF8)
+{
+    yajl_lexer lxr = (yajl_lexer) YA_MALLOC(alloc, sizeof(struct yajl_lexer_t));
+    memset((void *) lxr, 0, sizeof(struct yajl_lexer_t));
+    lxr->buf = yajl_buf_alloc(alloc);
+    lxr->allowComments = allowComments;
+    lxr->validateUTF8 = validateUTF8;
+    lxr->alloc = alloc;
+    return lxr;
+}
+
+void
+yajl_lex_free(yajl_lexer lxr)
+{
+    yajl_buf_free(lxr->buf);
+    YA_FREE(lxr->alloc, lxr);
+    return;
+}
+
+/* a lookup table which lets us quickly determine five things:
+ * VEC - valid escaped control char
+ *       (note: the solidus '/' may be escaped or not)
+ * IJC - invalid json char
+ * VHC - valid hex char
+ * NFP - needs further processing (from a string scanning perspective)
+ * NUC - needs utf8 checking when enabled (from a string scanning perspective)
+ */
+#define VEC 0x01
+#define IJC 0x02
+#define VHC 0x04
+#define NFP 0x08
+#define NUC 0x10
+
+static const char charLookupTable[256] =
+{
+/*00*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
+/*08*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
+/*10*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
+/*18*/ IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    , IJC    ,
+
+/*20*/ 0      , 0      , NFP|VEC|IJC, 0      , 0      , 0      , 0      , 0      ,
+/*28*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , VEC    ,
+/*30*/ VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    ,
+/*38*/ VHC    , VHC    , 0      , 0      , 0      , 0      , 0      , 0      ,
+
+/*40*/ 0      , VHC    , VHC    , VHC    , VHC    , VHC    , VHC    , 0      ,
+/*48*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
+/*50*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
+/*58*/ 0      , 0      , 0      , 0      , NFP|VEC|IJC, 0      , 0      , 0      ,
+
+/*60*/ 0      , VHC    , VEC|VHC, VHC    , VHC    , VHC    , VEC|VHC, 0      ,
+/*68*/ 0      , 0      , 0      , 0      , 0      , 0      , VEC    , 0      ,
+/*70*/ 0      , 0      , VEC    , 0      , VEC    , 0      , 0      , 0      ,
+/*78*/ 0      , 0      , 0      , 0      , 0      , 0      , 0      , 0      ,
+
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    ,
+       NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC    , NUC
+};
+
+/** process a variable length utf8 encoded codepoint.
+ *
+ *  returns:
+ *    yajl_tok_string - if valid utf8 char was parsed and offset was
+ *                      advanced
+ *    yajl_tok_eof - if end of input was hit before validation could
+ *                   complete
+ *    yajl_tok_error - if invalid utf8 was encountered
+ *
+ *  NOTE: on error the offset will point to the first char of the
+ *  invalid utf8 */
+#define UTF8_CHECK_EOF if (*offset >= jsonTextLen) { return yajl_tok_eof; }
+
+static yajl_tok
+yajl_lex_utf8_char(yajl_lexer lexer, const unsigned char * jsonText,
+                   size_t jsonTextLen, size_t * offset,
+                   unsigned char curChar)
+{
+    if (curChar <= 0x7f) {
+        /* single byte */
+        return yajl_tok_string;
+    } else if ((curChar >> 5) == 0x6) {
+        /* two byte */
+        UTF8_CHECK_EOF;
+        curChar = readChar(lexer, jsonText, offset);
+        if ((curChar >> 6) == 0x2) return yajl_tok_string;
+    } else if ((curChar >> 4) == 0x0e) {
+        /* three byte */
+        UTF8_CHECK_EOF;
+        curChar = readChar(lexer, jsonText, offset);
+        if ((curChar >> 6) == 0x2) {
+            UTF8_CHECK_EOF;
+            curChar = readChar(lexer, jsonText, offset);
+            if ((curChar >> 6) == 0x2) return yajl_tok_string;
+        }
+    } else if ((curChar >> 3) == 0x1e) {
+        /* four byte */
+        UTF8_CHECK_EOF;
+        curChar = readChar(lexer, jsonText, offset);
+        if ((curChar >> 6) == 0x2) {
+            UTF8_CHECK_EOF;
+            curChar = readChar(lexer, jsonText, offset);
+            if ((curChar >> 6) == 0x2) {
+                UTF8_CHECK_EOF;
+                curChar = readChar(lexer, jsonText, offset);
+                if ((curChar >> 6) == 0x2) return yajl_tok_string;
+            }
+        }
+    }
+
+    return yajl_tok_error;
+}
+
+/* lex a string.  input is the lexer, pointer to beginning of
+ * json text, and start of string (offset).
+ * a token is returned which has the following meanings:
+ * yajl_tok_string: lex of string was successful.  offset points to
+ *                  terminating '"'.
+ * yajl_tok_eof: end of text was encountered before we could complete
+ *               the lex.
+ * yajl_tok_error: embedded in the string were unallowable chars.  offset
+ *               points to the offending char
+ */
+#define STR_CHECK_EOF \
+if (*offset >= jsonTextLen) { \
+   tok = yajl_tok_eof; \
+   goto finish_string_lex; \
+}
+
+/** scan a string for interesting characters that might need further
+ *  review.  return the number of chars that are uninteresting and can
+ *  be skipped.
+ * (lth) hi world, any thoughts on how to make this routine faster? */
+static size_t
+yajl_string_scan(const unsigned char * buf, size_t len, int utf8check)
+{
+    unsigned char mask = IJC|NFP|(utf8check ? NUC : 0);
+    size_t skip = 0;
+    while (skip < len && !(charLookupTable[*buf] & mask))
+    {
+        skip++;
+        buf++;
+    }
+    return skip;
+}
+
+static yajl_tok
+yajl_lex_string(yajl_lexer lexer, const unsigned char * jsonText,
+                size_t jsonTextLen, size_t * offset)
+{
+    yajl_tok tok = yajl_tok_error;
+    int hasEscapes = 0;
+
+    for (;;) {
+        unsigned char curChar;
+
+        /* now jump into a faster scanning routine to skip as much
+         * of the buffers as possible */
+        {
+            const unsigned char * p;
+            size_t len;
+
+            if ((lexer->bufInUse && yajl_buf_len(lexer->buf) &&
+                 lexer->bufOff < yajl_buf_len(lexer->buf)))
+            {
+                p = ((const unsigned char *) yajl_buf_data(lexer->buf) +
+                     (lexer->bufOff));
+                len = yajl_buf_len(lexer->buf) - lexer->bufOff;
+                lexer->bufOff += yajl_string_scan(p, len, lexer->validateUTF8);
+            }
+            else if (*offset < jsonTextLen)
+            {
+                p = jsonText + *offset;
+                len = jsonTextLen - *offset;
+                *offset += yajl_string_scan(p, len, lexer->validateUTF8);
+            }
+        }
+
+        STR_CHECK_EOF;
+
+        curChar = readChar(lexer, jsonText, offset);
+
+        /* quote terminates */
+        if (curChar == '"') {
+            tok = yajl_tok_string;
+            break;
+        }
+        /* backslash escapes a set of control chars, */
+        else if (curChar == '\\') {
+            hasEscapes = 1;
+            STR_CHECK_EOF;
+
+            /* special case \u */
+            curChar = readChar(lexer, jsonText, offset);
+            if (curChar == 'u') {
+                unsigned int i = 0;
+
+                for (i=0;i<4;i++) {
+                    STR_CHECK_EOF;
+                    curChar = readChar(lexer, jsonText, offset);
+                    if (!(charLookupTable[curChar] & VHC)) {
+                        /* back up to offending char */
+                        unreadChar(lexer, offset);
+                        lexer->error = yajl_lex_string_invalid_hex_char;
+                        goto finish_string_lex;
+                    }
+                }
+            } else if (!(charLookupTable[curChar] & VEC)) {
+                /* back up to offending char */
+                unreadChar(lexer, offset);
+                lexer->error = yajl_lex_string_invalid_escaped_char;
+                goto finish_string_lex;
+            }
+        }
+        /* when not validating UTF8 it's a simple table lookup to determine
+         * if the present character is invalid */
+        else if(charLookupTable[curChar] & IJC) {
+            /* back up to offending char */
+            unreadChar(lexer, offset);
+            lexer->error = yajl_lex_string_invalid_json_char;
+            goto finish_string_lex;
+        }
+        /* when in validate UTF8 mode we need to do some extra work */
+        else if (lexer->validateUTF8) {
+            yajl_tok t = yajl_lex_utf8_char(lexer, jsonText, jsonTextLen,
+                                            offset, curChar);
+
+            if (t == yajl_tok_eof) {
+                tok = yajl_tok_eof;
+                goto finish_string_lex;
+            } else if (t == yajl_tok_error) {
+                lexer->error = yajl_lex_string_invalid_utf8;
+                goto finish_string_lex;
+            }
+        }
+        /* accept it, and move on */
+    }
+  finish_string_lex:
+    /* tell our buddy, the parser, whether it needs to process this string
+     * again */
+    if (hasEscapes && tok == yajl_tok_string) {
+        tok = yajl_tok_string_with_escapes;
+    }
+
+    return tok;
+}
+
+#define RETURN_IF_EOF if (*offset >= jsonTextLen) return yajl_tok_eof;
+
+static yajl_tok
+yajl_lex_number(yajl_lexer lexer, const unsigned char * jsonText,
+                size_t jsonTextLen, size_t * offset)
+{
+    /** XXX: numbers are the only entities in json that we must lex
+     *       _beyond_ in order to know that they are complete.  There
+     *       is an ambiguous case for integers at EOF. */
+
+    unsigned char c;
+
+    yajl_tok tok = yajl_tok_integer;
+
+    RETURN_IF_EOF;
+    c = readChar(lexer, jsonText, offset);
+
+    /* optional leading minus */
+    if (c == '-') {
+        RETURN_IF_EOF;
+        c = readChar(lexer, jsonText, offset);
+    }
+
+    /* a single zero, or a series of integers */
+    if (c == '0') {
+        RETURN_IF_EOF;
+        c = readChar(lexer, jsonText, offset);
+    } else if (c >= '1' && c <= '9') {
+        do {
+            RETURN_IF_EOF;
+            c = readChar(lexer, jsonText, offset);
+        } while (c >= '0' && c <= '9');
+    } else {
+        unreadChar(lexer, offset);
+        lexer->error = yajl_lex_missing_integer_after_minus;
+        return yajl_tok_error;
+    }
+
+    /* optional fraction (indicates this is floating point) */
+    if (c == '.') {
+        int numRd = 0;
+
+        RETURN_IF_EOF;
+        c = readChar(lexer, jsonText, offset);
+
+        while (c >= '0' && c <= '9') {
+            numRd++;
+            RETURN_IF_EOF;
+            c = readChar(lexer, jsonText, offset);
+        }
+
+        if (!numRd) {
+            unreadChar(lexer, offset);
+            lexer->error = yajl_lex_missing_integer_after_decimal;
+            return yajl_tok_error;
+        }
+        tok = yajl_tok_double;
+    }
+
+    /* optional exponent (indicates this is floating point) */
+    if (c == 'e' || c == 'E') {
+        RETURN_IF_EOF;
+        c = readChar(lexer, jsonText, offset);
+
+        /* optional sign */
+        if (c == '+' || c == '-') {
+            RETURN_IF_EOF;
+            c = readChar(lexer, jsonText, offset);
+        }
+
+        if (c >= '0' && c <= '9') {
+            do {
+                RETURN_IF_EOF;
+                c = readChar(lexer, jsonText, offset);
+            } while (c >= '0' && c <= '9');
+        } else {
+            unreadChar(lexer, offset);
+            lexer->error = yajl_lex_missing_integer_after_exponent;
+            return yajl_tok_error;
+        }
+        tok = yajl_tok_double;
+    }
+
+    /* we always go "one too far" */
+    unreadChar(lexer, offset);
+
+    return tok;
+}
+
+static yajl_tok
+yajl_lex_comment(yajl_lexer lexer, const unsigned char * jsonText,
+                 size_t jsonTextLen, size_t * offset)
+{
+    unsigned char c;
+
+    yajl_tok tok = yajl_tok_comment;
+
+    RETURN_IF_EOF;
+    c = readChar(lexer, jsonText, offset);
+
+    /* either slash or star expected */
+    if (c == '/') {
+        /* now we throw away until end of line */
+        do {
+            RETURN_IF_EOF;
+            c = readChar(lexer, jsonText, offset);
+        } while (c != '\n');
+    } else if (c == '*') {
+        /* now we throw away until end of comment */
+        for (;;) {
+            RETURN_IF_EOF;
+            c = readChar(lexer, jsonText, offset);
+            if (c == '*') {
+                RETURN_IF_EOF;
+                c = readChar(lexer, jsonText, offset);
+                if (c == '/') {
+                    break;
+                } else {
+                    unreadChar(lexer, offset);
+                }
+            }
+        }
+    } else {
+        lexer->error = yajl_lex_invalid_char;
+        tok = yajl_tok_error;
+    }
+
+    return tok;
+}
+
+yajl_tok
+yajl_lex_lex(yajl_lexer lexer, const unsigned char * jsonText,
+             size_t jsonTextLen, size_t * offset,
+             const unsigned char ** outBuf, size_t * outLen)
+{
+    yajl_tok tok = yajl_tok_error;
+    unsigned char c;
+    size_t startOffset = *offset;
+
+    *outBuf = NULL;
+    *outLen = 0;
+
+    for (;;) {
+        assert(*offset <= jsonTextLen);
+
+        if (*offset >= jsonTextLen) {
+            tok = yajl_tok_eof;
+            goto lexed;
+        }
+
+        c = readChar(lexer, jsonText, offset);
+
+        switch (c) {
+            case '{':
+                tok = yajl_tok_left_bracket;
+                goto lexed;
+            case '}':
+                tok = yajl_tok_right_bracket;
+                goto lexed;
+            case '[':
+                tok = yajl_tok_left_brace;
+                goto lexed;
+            case ']':
+                tok = yajl_tok_right_brace;
+                goto lexed;
+            case ',':
+                tok = yajl_tok_comma;
+                goto lexed;
+            case ':':
+                tok = yajl_tok_colon;
+                goto lexed;
+            case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+                startOffset++;
+                break;
+            case 't': {
+                const char * want = "rue";
+                do {
+                    if (*offset >= jsonTextLen) {
+                        tok = yajl_tok_eof;
+                        goto lexed;
+                    }
+                    c = readChar(lexer, jsonText, offset);
+                    if (c != *want) {
+                        unreadChar(lexer, offset);
+                        lexer->error = yajl_lex_invalid_string;
+                        tok = yajl_tok_error;
+                        goto lexed;
+                    }
+                } while (*(++want));
+                tok = yajl_tok_bool;
+                goto lexed;
+            }
+            case 'f': {
+                const char * want = "alse";
+                do {
+                    if (*offset >= jsonTextLen) {
+                        tok = yajl_tok_eof;
+                        goto lexed;
+                    }
+                    c = readChar(lexer, jsonText, offset);
+                    if (c != *want) {
+                        unreadChar(lexer, offset);
+                        lexer->error = yajl_lex_invalid_string;
+                        tok = yajl_tok_error;
+                        goto lexed;
+                    }
+                } while (*(++want));
+                tok = yajl_tok_bool;
+                goto lexed;
+            }
+            case 'n': {
+                const char * want = "ull";
+                do {
+                    if (*offset >= jsonTextLen) {
+                        tok = yajl_tok_eof;
+                        goto lexed;
+                    }
+                    c = readChar(lexer, jsonText, offset);
+                    if (c != *want) {
+                        unreadChar(lexer, offset);
+                        lexer->error = yajl_lex_invalid_string;
+                        tok = yajl_tok_error;
+                        goto lexed;
+                    }
+                } while (*(++want));
+                tok = yajl_tok_null;
+                goto lexed;
+            }
+            case '"': {
+                tok = yajl_lex_string(lexer, (const unsigned char *) jsonText,
+                                      jsonTextLen, offset);
+                goto lexed;
+            }
+            case '-':
+            case '0': case '1': case '2': case '3': case '4':
+            case '5': case '6': case '7': case '8': case '9': {
+                /* integer parsing wants to start from the beginning */
+                unreadChar(lexer, offset);
+                tok = yajl_lex_number(lexer, (const unsigned char *) jsonText,
+                                      jsonTextLen, offset);
+                goto lexed;
+            }
+            case '/':
+                /* hey, look, a probable comment!  If comments are disabled
+                 * it's an error. */
+                if (!lexer->allowComments) {
+                    unreadChar(lexer, offset);
+                    lexer->error = yajl_lex_unallowed_comment;
+                    tok = yajl_tok_error;
+                    goto lexed;
+                }
+                /* if comments are enabled, then we should try to lex
+                 * the thing.  possible outcomes are
+                 * - successful lex (tok_comment, which means continue),
+                 * - malformed comment opening (slash not followed by
+                 *   '*' or '/') (tok_error)
+                 * - eof hit. (tok_eof) */
+                tok = yajl_lex_comment(lexer, (const unsigned char *) jsonText,
+                                       jsonTextLen, offset);
+                if (tok == yajl_tok_comment) {
+                    /* "error" is silly, but that's the initial
+                     * state of tok.  guilty until proven innocent. */
+                    tok = yajl_tok_error;
+                    yajl_buf_clear(lexer->buf);
+                    lexer->bufInUse = 0;
+                    startOffset = *offset;
+                    break;
+                }
+                /* hit error or eof, bail */
+                goto lexed;
+            default:
+                lexer->error = yajl_lex_invalid_char;
+                tok = yajl_tok_error;
+                goto lexed;
+        }
+    }
+
+
+  lexed:
+    /* need to append to buffer if the buffer is in use or
+     * if it's an EOF token */
+    if (tok == yajl_tok_eof || lexer->bufInUse) {
+        if (!lexer->bufInUse) yajl_buf_clear(lexer->buf);
+        lexer->bufInUse = 1;
+        yajl_buf_append(lexer->buf, jsonText + startOffset, *offset - startOffset);
+        lexer->bufOff = 0;
+
+        if (tok != yajl_tok_eof) {
+            *outBuf = yajl_buf_data(lexer->buf);
+            *outLen = yajl_buf_len(lexer->buf);
+            lexer->bufInUse = 0;
+        }
+    } else if (tok != yajl_tok_error) {
+        *outBuf = jsonText + startOffset;
+        *outLen = *offset - startOffset;
+    }
+
+    /* special case for strings. skip the quotes. */
+    if (tok == yajl_tok_string || tok == yajl_tok_string_with_escapes)
+    {
+        assert(*outLen >= 2);
+        (*outBuf)++;
+        *outLen -= 2;
+    }
+
+
+#ifdef YAJL_LEXER_DEBUG
+    if (tok == yajl_tok_error) {
+        printf("lexical error: %s\n",
+               yajl_lex_error_to_string(yajl_lex_get_error(lexer)));
+    } else if (tok == yajl_tok_eof) {
+        printf("EOF hit\n");
+    } else {
+        printf("lexed %s: '", tokToStr(tok));
+        fwrite(*outBuf, 1, *outLen, stdout);
+        printf("'\n");
+    }
+#endif
+
+    return tok;
+}
+
+const char *
+yajl_lex_error_to_string(yajl_lex_error error)
+{
+    switch (error) {
+        case yajl_lex_e_ok:
+            return "ok, no error";
+        case yajl_lex_string_invalid_utf8:
+            return "invalid bytes in UTF8 string.";
+        case yajl_lex_string_invalid_escaped_char:
+            return "inside a string, '\\' occurs before a character "
+                   "which it may not.";
+        case yajl_lex_string_invalid_json_char:
+            return "invalid character inside string.";
+        case yajl_lex_string_invalid_hex_char:
+            return "invalid (non-hex) character occurs after '\\u' inside "
+                   "string.";
+        case yajl_lex_invalid_char:
+            return "invalid char in json text.";
+        case yajl_lex_invalid_string:
+            return "invalid string in json text.";
+        case yajl_lex_missing_integer_after_exponent:
+            return "malformed number, a digit is required after the exponent.";
+        case yajl_lex_missing_integer_after_decimal:
+            return "malformed number, a digit is required after the "
+                   "decimal point.";
+        case yajl_lex_missing_integer_after_minus:
+            return "malformed number, a digit is required after the "
+                   "minus sign.";
+        case yajl_lex_unallowed_comment:
+            return "probable comment found in input text, comments are "
+                   "not enabled.";
+    }
+    return "unknown error code";
+}
+
+
+/** allows access to more specific information about the lexical
+ *  error when yajl_lex_lex returns yajl_tok_error. */
+yajl_lex_error
+yajl_lex_get_error(yajl_lexer lexer)
+{
+    if (lexer == NULL) return (yajl_lex_error) -1;
+    return lexer->error;
+}
+
+size_t yajl_lex_current_line(yajl_lexer lexer)
+{
+    return lexer->lineOff;
+}
+
+size_t yajl_lex_current_char(yajl_lexer lexer)
+{
+    return lexer->charOff;
+}
+
+yajl_tok yajl_lex_peek(yajl_lexer lexer, const unsigned char * jsonText,
+                       size_t jsonTextLen, size_t offset)
+{
+    const unsigned char * outBuf;
+    size_t outLen;
+    size_t bufLen = yajl_buf_len(lexer->buf);
+    size_t bufOff = lexer->bufOff;
+    unsigned int bufInUse = lexer->bufInUse;
+    yajl_tok tok;
+
+    tok = yajl_lex_lex(lexer, jsonText, jsonTextLen, &offset,
+                       &outBuf, &outLen);
+
+    lexer->bufOff = bufOff;
+    lexer->bufInUse = bufInUse;
+    yajl_buf_truncate(lexer->buf, bufLen);
+
+    return tok;
+}
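
The stream-parsing note at the top of this file is easiest to see at a chunk boundary. A minimal sketch (not from the diff): the literal "true" is split across two chunks, the first lex returns yajl_tok_eof while the lexer buffers the partial token, and the second call completes it. Per the header's contract, offset restarts at zero for each new chunk:

    #include <assert.h>
    #include "yajl_lex.h"

    static void chunked_lex_demo(yajl_alloc_funcs *afs)
    {
        yajl_lexer lxr = yajl_lex_alloc(afs, 0 /* allowComments */,
                                             1 /* validateUTF8 */);
        const unsigned char *out;
        size_t off, outlen;

        /* chunk 1 ends mid-token: "tr" is copied into the lexer's buf */
        off = 0;
        assert(yajl_lex_lex(lxr, (const unsigned char *) "tr", 2,
                            &off, &out, &outlen) == yajl_tok_eof);

        /* chunk 2 completes the token; outBuf points into the lexer's
         * internal buffer, which now holds all four bytes of "true" */
        off = 0;
        assert(yajl_lex_lex(lxr, (const unsigned char *) "ue", 2,
                            &off, &out, &outlen) == yajl_tok_bool);
        assert(outlen == 4);

        yajl_lex_free(lxr);
    }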
diff --git a/src/yajl/yajl_lex.h b/src/yajl/yajl_lex.h
new file mode 100644
index 0000000..fd17c00
--- /dev/null
+++ b/src/yajl/yajl_lex.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __YAJL_LEX_H__
+#define __YAJL_LEX_H__
+
+#include "api/yajl_common.h"
+
+typedef enum {
+    yajl_tok_bool,
+    yajl_tok_colon,
+    yajl_tok_comma,
+    yajl_tok_eof,
+    yajl_tok_error,
+    yajl_tok_left_brace,
+    yajl_tok_left_bracket,
+    yajl_tok_null,
+    yajl_tok_right_brace,
+    yajl_tok_right_bracket,
+
+    /* we differentiate between integers and doubles to allow the
+     * parser to interpret the number without re-scanning */
+    yajl_tok_integer,
+    yajl_tok_double,
+
+    /* we differentiate between strings which require further processing,
+     * and strings that do not */
+    yajl_tok_string,
+    yajl_tok_string_with_escapes,
+
+    /* comment tokens are not currently returned to the parser, ever */
+    yajl_tok_comment
+} yajl_tok;
+
+typedef struct yajl_lexer_t * yajl_lexer;
+
+yajl_lexer yajl_lex_alloc(yajl_alloc_funcs * alloc,
+                          unsigned int allowComments,
+                          unsigned int validateUTF8);
+
+void yajl_lex_free(yajl_lexer lexer);
+
+/**
+ * run/continue a lex. "offset" is an input/output parameter.
+ * It should be initialized to zero for a
+ * new chunk of target text, and upon subsequent calls with the same
+ * target text should be passed with the value of the previous invocation.
+ *
+ * the client may be interested in the value of offset when an error is
+ * returned from the lexer.  This allows the client to render useful
+ * error messages.
+ *
+ * When you pass the next chunk of data, the offset should be reinitialized
+ * to zero.
+ *
+ * Finally, the output buffer is usually just a pointer into the jsonText,
+ * however in cases where the entity being lexed spans multiple chunks,
+ * the lexer will buffer the entity and the data returned will be
+ * a pointer into that buffer.
+ *
+ * This behavior is abstracted from client code except for its performance
+ * implications, which require that the client choose a reasonable chunk
+ * size to get adequate throughput.
+ */
+yajl_tok yajl_lex_lex(yajl_lexer lexer, const unsigned char * jsonText,
+                      size_t jsonTextLen, size_t * offset,
+                      const unsigned char ** outBuf, size_t * outLen);
+
+/** have a peek at the next token, but don't move the lexer forward */
+yajl_tok yajl_lex_peek(yajl_lexer lexer, const unsigned char * jsonText,
+                       size_t jsonTextLen, size_t offset);
+
+
+typedef enum {
+    yajl_lex_e_ok = 0,
+    yajl_lex_string_invalid_utf8,
+    yajl_lex_string_invalid_escaped_char,
+    yajl_lex_string_invalid_json_char,
+    yajl_lex_string_invalid_hex_char,
+    yajl_lex_invalid_char,
+    yajl_lex_invalid_string,
+    yajl_lex_missing_integer_after_decimal,
+    yajl_lex_missing_integer_after_exponent,
+    yajl_lex_missing_integer_after_minus,
+    yajl_lex_unallowed_comment
+} yajl_lex_error;
+
+const char * yajl_lex_error_to_string(yajl_lex_error error);
+
+/** allows access to more specific information about the lexical
+ *  error when yajl_lex_lex returns yajl_tok_error. */
+yajl_lex_error yajl_lex_get_error(yajl_lexer lexer);
+
+/** get the current offset into the most recently lexed json string. */
+size_t yajl_lex_current_offset(yajl_lexer lexer);
+
+/** get the number of lines lexed by this lexer instance */
+size_t yajl_lex_current_line(yajl_lexer lexer);
+
+/** get the number of chars lexed by this lexer instance since the last
+ *  \n or \r */
+size_t yajl_lex_current_char(yajl_lexer lexer);
+
+#endif
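
One caveat worth a sketch (not part of the diff): yajl_lex_peek classifies the next token without consuming it, and note the swapped naming in yajl_tok, where '[' lexes as yajl_tok_left_brace and '{' as yajl_tok_left_bracket in this code base. Assuming a freshly allocated lexer:

    #include <assert.h>
    #include "yajl_lex.h"

    static void peek_demo(yajl_lexer lxr)
    {
        const unsigned char json[] = "[1, 2]";
        const unsigned char *out;
        size_t off = 0, outlen;

        /* peek saves and restores the lexer's buffer state internally */
        assert(yajl_lex_peek(lxr, json, sizeof(json) - 1, off)
               == yajl_tok_left_brace);

        /* a real lex afterwards sees the same token and advances off */
        assert(yajl_lex_lex(lxr, json, sizeof(json) - 1, &off, &out, &outlen)
               == yajl_tok_left_brace);
        assert(off == 1);
    }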
diff --git a/src/yajl/yajl_parser.c b/src/yajl/yajl_parser.c
new file mode 100644
index 0000000..1a9bd23
--- /dev/null
+++ b/src/yajl/yajl_parser.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "api/yajl_parse.h"
+#include "yajl_lex.h"
+#include "yajl_parser.h"
+#include "yajl_encode.h"
+#include "yajl_bytestack.h"
+
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+#include <math.h>
+
+#define MAX_VALUE_TO_MULTIPLY ((LLONG_MAX / 10) + (LLONG_MAX % 10))
+
+ /* same semantics as strtol */
+long long
+yajl_parse_integer(const unsigned char *number, unsigned int length)
+{
+    long long ret  = 0;
+    long sign = 1;
+    const unsigned char *pos = number;
+    if (*pos == '-') { pos++; sign = -1; }
+    if (*pos == '+') { pos++; }
+
+    while (pos < number + length) {
+        if ( ret > MAX_VALUE_TO_MULTIPLY ) {
+            errno = ERANGE;
+            return sign == 1 ? LLONG_MAX : LLONG_MIN;
+        }
+        ret *= 10;
+        if (LLONG_MAX - ret < (*pos - '0')) {
+            errno = ERANGE;
+            return sign == 1 ? LLONG_MAX : LLONG_MIN;
+        }
+        if (*pos < '0' || *pos > '9') {
+            errno = ERANGE;
+            return sign == 1 ? LLONG_MAX : LLONG_MIN;
+        }
+        ret += (*pos++ - '0');
+    }
+
+    return sign * ret;
+}
+
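
yajl_parse_integer above deliberately mirrors strtol: on overflow it clamps to LLONG_MAX or LLONG_MIN and sets errno to ERANGE rather than wrapping. A minimal sketch (not part of the diff), assuming the function's declaration lives in the yajl_parser.h included above:

    #include <assert.h>
    #include <errno.h>
    #include <limits.h>

    static void parse_integer_demo(void)
    {
        assert(yajl_parse_integer((const unsigned char *) "-123", 4) == -123);

        errno = 0;   /* 20 digits overflows a 64-bit long long */
        assert(yajl_parse_integer((const unsigned char *)
                                  "99999999999999999999", 20) == LLONG_MAX);
        assert(errno == ERANGE);
    }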
+unsigned char *
+yajl_render_error_string(yajl_handle hand, const unsigned char * jsonText,
+                         size_t jsonTextLen, int verbose)
+{
+    size_t offset = hand->bytesConsumed;
+    unsigned char * str;
+    const char * errorType = NULL;
+    const char * errorText = NULL;
+    char text[72];
+    const char * arrow = "                     (right here) ------^\n";
+
+    if (yajl_bs_current(hand->stateStack) == yajl_state_parse_error) {
+        errorType = "parse";
+        errorText = hand->parseError;
+    } else if (yajl_bs_current(hand->stateStack) == yajl_state_lexical_error) {
+        errorType = "lexical";
+        errorText = yajl_lex_error_to_string(yajl_lex_get_error(hand->lexer));
+    } else {
+        errorType = "unknown";
+    }
+
+    {
+        size_t memneeded = 0;
+        memneeded += strlen(errorType);
+        memneeded += strlen(" error");
+        if (errorText != NULL) {
+            memneeded += strlen(": ");
+            memneeded += strlen(errorText);
+        }
+        str = (unsigned char *) YA_MALLOC(&(hand->alloc), memneeded + 2);
+        if (!str) return NULL;
+        str[0] = 0;
+        strcat((char *) str, errorType);
+        strcat((char *) str, " error");
+        if (errorText != NULL) {
+            strcat((char *) str, ": ");
+            strcat((char *) str, errorText);
+        }
+        strcat((char *) str, "\n");
+    }
+
+    /* now we append as many spaces as needed to make sure the error
+     * falls at char 41, if verbose was specified */
+    if (verbose) {
+        size_t start, end, i;
+        size_t spacesNeeded;
+
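+        /* show a window of at most 30 bytes on either side of the error
+         * offset, left-padded so that the offending byte lines up with
+         * the arrow printed on the next line */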
+        spacesNeeded = (offset < 30 ? 40 - offset : 10);
+        start = (offset >= 30 ? offset - 30 : 0);
+        end = (offset + 30 > jsonTextLen ? jsonTextLen : offset + 30);
+
+        for (i=0;i<spacesNeeded;i++) text[i] = ' ';
+
+        for (;start < end;start++, i++) {
+            if (jsonText[start] != '\n' && jsonText[start] != '\r')
+            {
+                text[i] = jsonText[start];
+            }
+            else
+            {
+                text[i] = ' ';
+            }
+        }
+        assert(i <= 71);
+        text[i++] = '\n';
+        text[i] = 0;
+        {
+            char * newStr = (char *)
+                YA_MALLOC(&(hand->alloc), (unsigned int)(strlen((char *) str) +
+                                                         strlen((char *) text) +
+                                                         strlen(arrow) + 1));
+            if (newStr) {
+                newStr[0] = 0;
+                strcat((char *) newStr, (char *) str);
+                strcat((char *) newStr, text);
+                strcat((char *) newStr, arrow);
+            }
+            YA_FREE(&(hand->alloc), str);
+            str = (unsigned char *) newStr;
+        }
+    }
+    return str;
+}
+
+/* check for client cancellation */
+#define _CC_CHK(x)                                                \
+    if (!(x)) {                                                   \
+        yajl_bs_set(hand->stateStack, yajl_state_parse_error);    \
+        hand->parseError =                                        \
+            "client cancelled parse via callback return value";   \
+        return yajl_status_client_canceled;                       \
+    }
+
+
+yajl_status
+yajl_do_finish(yajl_handle hand)
+{
+    yajl_status stat;
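+    /* parse a single trailing space: this flushes the lexer, forcing any
+     * token still in progress (such as a bare number) to terminate */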
+    stat = yajl_do_parse(hand,(const unsigned char *) " ",1);
+
+    if (stat != yajl_status_ok) return stat;
+
+    switch(yajl_bs_current(hand->stateStack))
+    {
+        case yajl_state_parse_error:
+        case yajl_state_lexical_error:
+            return yajl_status_error;
+        case yajl_state_got_value:
+        case yajl_state_parse_complete:
+            return yajl_status_ok;
+        default:
+            if (!(hand->flags & yajl_allow_partial_values))
+            {
+                yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                hand->parseError = "premature EOF";
+                return yajl_status_error;
+            }
+            return yajl_status_ok;
+    }
+}
+
+yajl_status
+yajl_do_parse(yajl_handle hand, const unsigned char * jsonText,
+              size_t jsonTextLen)
+{
+    yajl_tok tok;
+    const unsigned char * buf;
+    size_t bufLen;
+    size_t * offset = &(hand->bytesConsumed);
+
+    *offset = 0;
+
+  around_again:
+    switch (yajl_bs_current(hand->stateStack)) {
+        case yajl_state_parse_complete:
+            if (hand->flags & yajl_allow_multiple_values) {
+                yajl_bs_set(hand->stateStack, yajl_state_got_value);
+                goto around_again;
+            }
+            if (!(hand->flags & yajl_allow_trailing_garbage)) {
+                if (*offset != jsonTextLen) {
+                    tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                                       offset, &buf, &bufLen);
+                    if (tok != yajl_tok_eof) {
+                        yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                        hand->parseError = "trailing garbage";
+                    }
+                    goto around_again;
+                }
+            }
+            return yajl_status_ok;
+        case yajl_state_lexical_error:
+        case yajl_state_parse_error:
+            return yajl_status_error;
+        case yajl_state_start:
+        case yajl_state_got_value:
+        case yajl_state_map_need_val:
+        case yajl_state_array_need_val:
+        case yajl_state_array_start:  {
+            /* for arrays and maps, we advance the state for this
+             * depth, then push the state of the next depth.
+             * If an error occurs during the parsing of the nested
+             * entity, the state at this level will not matter.
+             * A state that needs pushing will be anything other
+             * than state_start */
+
+            yajl_state stateToPush = yajl_state_start;
+
+            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                               offset, &buf, &bufLen);
+
+            switch (tok) {
+                case yajl_tok_eof:
+                    return yajl_status_ok;
+                case yajl_tok_error:
+                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
+                    goto around_again;
+                case yajl_tok_string:
+                    if (hand->callbacks && hand->callbacks->yajl_string) {
+                        _CC_CHK(hand->callbacks->yajl_string(hand->ctx,
+                                                             buf, bufLen));
+                    }
+                    break;
+                case yajl_tok_string_with_escapes:
+                    if (hand->callbacks && hand->callbacks->yajl_string) {
+                        yajl_buf_clear(hand->decodeBuf);
+                        yajl_string_decode(hand->decodeBuf, buf, bufLen);
+                        _CC_CHK(hand->callbacks->yajl_string(
+                                    hand->ctx, yajl_buf_data(hand->decodeBuf),
+                                    yajl_buf_len(hand->decodeBuf)));
+                    }
+                    break;
+                case yajl_tok_bool:
+                    if (hand->callbacks && hand->callbacks->yajl_boolean) {
+                        _CC_CHK(hand->callbacks->yajl_boolean(hand->ctx,
+                                                              *buf == 't'));
+                    }
+                    break;
+                case yajl_tok_null:
+                    if (hand->callbacks && hand->callbacks->yajl_null) {
+                        _CC_CHK(hand->callbacks->yajl_null(hand->ctx));
+                    }
+                    break;
+                case yajl_tok_left_bracket:
+                    if (hand->callbacks && hand->callbacks->yajl_start_map) {
+                        _CC_CHK(hand->callbacks->yajl_start_map(hand->ctx));
+                    }
+                    stateToPush = yajl_state_map_start;
+                    break;
+                case yajl_tok_left_brace:
+                    if (hand->callbacks && hand->callbacks->yajl_start_array) {
+                        _CC_CHK(hand->callbacks->yajl_start_array(hand->ctx));
+                    }
+                    stateToPush = yajl_state_array_start;
+                    break;
+                case yajl_tok_integer:
+                    if (hand->callbacks) {
+                        if (hand->callbacks->yajl_number) {
+                            _CC_CHK(hand->callbacks->yajl_number(
+                                        hand->ctx,(const char *) buf, bufLen));
+                        } else if (hand->callbacks->yajl_integer) {
+                            long long int i = 0;
+                            errno = 0;
+                            i = yajl_parse_integer(buf, bufLen);
+                            if ((i == LLONG_MIN || i == LLONG_MAX) &&
+                                errno == ERANGE)
+                            {
+                                yajl_bs_set(hand->stateStack,
+                                            yajl_state_parse_error);
+                                hand->parseError = "integer overflow" ;
+                                /* try to restore error offset */
+                                if (*offset >= bufLen) *offset -= bufLen;
+                                else *offset = 0;
+                                goto around_again;
+                            }
+                            _CC_CHK(hand->callbacks->yajl_integer(hand->ctx,
+                                                                  i));
+                        }
+                    }
+                    break;
+                case yajl_tok_double:
+                    if (hand->callbacks) {
+                        if (hand->callbacks->yajl_number) {
+                            _CC_CHK(hand->callbacks->yajl_number(
+                                        hand->ctx, (const char *) buf, bufLen));
+                        } else if (hand->callbacks->yajl_double) {
+                            double d = 0.0;
+                            yajl_buf_clear(hand->decodeBuf);
+                            yajl_buf_append(hand->decodeBuf, buf, bufLen);
+                            buf = yajl_buf_data(hand->decodeBuf);
+                            errno = 0;
+                            d = strtod((char *) buf, NULL);
+                            if ((d == HUGE_VAL || d == -HUGE_VAL) &&
+                                errno == ERANGE)
+                            {
+                                yajl_bs_set(hand->stateStack,
+                                            yajl_state_parse_error);
+                                hand->parseError = "numeric (floating point) "
+                                    "overflow";
+                                /* try to restore error offset */
+                                if (*offset >= bufLen) *offset -= bufLen;
+                                else *offset = 0;
+                                goto around_again;
+                            }
+                            _CC_CHK(hand->callbacks->yajl_double(hand->ctx,
+                                                                 d));
+                        }
+                    }
+                    break;
+                case yajl_tok_right_brace: {
+                    if (yajl_bs_current(hand->stateStack) ==
+                        yajl_state_array_start)
+                    {
+                        if (hand->callbacks &&
+                            hand->callbacks->yajl_end_array)
+                        {
+                            _CC_CHK(hand->callbacks->yajl_end_array(hand->ctx));
+                        }
+                        yajl_bs_pop(hand->stateStack);
+                        goto around_again;
+                    }
+                    /* intentional fall-through */
+                }
+                case yajl_tok_colon:
+                case yajl_tok_comma:
+                case yajl_tok_right_bracket:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError =
+                        "unallowed token at this point in JSON text";
+                    goto around_again;
+                default:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError = "invalid token, internal error";
+                    goto around_again;
+            }
+            /* got a value.  transition depends on the state we're in. */
+            {
+                yajl_state s = yajl_bs_current(hand->stateStack);
+                if (s == yajl_state_start || s == yajl_state_got_value) {
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_complete);
+                } else if (s == yajl_state_map_need_val) {
+                    yajl_bs_set(hand->stateStack, yajl_state_map_got_val);
+                } else {
+                    yajl_bs_set(hand->stateStack, yajl_state_array_got_val);
+                }
+            }
+            if (stateToPush != yajl_state_start) {
+                yajl_bs_push(hand->stateStack, stateToPush);
+            }
+
+            goto around_again;
+        }
+        case yajl_state_map_start:
+        case yajl_state_map_need_key: {
+            /* only difference between these two states is that in
+             * start '}' is valid, whereas in need_key, we've parsed
+             * a comma, and a string key _must_ follow */
+            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                               offset, &buf, &bufLen);
+            switch (tok) {
+                case yajl_tok_eof:
+                    return yajl_status_ok;
+                case yajl_tok_error:
+                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
+                    goto around_again;
+                case yajl_tok_string_with_escapes:
+                    if (hand->callbacks && hand->callbacks->yajl_map_key) {
+                        yajl_buf_clear(hand->decodeBuf);
+                        yajl_string_decode(hand->decodeBuf, buf, bufLen);
+                        buf = yajl_buf_data(hand->decodeBuf);
+                        bufLen = yajl_buf_len(hand->decodeBuf);
+                    }
+                    /* intentional fall-through */
+                case yajl_tok_string:
+                    if (hand->callbacks && hand->callbacks->yajl_map_key) {
+                        _CC_CHK(hand->callbacks->yajl_map_key(hand->ctx, buf,
+                                                              bufLen));
+                    }
+                    yajl_bs_set(hand->stateStack, yajl_state_map_sep);
+                    goto around_again;
+                case yajl_tok_right_bracket:
+                    if (yajl_bs_current(hand->stateStack) ==
+                        yajl_state_map_start)
+                    {
+                        if (hand->callbacks && hand->callbacks->yajl_end_map) {
+                            _CC_CHK(hand->callbacks->yajl_end_map(hand->ctx));
+                        }
+                        yajl_bs_pop(hand->stateStack);
+                        goto around_again;
+                    }
+                default:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError =
+                        "invalid object key (must be a string)";
+                    goto around_again;
+            }
+        }
+        case yajl_state_map_sep: {
+            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                               offset, &buf, &bufLen);
+            switch (tok) {
+                case yajl_tok_colon:
+                    yajl_bs_set(hand->stateStack, yajl_state_map_need_val);
+                    goto around_again;
+                case yajl_tok_eof:
+                    return yajl_status_ok;
+                case yajl_tok_error:
+                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
+                    goto around_again;
+                default:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError = "object key and value must "
+                        "be separated by a colon (':')";
+                    goto around_again;
+            }
+        }
+        case yajl_state_map_got_val: {
+            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                               offset, &buf, &bufLen);
+            switch (tok) {
+                case yajl_tok_right_bracket:
+                    if (hand->callbacks && hand->callbacks->yajl_end_map) {
+                        _CC_CHK(hand->callbacks->yajl_end_map(hand->ctx));
+                    }
+                    yajl_bs_pop(hand->stateStack);
+                    goto around_again;
+                case yajl_tok_comma:
+                    yajl_bs_set(hand->stateStack, yajl_state_map_need_key);
+                    goto around_again;
+                case yajl_tok_eof:
+                    return yajl_status_ok;
+                case yajl_tok_error:
+                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
+                    goto around_again;
+                default:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError = "after key and value, inside map, "
+                                       "I expect ',' or '}'";
+                    /* try to restore error offset */
+                    if (*offset >= bufLen) *offset -= bufLen;
+                    else *offset = 0;
+                    goto around_again;
+            }
+        }
+        case yajl_state_array_got_val: {
+            tok = yajl_lex_lex(hand->lexer, jsonText, jsonTextLen,
+                               offset, &buf, &bufLen);
+            switch (tok) {
+                case yajl_tok_right_brace:
+                    if (hand->callbacks && hand->callbacks->yajl_end_array) {
+                        _CC_CHK(hand->callbacks->yajl_end_array(hand->ctx));
+                    }
+                    yajl_bs_pop(hand->stateStack);
+                    goto around_again;
+                case yajl_tok_comma:
+                    yajl_bs_set(hand->stateStack, yajl_state_array_need_val);
+                    goto around_again;
+                case yajl_tok_eof:
+                    return yajl_status_ok;
+                case yajl_tok_error:
+                    yajl_bs_set(hand->stateStack, yajl_state_lexical_error);
+                    goto around_again;
+                default:
+                    yajl_bs_set(hand->stateStack, yajl_state_parse_error);
+                    hand->parseError =
+                        "after array element, I expect ',' or ']'";
+                    goto around_again;
+            }
+        }
+    }
+
+    //abort() call commented out by jeroen for R CMD check
+    //abort();
+    return yajl_status_error;
+}
+
diff --git a/src/yajl/yajl_parser.h b/src/yajl/yajl_parser.h
new file mode 100644
index 0000000..c79299a
--- /dev/null
+++ b/src/yajl/yajl_parser.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2007-2014, Lloyd Hilaiel <me at lloyd.io>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __YAJL_PARSER_H__
+#define __YAJL_PARSER_H__
+
+#include "api/yajl_parse.h"
+#include "yajl_bytestack.h"
+#include "yajl_buf.h"
+#include "yajl_lex.h"
+
+
+typedef enum {
+    yajl_state_start = 0,
+    yajl_state_parse_complete,
+    yajl_state_parse_error,
+    yajl_state_lexical_error,
+    yajl_state_map_start,
+    yajl_state_map_sep,
+    yajl_state_map_need_val,
+    yajl_state_map_got_val,
+    yajl_state_map_need_key,
+    yajl_state_array_start,
+    yajl_state_array_got_val,
+    yajl_state_array_need_val,
+    yajl_state_got_value,
+} yajl_state;
+
+struct yajl_handle_t {
+    const yajl_callbacks * callbacks;
+    void * ctx;
+    yajl_lexer lexer;
+    const char * parseError;
+    /* the number of bytes consumed from the last client buffer;
+     * in the case of an error this can be used as the error offset */
+    size_t bytesConsumed;
+    /* temporary storage for decoded strings */
+    yajl_buf decodeBuf;
+    /* a stack of states.  access with yajl_state_XXX routines */
+    yajl_bytestack stateStack;
+    /* memory allocation routines */
+    yajl_alloc_funcs alloc;
+    /* bitfield */
+    unsigned int flags;
+};
+
+yajl_status
+yajl_do_parse(yajl_handle handle, const unsigned char * jsonText,
+              size_t jsonTextLen);
+
+yajl_status
+yajl_do_finish(yajl_handle handle);
+
+unsigned char *
+yajl_render_error_string(yajl_handle hand, const unsigned char * jsonText,
+                         size_t jsonTextLen, int verbose);
+
+/* A little built in integer parsing routine with the same semantics as strtol
+ * that's unaffected by LOCALE. */
+long long
+yajl_parse_integer(const unsigned char *number, unsigned int length);
+
+
+#endif
diff --git a/src/yajl/yajl_tree.c b/src/yajl/yajl_tree.c
new file mode 100644
index 0000000..da48d21
--- /dev/null
+++ b/src/yajl/yajl_tree.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2010-2011  Florian Forster  <ff at octo.it>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "api/yajl_tree.h"
+#include "api/yajl_parse.h"
+
+#include "yajl_parser.h"
+
+/* Fix for Windows XP from https://rt.cpan.org/Public/Bug/Display.html?id=69113
+ * (replaces the plain: #if defined(_WIN32) || defined(WIN32)) */
+#if ((defined(_WIN32) || defined(WIN32)) && defined(_MSC_VER))
+
+#define snprintf sprintf_s
+#endif
+
+#define STATUS_CONTINUE 1
+#define STATUS_ABORT    0
+
+struct stack_elem_s;
+typedef struct stack_elem_s stack_elem_t;
+struct stack_elem_s
+{
+    char * key;
+    yajl_val value;
+    stack_elem_t *next;
+};
+
+struct context_s
+{
+    stack_elem_t *stack;
+    yajl_val root;
+    char *errbuf;
+    size_t errbuf_size;
+};
+typedef struct context_s context_t;
+
+#define RETURN_ERROR(ctx,retval,...) {                                  \
+        if ((ctx)->errbuf != NULL)                                      \
+            snprintf ((ctx)->errbuf, (ctx)->errbuf_size, __VA_ARGS__);  \
+        return (retval);                                                \
+    }
+
+static yajl_val value_alloc (yajl_type type)
+{
+    yajl_val v;
+
+    v = malloc (sizeof (*v));
+    if (v == NULL) return (NULL);
+    memset (v, 0, sizeof (*v));
+    v->type = type;
+
+    return (v);
+}
+
+static void yajl_object_free (yajl_val v)
+{
+    size_t i;
+
+    if (!YAJL_IS_OBJECT(v)) return;
+
+    for (i = 0; i < v->u.object.len; i++)
+    {
+        free((char *) v->u.object.keys[i]);
+        v->u.object.keys[i] = NULL;
+        yajl_tree_free (v->u.object.values[i]);
+        v->u.object.values[i] = NULL;
+    }
+
+    free((void*) v->u.object.keys);
+    free(v->u.object.values);
+    free(v);
+}
+
+static void yajl_array_free (yajl_val v)
+{
+    size_t i;
+
+    if (!YAJL_IS_ARRAY(v)) return;
+
+    for (i = 0; i < v->u.array.len; i++)
+    {
+        yajl_tree_free (v->u.array.values[i]);
+        v->u.array.values[i] = NULL;
+    }
+
+    free(v->u.array.values);
+    free(v);
+}
+
+/*
+ * Parsing nested objects and arrays is implemented using a stack. When a new
+ * object or array starts (a curly or a square opening bracket is read), an
+ * appropriate value is pushed on the stack. When the end of the object is
+ * reached (an appropriate closing bracket has been read), the value is popped
+ * off the stack and added to the enclosing object using "context_add_value".
+ */
+static int context_push(context_t *ctx, yajl_val v)
+{
+    stack_elem_t *stack;
+
+    stack = malloc (sizeof (*stack));
+    if (stack == NULL)
+        RETURN_ERROR (ctx, ENOMEM, "Out of memory");
+    memset (stack, 0, sizeof (*stack));
+
+    assert ((ctx->stack == NULL)
+            || YAJL_IS_OBJECT (v)
+            || YAJL_IS_ARRAY (v));
+
+    stack->value = v;
+    stack->next = ctx->stack;
+    ctx->stack = stack;
+
+    return (0);
+}
+
+static yajl_val context_pop(context_t *ctx)
+{
+    stack_elem_t *stack;
+    yajl_val v;
+
+    if (ctx->stack == NULL)
+        RETURN_ERROR (ctx, NULL, "context_pop: "
+                      "Bottom of stack reached prematurely");
+
+    stack = ctx->stack;
+    ctx->stack = stack->next;
+
+    v = stack->value;
+
+    free (stack);
+
+    return (v);
+}
+
+static int object_add_keyval(context_t *ctx,
+                             yajl_val obj, char *key, yajl_val value)
+{
+    const char **tmpk;
+    yajl_val *tmpv;
+
+    /* We're checking for NULL in "context_add_value" or its callers. */
+    assert (ctx != NULL);
+    assert (obj != NULL);
+    assert (key != NULL);
+    assert (value != NULL);
+
+    /* We're assuring that "obj" is an object in "context_add_value". */
+    assert(YAJL_IS_OBJECT(obj));
+
+    tmpk = realloc((void *) obj->u.object.keys, sizeof(*(obj->u.object.keys)) * (obj->u.object.len + 1));
+    if (tmpk == NULL)
+        RETURN_ERROR(ctx, ENOMEM, "Out of memory");
+    obj->u.object.keys = tmpk;
+
+    tmpv = realloc(obj->u.object.values, sizeof (*obj->u.object.values) * (obj->u.object.len + 1));
+    if (tmpv == NULL)
+        RETURN_ERROR(ctx, ENOMEM, "Out of memory");
+    obj->u.object.values = tmpv;
+
+    obj->u.object.keys[obj->u.object.len] = key;
+    obj->u.object.values[obj->u.object.len] = value;
+    obj->u.object.len++;
+
+    return (0);
+}
+
+static int array_add_value (context_t *ctx,
+                            yajl_val array, yajl_val value)
+{
+    yajl_val *tmp;
+
+    /* We're checking for NULL pointers in "context_add_value" or its
+     * callers. */
+    assert (ctx != NULL);
+    assert (array != NULL);
+    assert (value != NULL);
+
+    /* "context_add_value" will only call us with array values. */
+    assert(YAJL_IS_ARRAY(array));
+
+    tmp = realloc(array->u.array.values,
+                  sizeof(*(array->u.array.values)) * (array->u.array.len + 1));
+    if (tmp == NULL)
+        RETURN_ERROR(ctx, ENOMEM, "Out of memory");
+    array->u.array.values = tmp;
+    array->u.array.values[array->u.array.len] = value;
+    array->u.array.len++;
+
+    return 0;
+}
+
+/*
+ * Add a value to the value on top of the stack or the "root" member in the
+ * context if the end of the parsing process is reached.
+ */
+static int context_add_value (context_t *ctx, yajl_val v)
+{
+    /* We're checking for NULL values in all the calling functions. */
+    assert (ctx != NULL);
+    assert (v != NULL);
+
+    /*
+     * There are three valid states in which this function may be called:
+     *   - There is no value on the stack => This is the only value. This is the
+     *     last step done when parsing a document. We assign the value to the
+     *     "root" member and return.
+     *   - The value on the stack is an object. In this case store the key on the
+     *     stack or, if the key has already been read, add key and value to the
+     *     object.
+     *   - The value on the stack is an array. In this case simply add the value
+     *     and return.
+     */
+    if (ctx->stack == NULL)
+    {
+        assert (ctx->root == NULL);
+        ctx->root = v;
+        return (0);
+    }
+    else if (YAJL_IS_OBJECT (ctx->stack->value))
+    {
+        if (ctx->stack->key == NULL)
+        {
+            if (!YAJL_IS_STRING (v))
+                RETURN_ERROR (ctx, EINVAL, "context_add_value: "
+                              "Object key is not a string (%#04x)",
+                              v->type);
+
+            ctx->stack->key = v->u.string;
+            v->u.string = NULL;
+            free(v);
+            return (0);
+        }
+        else /* if (ctx->key != NULL) */
+        {
+            char * key;
+
+            key = ctx->stack->key;
+            ctx->stack->key = NULL;
+            return (object_add_keyval (ctx, ctx->stack->value, key, v));
+        }
+    }
+    else if (YAJL_IS_ARRAY (ctx->stack->value))
+    {
+        return (array_add_value (ctx, ctx->stack->value, v));
+    }
+    else
+    {
+        RETURN_ERROR (ctx, EINVAL, "context_add_value: Cannot add value to "
+                      "a value of type %#04x (not a composite type)",
+                      ctx->stack->value->type);
+    }
+}
+
+static int handle_string (void *ctx,
+                          const unsigned char *string, size_t string_length)
+{
+    yajl_val v;
+
+    v = value_alloc (yajl_t_string);
+    if (v == NULL)
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    v->u.string = malloc (string_length + 1);
+    if (v->u.string == NULL)
+    {
+        free (v);
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+    }
+    memcpy(v->u.string, string, string_length);
+    v->u.string[string_length] = 0;
+
+    return ((context_add_value (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_number (void *ctx, const char *string, size_t string_length)
+{
+    yajl_val v;
+    char *endptr;
+
+    v = value_alloc(yajl_t_number);
+    if (v == NULL)
+        RETURN_ERROR((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    v->u.number.r = malloc(string_length + 1);
+    if (v->u.number.r == NULL)
+    {
+        free(v);
+        RETURN_ERROR((context_t *) ctx, STATUS_ABORT, "Out of memory");
+    }
+    memcpy(v->u.number.r, string, string_length);
+    v->u.number.r[string_length] = 0;
+
+    v->u.number.flags = 0;
+
+    errno = 0;
+    v->u.number.i = yajl_parse_integer((const unsigned char *) v->u.number.r,
+                                       strlen(v->u.number.r));
+    if (errno == 0)
+        v->u.number.flags |= YAJL_NUMBER_INT_VALID;
+
+    endptr = NULL;
+    errno = 0;
+    v->u.number.d = strtod(v->u.number.r, &endptr);
+    if ((errno == 0) && (endptr != NULL) && (*endptr == 0))
+        v->u.number.flags |= YAJL_NUMBER_DOUBLE_VALID;
+
+    return ((context_add_value(ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_start_map (void *ctx)
+{
+    yajl_val v;
+
+    v = value_alloc(yajl_t_object);
+    if (v == NULL)
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    v->u.object.keys = NULL;
+    v->u.object.values = NULL;
+    v->u.object.len = 0;
+
+    return ((context_push (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_end_map (void *ctx)
+{
+    yajl_val v;
+
+    v = context_pop (ctx);
+    if (v == NULL)
+        return (STATUS_ABORT);
+
+    return ((context_add_value (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_start_array (void *ctx)
+{
+    yajl_val v;
+
+    v = value_alloc(yajl_t_array);
+    if (v == NULL)
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    v->u.array.values = NULL;
+    v->u.array.len = 0;
+
+    return ((context_push (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_end_array (void *ctx)
+{
+    yajl_val v;
+
+    v = context_pop (ctx);
+    if (v == NULL)
+        return (STATUS_ABORT);
+
+    return ((context_add_value (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_boolean (void *ctx, int boolean_value)
+{
+    yajl_val v;
+
+    v = value_alloc (boolean_value ? yajl_t_true : yajl_t_false);
+    if (v == NULL)
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    return ((context_add_value (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+static int handle_null (void *ctx)
+{
+    yajl_val v;
+
+    v = value_alloc (yajl_t_null);
+    if (v == NULL)
+        RETURN_ERROR ((context_t *) ctx, STATUS_ABORT, "Out of memory");
+
+    return ((context_add_value (ctx, v) == 0) ? STATUS_CONTINUE : STATUS_ABORT);
+}
+
+/*
+ * Public functions
+ */
+yajl_val yajl_tree_parse (const char *input,
+                          char *error_buffer, size_t error_buffer_size)
+{
+    static const yajl_callbacks callbacks =
+        {
+            /* null        = */ handle_null,
+            /* boolean     = */ handle_boolean,
+            /* integer     = */ NULL,
+            /* double      = */ NULL,
+            /* number      = */ handle_number,
+            /* string      = */ handle_string,
+            /* start map   = */ handle_start_map,
+            /* map key     = */ handle_string,
+            /* end map     = */ handle_end_map,
+            /* start array = */ handle_start_array,
+            /* end array   = */ handle_end_array
+        };
+
+    yajl_handle handle;
+    yajl_status status;
+    char * internal_err_str;
+    context_t ctx = { NULL, NULL, NULL, 0 };
+
+    ctx.errbuf = error_buffer;
+    ctx.errbuf_size = error_buffer_size;
+
+    if (error_buffer != NULL)
+        memset (error_buffer, 0, error_buffer_size);
+
+    handle = yajl_alloc (&callbacks, NULL, &ctx);
+    yajl_config(handle, yajl_allow_comments, 1);
+
+    status = yajl_parse(handle,
+                        (unsigned char *) input,
+                        strlen (input));
+
+    //fix by jeroen
+    if(status == yajl_status_ok){
+      status = yajl_complete_parse (handle);
+    }
+    //end of fix
+    if (status != yajl_status_ok) {
+        if (error_buffer != NULL && error_buffer_size > 0) {
+            internal_err_str = (char *) yajl_get_error(handle, 1,
+                                            (const unsigned char *) input,
+                                            strlen(input));
+            snprintf(error_buffer, error_buffer_size, "%s", internal_err_str);
+            YA_FREE(&(handle->alloc), internal_err_str);
+        }
+        yajl_free (handle);
+        return NULL;
+    }
+
+    yajl_free (handle);
+    return (ctx.root);
+}
+
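+/* usage example with illustrative keys: look up root["user"]["name"] as a
+ * string (the path is a NULL-terminated array of keys):
+ *   const char * path[] = { "user", "name", (const char *) 0 };
+ *   yajl_val name = yajl_tree_get(root, path, yajl_t_string);
+ */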
+yajl_val yajl_tree_get(yajl_val n, const char ** path, yajl_type type)
+{
+    if (!path) return NULL;
+    while (n && *path) {
+        size_t i;
+        size_t len;
+
+        if (n->type != yajl_t_object) return NULL;
+        len = n->u.object.len;
+        for (i = 0; i < len; i++) {
+            if (!strcmp(*path, n->u.object.keys[i])) {
+                n = n->u.object.values[i];
+                break;
+            }
+        }
+        if (i == len) return NULL;
+        path++;
+    }
+    if (n && type != yajl_t_any && type != n->type) n = NULL;
+    return n;
+}
+
+void yajl_tree_free (yajl_val v)
+{
+    if (v == NULL) return;
+
+    if (YAJL_IS_STRING(v))
+    {
+        free(v->u.string);
+        free(v);
+    }
+    else if (YAJL_IS_NUMBER(v))
+    {
+        free(v->u.number.r);
+        free(v);
+    }
+    else if (YAJL_GET_OBJECT(v))
+    {
+        yajl_object_free(v);
+    }
+    else if (YAJL_GET_ARRAY(v))
+    {
+        yajl_array_free(v);
+    }
+    else /* if (yajl_t_true or yajl_t_false or yajl_t_null) */
+    {
+        free(v);
+    }
+}
+
+/*
+ * Stuff below added by Jeroen to support push parsing over connection interface.
+ */
+
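+/* sketch of the intended push-parsing flow (chunk / chunk_len are
+ * illustrative names for data read from the R connection):
+ *   yajl_handle h = push_parser_new();
+ *   yajl_parse(h, chunk, chunk_len);      repeated for each chunk
+ *   yajl_complete_parse(h);
+ *   yajl_val root = push_parser_get(h);
+ */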
+yajl_handle push_parser_new () {
+
+  /* init callback handlers */
+  yajl_callbacks *callbacks = malloc(sizeof(yajl_callbacks));
+  callbacks->yajl_null = handle_null;
+  callbacks->yajl_boolean = handle_boolean;
+  callbacks->yajl_number = handle_number;
+  callbacks->yajl_integer = NULL;
+  callbacks->yajl_double = NULL;
+  callbacks->yajl_string = handle_string;
+  callbacks->yajl_start_map = handle_start_map;
+  callbacks->yajl_map_key = handle_string;
+  callbacks->yajl_end_map = handle_end_map;
+  callbacks->yajl_start_array = handle_start_array;
+  callbacks->yajl_end_array = handle_end_array;
+
+  /* init context */
+  context_t *ctx = malloc(sizeof(context_t));
+  ctx->root = NULL;
+  ctx->stack = NULL;
+  ctx->errbuf = malloc(1024);
+  ctx->errbuf_size = 1024;
+
+  /* init handle */
+  yajl_handle handle = yajl_alloc(callbacks, NULL, ctx);
+  yajl_config(handle, yajl_allow_comments, 1);
+  return handle;
+}
+
+yajl_val push_parser_get(yajl_handle handle){
+  context_t *ctx = (context_t*) handle->ctx;
+  return ctx->root;
+}
diff --git a/src/yajl/yajl_version.c b/src/yajl/yajl_version.c
new file mode 100644
index 0000000..71c1450
--- /dev/null
+++ b/src/yajl/yajl_version.c
@@ -0,0 +1,7 @@
+#include <api/yajl_version.h>
+
+int yajl_version(void)
+{
+	return YAJL_VERSION;
+}
+
diff --git a/tests/run-all.R b/tests/run-all.R
new file mode 100644
index 0000000..87fe983
--- /dev/null
+++ b/tests/run-all.R
@@ -0,0 +1,7 @@
+#This file runs all unit tests on every R CMD check.
+#Comment this out to disable.
+
+library(testthat)
+
+#filter is to disable tests that rely on external servers
+test_package("jsonlite", filter="toJSON|fromJSON|libjson|serializeJSON")
diff --git a/vignettes/json-aaquickstart.Rmd b/vignettes/json-aaquickstart.Rmd
new file mode 100644
index 0000000..7724c27
--- /dev/null
+++ b/vignettes/json-aaquickstart.Rmd
@@ -0,0 +1,126 @@
+---
+title: "Getting started with JSON and jsonlite"
+date: "`r Sys.Date()`"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Getting started with JSON and jsonlite}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+```{r echo=FALSE}
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+#no longer needed because yajl does not use tabs.
+#toJSON <- function(...){
+#  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+#}
+```
+
+# Getting started with JSON and jsonlite
+
+The jsonlite package is a JSON parser/generator optimized for the web. Its main strength is that it implements a bidirectional mapping between JSON data and the most important R data types. This allows us to convert between R objects and JSON without loss of type or information, and without the need for any manual data munging. That makes it ideal for interacting with web APIs, or for building pipelines where data structures seamlessly flow in and out of R using JSON.
+
+```{r message=FALSE}
+library(jsonlite)
+all.equal(mtcars, fromJSON(toJSON(mtcars)))
+```
+
+This vignette introduces basic concepts to get started with jsonlite. For a more detailed outline and motivation of the mapping, see: [arXiv:1403.2805](http://arxiv.org/abs/1403.2805).
+
+## Simplification
+
+Simplification is the process where JSON arrays automatically get converted from a list into a more specific R class. The `fromJSON` function has three arguments that control the simplification process: `simplifyVector`, `simplifyDataFrame` and `simplifyMatrix`. Each is enabled by default.
+
+| JSON structure        | Example JSON data                                        | Simplifies to R class | Argument in fromJSON | 
+| ----------------------|----------------------------------------------------------|-----------------------|----------------------|
+| Array of primitives   | `["Amsterdam", "Rotterdam", "Utrecht", "Den Haag"]`      | Atomic Vector         | simplifyVector       | 
+| Array of objects      | `[{"name":"Erik", "age":43}, {"name":"Anna", "age":32}]` | Data Frame            | simplifyDataFrame    | 
+| Array of arrays       | `[ [1, 2, 3], [4, 5, 6] ]`                               | Matrix                | simplifyMatrix       |
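+
+As a quick sketch of how these arguments interact (using a small made-up document), each form of simplification can be switched off independently:
+
+```{r}
+json <- '[ [1, 2], [3, 4] ]'
+fromJSON(json)                           # simplifies into a matrix
+fromJSON(json, simplifyMatrix = FALSE)   # list of two atomic vectors
+fromJSON(json, simplifyVector = FALSE)   # nested lists, no simplification
+```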
+
+### Atomic Vectors
+
+When `simplifyVector` is enabled, JSON arrays containing **primitives** (strings, numbers, booleans or null) simplify into an atomic vector:
+
+```{r}
+# A JSON array of primitives
+json <- '["Mario", "Peach", null, "Bowser"]'
+
+# Simplifies into an atomic vector
+fromJSON(json)
+```
+
+Without simplification, any JSON array turns into a list: 
+
+```{r}
+# No simplification:
+fromJSON(json, simplifyVector = FALSE)
+```
+
+
+### Data Frames
+
+When `simplifyDataFrame` is enabled, JSON arrays containing **objects** (key-value pairs) simplify into a data frame:
+
+```{r}
+json <-
+'[
+  {"Name" : "Mario", "Age" : 32, "Occupation" : "Plumber"}, 
+  {"Name" : "Peach", "Age" : 21, "Occupation" : "Princess"},
+  {},
+  {"Name" : "Bowser", "Occupation" : "Koopa"}
+]'
+mydf <- fromJSON(json)
+mydf
+```
+
+The data frame gets converted back into the original JSON structure by `toJSON` (whitespace and line breaks are insignificant in JSON).
+
+```{r}
+mydf$Ranking <- c(3, 1, 2, 4)
+toJSON(mydf, pretty=TRUE)
+```
+
+Hence you can go back and forth between data frames and JSON, without any manual data restructuring.
+
+### Matrices and Arrays
+
+When `simplifyMatrix` is enabled, JSON arrays containing **equal-length sub-arrays** simplify into a matrix (or higher order R array):
+
+```{r}
+json <- '[
+  [1, 2, 3, 4],
+  [5, 6, 7, 8],
+  [9, 10, 11, 12]
+]'
+mymatrix <- fromJSON(json)
+mymatrix
+```
+
+Again, we can use `toJSON` to convert the matrix or array back into the original JSON structure:
+
+```{r}
+toJSON(mymatrix, pretty = TRUE)
+```
+
+The simplification works for arrays of arbitrary dimensionality, as long as the dimensions match (R does not support ragged arrays).
+
+```{r}
+json <- '[
+   [[1, 2], 
+    [3, 4]],
+   [[5, 6], 
+    [7, 8]],
+   [[9, 10],
+    [11, 12]]
+]'
+myarray <- fromJSON(json)
+myarray[1, , ]
+myarray[ , ,1]
+```
+
+This is all there is to it! For a more detailed outline and motivation of the mapping, see: [arXiv:1403.2805](http://arxiv.org/abs/1403.2805).
diff --git a/vignettes/json-apis.Rmd b/vignettes/json-apis.Rmd
new file mode 100644
index 0000000..c55d0b9
--- /dev/null
+++ b/vignettes/json-apis.Rmd
@@ -0,0 +1,376 @@
+---
+title: "Fetching JSON data from REST APIs"
+date: "2015-09-06"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Fetching JSON data from REST APIs}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+
+This section lists some examples of public HTTP APIs that publish data in JSON format. These are great for getting a sense of the complex structures that are encountered in real-world JSON data. All services are free, but some require registration/authentication. Each example returns lots of data, so not all output is printed in this document.
+
+
+```r
+library(jsonlite)
+```
+
+## Github
+
+Github is an online code repository and has APIs to get live data on almost all activity. Below are some examples from a well-known R package and author:
+
+
+```r
+hadley_orgs <- fromJSON("https://api.github.com/users/hadley/orgs")
+hadley_repos <- fromJSON("https://api.github.com/users/hadley/repos")
+gg_commits <- fromJSON("https://api.github.com/repos/hadley/ggplot2/commits")
+gg_issues <- fromJSON("https://api.github.com/repos/hadley/ggplot2/issues")
+
+#latest issues
+paste(format(gg_issues$user$login), ":", gg_issues$title)
+```
+
+```
+ [1] "idavydov     : annotate(\"segment\") wrong position if limits are inverted"                      
+ [2] "ben519       : geom_polygon doesn't make NA values grey when using continuous fill"              
+ [3] "has2k1       : Fix multiple tiny issues in the position classes"                                 
+ [4] "neggert      : Problem with geom_bar position=fill and faceting"                                 
+ [5] "robertzk     : Fix typo in geom_linerange docs."                                                 
+ [6] "lionel-      : stat_bar() gets confused with numeric discrete data?"                             
+ [7] "daattali     : Request: support theme axis.ticks.length.x and axis.ticks.length.y"               
+ [8] "sethchandler : Documentation error on %+replace% ?"                                              
+ [9] "daattali     : dev version 1.0.1.9003 has some breaking changes"                                 
+[10] "lionel-      : Labels"                                                                           
+[11] "nutterb      : legend for `geom_line` colour disappears when `alpha` < 1.0"                      
+[12] "wch          : scale_name property should be removed from Scale objects"                         
+[13] "wch          : scale_details arguments in Coords should be renamed panel_scales or scale"        
+[14] "wch          : ScalesList-related functions should be moved into ggproto object"                 
+[15] "wch          : update_geom_defaults and update_stat_defaults should accept Geom and Stat objects"
+[16] "wch          : Make some ggproto objects immutable. Closes #1237"                                
+[17] "and3k        : Control size of the border and padding of geom_label"                             
+[18] "hadley       : Consistent argument order and formatting for layer functions"                     
+[19] "hadley       : Consistently handle missing values"                                               
+[20] "cmohamma     : fortify causes fatal error"                                                       
+[21] "lionel-      : Flawed `label_bquote()` implementation"                                           
+[22] "beroe        : Create alias for `colors=` in `scale_color_gradientn()`"                          
+[23] "and3k        : hjust broken in y facets"                                                         
+[24] "joranE       : Allow color bar guides for alpha scales"                                          
+[25] "hadley       : dir = \"v\" also needs to swap nrow and ncol"                                     
+[26] "joranE       : Add examples for removing guides"                                                 
+[27] "lionel-      : New approach for horizontal layers"                                               
+[28] "bbolker      : add horizontal linerange geom"                                                    
+[29] "hadley       : Write vignette about grid"                                                        
+[30] "hadley       : Immutable flag for ggproto objects"                                               
+```
+
+## CitiBike NYC
+
+A single public API that shows location, status and current availability for all stations in the New York City bike-sharing initiative.
+
+
+```r
+citibike <- fromJSON("http://citibikenyc.com/stations/json")
+stations <- citibike$stationBeanList
+colnames(stations)
+```
+
+```
+ [1] "id"                    "stationName"          
+ [3] "availableDocks"        "totalDocks"           
+ [5] "latitude"              "longitude"            
+ [7] "statusValue"           "statusKey"            
+ [9] "availableBikes"        "stAddress1"           
+[11] "stAddress2"            "city"                 
+[13] "postalCode"            "location"             
+[15] "altitude"              "testStation"          
+[17] "lastCommunicationTime" "landMark"             
+```
+
+```r
+nrow(stations)
+```
+
+```
+[1] 509
+```
+
+## Ergast
+
+The Ergast Developer API is an experimental web service which provides a historical record of motor racing data for non-commercial purposes.
+
+
+```r
+res <- fromJSON('http://ergast.com/api/f1/2004/1/results.json')
+drivers <- res$MRData$RaceTable$Races$Results[[1]]$Driver
+colnames(drivers)
+```
+
+```
+[1] "driverId"        "code"            "url"             "givenName"      
+[5] "familyName"      "dateOfBirth"     "nationality"     "permanentNumber"
+```
+
+```r
+drivers[1:10, c("givenName", "familyName", "code", "nationality")]
+```
+
+```
+   givenName    familyName code nationality
+1    Michael    Schumacher  MSC      German
+2     Rubens   Barrichello  BAR   Brazilian
+3   Fernando        Alonso  ALO     Spanish
+4       Ralf    Schumacher  SCH      German
+5       Juan Pablo Montoya  MON   Colombian
+6     Jenson        Button  BUT     British
+7      Jarno        Trulli  TRU     Italian
+8      David     Coulthard  COU     British
+9     Takuma          Sato  SAT    Japanese
+10 Giancarlo    Fisichella  FIS     Italian
+```
+
+
+## ProPublica
+
+Below is an example from the [ProPublica Nonprofit Explorer API](http://projects.propublica.org/nonprofits/api) where we retrieve the first 11 pages (pages 0 through 10) of tax-exempt organizations in the USA, ordered by revenue. The `rbind.pages` function is used to combine the pages into a single data frame.
+
+
+
+```r
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:10){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i), flatten=TRUE)
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+```
+
+```
+[1] 275
+```
+
+```r
+filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+```
+                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380
+```
+
+
+## New York Times
+
+The New York Times has several APIs as part of the NYT developer network. These provide access to data from various departments, such as news articles, book reviews, real estate, etc. Registration is required (but free) and a key can be obtained [here](http://developer.nytimes.com/docs/reference/keys). The code below includes some example keys for illustration purposes.
+
+
+```r
+#search for articles
+article_key <- "&api-key=c2fede7bd9aea57c898f538e5ec0a1ee:6:68700045"
+url <- "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=obamacare+socialism"
+req <- fromJSON(paste0(url, article_key))
+articles <- req$response$docs
+colnames(articles)
+```
+
+```
+ [1] "web_url"          "snippet"          "lead_paragraph"  
+ [4] "abstract"         "print_page"       "blog"            
+ [7] "source"           "multimedia"       "headline"        
+[10] "keywords"         "pub_date"         "document_type"   
+[13] "news_desk"        "section_name"     "subsection_name" 
+[16] "byline"           "type_of_material" "_id"             
+[19] "word_count"      
+```
+
+```r
+#search for best sellers
+bestseller_key <- "&api-key=5e260a86a6301f55546c83a47d139b0d:3:68700045"
+url <- "http://api.nytimes.com/svc/books/v2/lists/overview.json?published_date=2013-01-01"
+req <- fromJSON(paste0(url, bestseller_key))
+bestsellers <- req$results$list
+category1 <- bestsellers[[1, "books"]]
+subset(category1, select = c("author", "title", "publisher"))
+```
+
+```
+           author                title                  publisher
+1   Gillian Flynn            GONE GIRL           Crown Publishing
+2    John Grisham        THE RACKETEER Knopf Doubleday Publishing
+3       E L James FIFTY SHADES OF GREY Knopf Doubleday Publishing
+4 Nicholas Sparks           SAFE HAVEN   Grand Central Publishing
+5  David Baldacci        THE FORGOTTEN   Grand Central Publishing
+```
+
+```r
+#movie reviews
+movie_key <- "&api-key=5a3daaeee6bbc6b9df16284bc575e5ba:0:68700045"
+url <- "http://api.nytimes.com/svc/movies/v2/reviews/dvd-picks.json?order=by-date"
+req <- fromJSON(paste0(url, movie_key))
+reviews <- req$results
+colnames(reviews)
+```
+
+```
+ [1] "nyt_movie_id"     "display_title"    "sort_name"       
+ [4] "mpaa_rating"      "critics_pick"     "thousand_best"   
+ [7] "byline"           "headline"         "capsule_review"  
+[10] "summary_short"    "publication_date" "opening_date"    
+[13] "dvd_release_date" "date_updated"     "seo_name"        
+[16] "link"             "related_urls"     "multimedia"      
+```
+
+```r
+reviews[1:5, c("display_title", "byline", "mpaa_rating")]
+```
+
+```
+       display_title         byline mpaa_rating
+1    Tom at the Farm Stephen Holden          NR
+2     A Little Chaos Stephen Holden           R
+3           Big Game   Andy Webster        PG13
+4          Balls Out   Andy Webster           R
+5 Mad Max: Fury Road    A. O. Scott           R
+```
+
+## CrunchBase
+
+CrunchBase is the free database of technology companies, people, and investors that anyone can edit.
+
+
+```r
+key <- "f6dv6cas5vw7arn5b9d7mdm3"
+res <- fromJSON(paste0("http://api.crunchbase.com/v/1/search.js?query=R&api_key=", key))
+head(res$results)
+```
+
+## Sunlight Foundation
+
+The Sunlight Foundation is a non-profit that helps to make government transparent and accountable through data, tools, policy and journalism. Register a free key [here](http://sunlightfoundation.com/api/accounts/register/). An example key is provided.
+
+
+```r
+key <- "&apikey=39c83d5a4acc42be993ee637e2e4ba3d"
+
+#Find bills about drones
+drone_bills <- fromJSON(paste0("http://openstates.org/api/v1/bills/?q=drone", key))
+drone_bills$title <- substring(drone_bills$title, 1, 40)
+print(drone_bills[1:5, c("title", "state", "chamber", "type")])
+```
+
+```
+                                     title state chamber type
+1                            WILDLIFE-TECH    il   lower bill
+2 Criminalizes the unlawful use of an unma    ny   lower bill
+3 Criminalizes the unlawful use of an unma    ny   lower bill
+4 Relating to: criminal procedure and prov    wi   lower bill
+5 Relating to: criminal procedure and prov    wi   upper bill
+```
+
+```r
+#Congress mentioning "immigration"
+res <- fromJSON(paste0("http://capitolwords.org/api/1/dates.json?phrase=immigration", key))
+wordcount <- res$results
+wordcount$day <- as.Date(wordcount$day)
+summary(wordcount)
+```
+
+```
+     count              day               raw_count      
+ Min.   :   1.00   Min.   :1996-01-02   Min.   :   1.00  
+ 1st Qu.:   3.00   1st Qu.:2001-01-22   1st Qu.:   3.00  
+ Median :   8.00   Median :2005-11-16   Median :   8.00  
+ Mean   :  25.27   Mean   :2005-10-02   Mean   :  25.27  
+ 3rd Qu.:  21.00   3rd Qu.:2010-05-12   3rd Qu.:  21.00  
+ Max.   :1835.00   Max.   :2015-08-05   Max.   :1835.00  
+```
+
+```r
+#Local legislators
+legislators <- fromJSON(paste0("http://congress.api.sunlightfoundation.com/",
+  "legislators/locate?latitude=42.96&longitude=-108.09", key))
+subset(legislators$results, select=c("last_name", "chamber", "term_start", "twitter_id"))
+```
+
+```
+  last_name chamber term_start      twitter_id
+1    Lummis   house 2015-01-06   CynthiaLummis
+2      Enzi  senate 2015-01-06     SenatorEnzi
+3  Barrasso  senate 2013-01-03 SenJohnBarrasso
+```
+
+## Twitter
+
+The Twitter API requires OAuth 2.0 authentication. Some example code:
+
+
+```r
+#Create your own application key at https://dev.twitter.com/apps
+consumer_key = "EZRy5JzOH2QQmVAe9B4j2w";
+consumer_secret = "OIDC4MdfZJ82nbwpZfoUO4WOLTYjoRhpHRAWj6JMec";
+
+#Use basic auth
+library(httr)
+secret <- RCurl::base64(paste(consumer_key, consumer_secret, sep = ":"));
+req <- POST("https://api.twitter.com/oauth2/token",
+  add_headers(
+    "Authorization" = paste("Basic", secret),
+    "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"
+  ),
+  body = "grant_type=client_credentials"
+);
+
+#Extract the access token
+token <- paste("Bearer", content(req)$access_token)
+
+#Actual API call
+url <- "https://api.twitter.com/1.1/statuses/user_timeline.json?count=10&screen_name=Rbloggers"
+req <- GET(url, add_headers(Authorization = token))
+json <- content(req, as = "text")
+tweets <- fromJSON(json)
+substring(tweets$text, 1, 100)
+```
+
+```
+ [1] "Analysing longitudinal data: Multilevel growth models (II) http://t.co/unUxszG7VJ #rstats"           
+ [2] "RcppDE 0.1.4 http://t.co/3qPhFzoOpj #rstats"                                                         
+ [3] "Minimalist Maps http://t.co/fpkNznuCoX #rstats"                                                      
+ [4] "Tutorials freely available of course I taught: including ggplot2, dplyr and shiny http://t.co/WsxX4U"
+ [5] "Deploying Shiny apps with shinyapps.io http://t.co/tjef1pbKLt #rstats"                               
+ [6] "Bootstrap Evaluation of Clusters http://t.co/EbY7ziKCz5 #rstats"                                     
+ [7] "Add external code to Rmarkdown http://t.co/RCJEmS8gyP #rstats"                                       
+ [8] "Linear models with weighted observations http://t.co/pUoHpvxAGC #rstats"                             
+ [9] "dplyr 0.4.3 http://t.co/ze3zc8t7qj #rstats"                                                          
+[10] "xkcd survey and the power to shape the internet http://t.co/vNaKhxWxE4 #rstats"                      
+```
+
diff --git a/vignettes/json-apis.Rmd.orig b/vignettes/json-apis.Rmd.orig
new file mode 100644
index 0000000..da5df05
--- /dev/null
+++ b/vignettes/json-apis.Rmd.orig
@@ -0,0 +1,184 @@
+---
+title: "Fetching JSON data from REST APIs"
+date: "`r Sys.Date()`"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Fetching JSON data from REST APIs}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+```{r echo=FALSE}
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+#no longer needed with yajl
+#toJSON <- function(...){
+#  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+#}
+```
+
+This section lists some examples of public HTTP APIs that publish data in JSON format. These are great for getting a sense of the complex structures that are encountered in real-world JSON data. All services are free, but some require registration/authentication. Each example returns lots of data; therefore not all output is printed in this document.
+
+```{r message=FALSE}
+library(jsonlite)
+```
+
+## Github
+
+GitHub is an online code repository with APIs to get live data on almost all activity. Below are some examples for a well-known R package and author:
+
+```{r}
+hadley_orgs <- fromJSON("https://api.github.com/users/hadley/orgs")
+hadley_repos <- fromJSON("https://api.github.com/users/hadley/repos")
+gg_commits <- fromJSON("https://api.github.com/repos/hadley/ggplot2/commits")
+gg_issues <- fromJSON("https://api.github.com/repos/hadley/ggplot2/issues")
+
+#latest issues
+paste(format(gg_issues$user$login), ":", gg_issues$title)
+```
+
+## CitiBike NYC
+
+A single public API showing the location, status and current availability of all stations in the New York City bike-sharing initiative.
+
+```{r}
+citibike <- fromJSON("http://citibikenyc.com/stations/json")
+stations <- citibike$stationBeanList
+colnames(stations)
+nrow(stations)
+```
+
+## Ergast
+
+The Ergast Developer API is an experimental web service which provides a historical record of motor racing data for non-commercial purposes.
+
+```{r}
+res <- fromJSON('http://ergast.com/api/f1/2004/1/results.json')
+drivers <- res$MRData$RaceTable$Races$Results[[1]]$Driver
+colnames(drivers)
+drivers[1:10, c("givenName", "familyName", "code", "nationality")]
+```
+
+
+## ProPublica
+
+Below is an example from the [ProPublica Nonprofit Explorer API](http://projects.propublica.org/nonprofits/api) where we retrieve pages 0 through 10 of tax-exempt organizations in the USA, ordered by revenue. The `rbind.pages` function is used to combine the pages into a single data frame.
+
+
+```{r, message=FALSE}
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:10){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i), flatten=TRUE)
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+
+## New York Times
+
+The New York Times has several APIs as part of the NYT developer network. These provide interfaces to data from various departments, such as news articles, book reviews, real estate, etc. Registration is required (but free) and a key can be obtained [here](http://developer.nytimes.com/docs/reference/keys). The code below includes some example keys for illustration purposes.
+
+```{r}
+#search for articles
+article_key <- "&api-key=c2fede7bd9aea57c898f538e5ec0a1ee:6:68700045"
+url <- "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=obamacare+socialism"
+req <- fromJSON(paste0(url, article_key))
+articles <- req$response$docs
+colnames(articles)
+
+#search for best sellers
+bestseller_key <- "&api-key=5e260a86a6301f55546c83a47d139b0d:3:68700045"
+url <- "http://api.nytimes.com/svc/books/v2/lists/overview.json?published_date=2013-01-01"
+req <- fromJSON(paste0(url, bestseller_key))
+bestsellers <- req$results$list
+category1 <- bestsellers[[1, "books"]]
+subset(category1, select = c("author", "title", "publisher"))
+
+#movie reviews
+movie_key <- "&api-key=5a3daaeee6bbc6b9df16284bc575e5ba:0:68700045"
+url <- "http://api.nytimes.com/svc/movies/v2/reviews/dvd-picks.json?order=by-date"
+req <- fromJSON(paste0(url, movie_key))
+reviews <- req$results
+colnames(reviews)
+reviews[1:5, c("display_title", "byline", "mpaa_rating")]
+
+```
+
+## CrunchBase
+
+CrunchBase is the free database of technology companies, people, and investors that anyone can edit.
+
+```{r eval=FALSE}
+key <- "f6dv6cas5vw7arn5b9d7mdm3"
+res <- fromJSON(paste0("http://api.crunchbase.com/v/1/search.js?query=R&api_key=", key))
+head(res$results)
+```
+
+## Sunlight Foundation
+
+The Sunlight Foundation is a non-profit that helps to make government transparent and accountable through data, tools, policy and journalism. Register a free key [here](http://sunlightfoundation.com/api/accounts/register/). An example key is provided.
+
+```{r}
+key <- "&apikey=39c83d5a4acc42be993ee637e2e4ba3d"
+
+#Find bills about drones
+drone_bills <- fromJSON(paste0("http://openstates.org/api/v1/bills/?q=drone", key))
+drone_bills$title <- substring(drone_bills$title, 1, 40)
+print(drone_bills[1:5, c("title", "state", "chamber", "type")])
+
+#Congress mentioning "immigration"
+res <- fromJSON(paste0("http://capitolwords.org/api/1/dates.json?phrase=immigration", key))
+wordcount <- res$results
+wordcount$day <- as.Date(wordcount$day)
+summary(wordcount)
+
+#Local legislators
+legislators <- fromJSON(paste0("http://congress.api.sunlightfoundation.com/",
+  "legislators/locate?latitude=42.96&longitude=-108.09", key))
+subset(legislators$results, select=c("last_name", "chamber", "term_start", "twitter_id"))
+```
+
+## Twitter
+
+The Twitter API requires OAuth2 authentication. Some example code:
+
+```{r}
+#Create your own application key at https://dev.twitter.com/apps
+consumer_key <- "EZRy5JzOH2QQmVAe9B4j2w"
+consumer_secret <- "OIDC4MdfZJ82nbwpZfoUO4WOLTYjoRhpHRAWj6JMec"
+
+#Use basic auth to request a bearer token
+library(httr)
+secret <- RCurl::base64(paste(consumer_key, consumer_secret, sep = ":"))
+req <- POST("https://api.twitter.com/oauth2/token",
+  add_headers(
+    "Authorization" = paste("Basic", secret),
+    "Content-Type" = "application/x-www-form-urlencoded;charset=UTF-8"
+  ),
+  body = "grant_type=client_credentials"
+)
+
+#Extract the access token
+token <- paste("Bearer", content(req)$access_token)
+
+#Actual API call
+url <- "https://api.twitter.com/1.1/statuses/user_timeline.json?count=10&screen_name=Rbloggers"
+req <- GET(url, add_headers(Authorization = token))
+json <- content(req, as = "text")
+tweets <- fromJSON(json)
+substring(tweets$text, 1, 100)
+```
+
diff --git a/vignettes/json-mapping.Rnw.orig b/vignettes/json-mapping.Rnw.orig
new file mode 100644
index 0000000..3d9fd44
--- /dev/null
+++ b/vignettes/json-mapping.Rnw.orig
@@ -0,0 +1,583 @@
+%\VignetteEngine{knitr::knitr}
+%\VignetteIndexEntry{A mapping between JSON data and R objects}
+
+<<echo=FALSE>>=
+#For JSS
+#opts_chunk$set(prompt=TRUE, highlight=FALSE, background="white")
+#options(prompt = "R> ", continue = "+  ", width = 70, useFancyQuotes = FALSE)
+@
+
+%This is a template.
+%Actual text goes in sources/content.Rnw
+\documentclass{article}
+\author{Jeroen Ooms}
+
+%useful packages
+\usepackage{url}
+\usepackage{fullpage}
+\usepackage{xspace}
+\usepackage{booktabs}
+\usepackage{enumitem}
+\usepackage[hidelinks]{hyperref}
+\usepackage[round]{natbib}
+\usepackage{fancyvrb}
+\usepackage[toc,page]{appendix}
+\usepackage{breakurl}
+
+%for table positioning
+\usepackage{float}
+\restylefloat{table}
+
+%support for accents
+\usepackage[utf8]{inputenc}
+
+%support for ascii art
+\usepackage{pmboxdraw}
+
+%use vspace instead of indentation for paragraphs
+\usepackage{parskip}
+
+%extra line spacing
+\usepackage{setspace}
+\setstretch{1.25}
+
+%knitr style verbatim blocks
+\newenvironment{codeblock}{
+  \VerbatimEnvironment
+  \definecolor{shadecolor}{rgb}{0.95, 0.95, 0.95}\color{fgcolor}
+  \color{black}
+  \begin{kframe}
+  \begin{BVerbatim}
+}{
+  \end{BVerbatim}
+  \end{kframe}
+}
+
+%placeholders for JSS/RJournal
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\proglang}[1]{\texttt{#1}}
+
+%shorthands
+\newcommand{\JSON}{\texttt{JSON}\xspace}
+\newcommand{\R}{\proglang{R}\xspace}
+\newcommand{\C}{\proglang{C}\xspace}
+\newcommand{\toJSON}{\code{toJSON}\xspace}
+\newcommand{\fromJSON}{\code{fromJSON}\xspace}
+\newcommand{\XML}{\pkg{XML}\xspace}
+\newcommand{\jsonlite}{\pkg{jsonlite}\xspace}
+\newcommand{\RJSONIO}{\pkg{RJSONIO}\xspace}
+\newcommand{\API}{\texttt{API}\xspace}
+\newcommand{\JavaScript}{\proglang{JavaScript}\xspace}
+
+
+%trick for using same content file as chapter and article
+\newcommand{\maintitle}[1]{
+  \title{#1}
+  \maketitle
+}
+
+%actual document
+\begin{document}
+
+
+\maintitle{The \jsonlite Package: A Practical and Consistent Mapping Between \JSON Data and \R Objects}
+
+<<echo=FALSE, message=FALSE>>=
+library(jsonlite)
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+toJSON <- function(...){
+  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+}
+@
+
+\begin{abstract}
+A naive realization of \JSON data in \R maps \JSON \emph{arrays} to an unnamed list, and \JSON \emph{objects} to a named list. However, in practice a list is an awkward, inefficient type to store and manipulate data. Most statistical applications work with (homogeneous) vectors, matrices or data frames. Therefore \JSON packages in \R typically define certain special cases of \JSON structures which map to simpler \R types. Currently no formal guidelines or consensus exists on how \R data  [...]
+\end{abstract}
+
+
+\section{Introduction}
+
+\emph{JavaScript Object Notation} (\JSON) is a text format for the serialization of structured data \citep{crockford2006application}. It is derived from the object literals of \proglang{JavaScript}, as defined in the \proglang{ECMAScript} programming language standard \citep{ecma1999262}. The design of \JSON is simple and concise in comparison with other text-based formats, and it was originally proposed by Douglas Crockford as a ``fat-free alternative to \XML'' \citep{crockford2006json}. Th [...]
+
+The emphasis of this paper is not on discussing the \JSON format or any particular implementation for using \JSON with \R.  We refer to \cite{nolan2014xml} for a comprehensive introduction, or one of the many tutorials available on the web. Instead we take a high level view and discuss how \R data structures are most naturally represented in \JSON. This is not a trivial problem, particularly for complex or relational data as they frequently appear in statistical applications. Several \R  [...]
+
+%When relying on \JSON as the data interchange format, the mapping between \R objects and \JSON data must be consistent and unambiguous. Clients relying on \JSON to get data in and out of \R must know exactly what to expect in order to facilitate reliable communication, even if the data themselves are dynamic. Similarly, \R code using dynamic \JSON data from an external source is only reliable when the conversion from \JSON to \R is consistent. This document attempts to take away some of [...]
+
+\subsection{Parsing and type safety}
+
+The \JSON format specifies 4 primitive types (\texttt{string}, \texttt{number}, \texttt{boolean}, \texttt{null}) and two \emph{universal structures}:
+
+\begin{itemize} %[itemsep=3pt, topsep=5pt]
+  \item A \JSON \emph{object}: an unordered collection of zero or more name-value
+   pairs, where a name is a string and a value is a string, number,
+   boolean, null, object, or array.
+  \item A \JSON \emph{array}: an ordered sequence of zero or more values.
+\end{itemize}
+
+\noindent Both of these structures are heterogeneous; i.e. they are allowed to contain elements of different types. Therefore, the native \R realization of these structures is a \texttt{named list} for \JSON objects, and an \texttt{unnamed list} for \JSON arrays. However, in practice a list is an awkward, inefficient type for storing and manipulating data in \R.  Most statistical applications work with (homogeneous) vectors, matrices or data frames. In order to give these data structures a \JSON re [...]
+
+<<>>=
+txt <- '[12, 3, 7]'
+x <- fromJSON(txt)
+is(x)
+print(x)
+@
+
+This seems very reasonable, and it is the only practical solution to represent vectors in \JSON. However, the price we pay is that automatic simplification can compromise type safety in the context of dynamic data. For example, suppose an \R package uses \fromJSON to pull data from a \JSON \API on the web, and that for some particular combination of parameters the result includes a \texttt{null} value, e.g.\ \texttt{[12, null, 7]}. This is actually quite common; many \API's use \texttt{null} [...]
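+
+For instance, a quick sketch of how \jsonlite realizes this particular case: the \texttt{null} simply becomes an \texttt{NA} within the simplified numeric vector.
+
+<<>>=
+fromJSON('[12, null, 7]')
+@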
+
+The lesson here is that we need to be very specific and explicit about the mapping that is implemented to convert between \JSON data and \R objects. When relying on \JSON as a data interchange format, the behavior of the parser must be consistent and unambiguous. Clients relying on \JSON to get data in and out of \R must know exactly what to expect in order to facilitate reliable communication, even if the content of the data is dynamic. Similarly, \R code using dynamic \JSON data from a [...]
+
+% \subsection{A Bidirectional Mapping}
+%
+% - bidirectional: one-to-one correspondence between JSON and \R classes with minimal coersing.
+% - relation is functional in each direction: json interface to \R objects, and \R objects can be used to manipulate a JSON structure.
+% - Results in unique coupling between json and objects that makes it natural to manipulate JSON in \R, and access \R objects from their JSON representation.
+% - Mild assumption of consistency.
+% - Supported classes: vectors of type numeric, character, logical, data frame and matrix.
+% - Natural class is implicit in the structure, rather than explicitly encode using metadata.
+% - Will show examples of why this is powerful.
+
+\subsection[Reference implementation: the jsonlite package]{Reference implementation: the \jsonlite package}
+
+The \jsonlite package provides a reference implementation of the conventions proposed in this document. It is a fork of the \RJSONIO package by Duncan Temple Lang, which builds on the \texttt{libjson} \texttt{C++} library by Jonathan Wallace. \jsonlite uses the parser from \RJSONIO, but the \R code has been rewritten from scratch. Both packages implement \toJSON and \fromJSON functions, but their output is quite different. Finally, the \jsonlite package contains a large set of unit tests t [...]
+
+<<eval=FALSE>>=
+library(testthat)
+test_package("jsonlite")
+@
+
+Note that even though \JSON allows for inserting arbitrary white space and indentation, the unit tests assume that white space is trimmed.
+
+\subsection{Class-based versus type-based encoding}
+\label{serializejson}
+
+The \jsonlite package actually implements two systems for translating between \R objects and \JSON. This document focuses on the \toJSON and \fromJSON functions which use \R's class-based method dispatch. For all of the common classes in \R, the \jsonlite package implements \toJSON methods as described in this document. Users in \R can extend this system by implementing additional methods for other classes. This also means that classes that do not have the \toJSON method defined are not  [...]
+
+The alternative to class-based method dispatch is to use type-based encoding, which \jsonlite implements in the functions \texttt{serializeJSON} and \code{unserializeJSON}. All data structures in \R get stored in memory using one of the internal \texttt{SEXP} storage types, and \code{serializeJSON} defines an encoding schema which captures the type, value, and attributes for each storage type. The resulting \JSON closely resembles the internal structure of the underlying \C data types, a [...]
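+
+A minimal illustration of this type-based encoding for a plain double vector:
+
+<<>>=
+cat(serializeJSON(c(1, 2)))
+@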
+
+\subsection{Scope and limitations}
+
+Before continuing, we want to stress some limitations of encoding \R data structures in \JSON. Most importantly, there are limitations to the types of objects that can be represented. In general, temporary in-memory properties such as connections, file descriptors and (recursive) memory references are always difficult if not impossible to store in a sensible way, regardless of the language or serialization method. This document focuses on the common \R classes that hold \emph{data}, such [...]
+
+Then there are limitations introduced by the format. Because \JSON is a human readable, text-based format, it does not support binary data, and numbers are stored in their decimal notation. The latter leads to loss of precision for real numbers, depending on how many digits the user decides to print. Several dialects of \JSON exist, such as \texttt{BSON} \citep{chodorow2013mongodb} or \texttt{MSGPACK} \citep{msgpack}, which extend the format with various binary types. However, these form [...]
+
+Finally, as mentioned earlier, \fromJSON is not a perfect inverse function of \toJSON, as is the case for \code{serializeJSON} and \code{unserializeJSON}. The class based mappings are designed for concise and practical encoding of the various common data structures. Our implementation of \toJSON and \fromJSON approximates a reversible mapping between \R objects and \JSON for the standard data classes, but there are always limitations and edge cases. For example, the \JSON representati [...]
+
+% \subsection{Goals: Consistent and Practical}
+%
+% It can be helpful to see the problem from both sides. The \R user needs to interface external \JSON data from within \R.  This includes reading data from a public source/API, or posting a specific \JSON structure to an online service. From perspective of the \R user, \JSON data should be realized in \R using classes which are most natural in \R for a particular structure. A proper mapping is one which allows the \R user to read any incoming data or generate a specific \JSON structures  [...]
+%
+% Both sides come together in the context of an RPC service such as OpenCPU. OpenCPU exposes a HTTP API to let 3rd party clients call \R functions over HTTP. The function arguments are posted using \JSON and OpenCPU automatically converts these into \R objects to construct the \R function call. The return value of the function is then converted to \JSON and sent back to the client. To the client, the service works as a \JSON API, but it is implemented as standard \R function uses standar [...]
+%
+% \begin{itemize}
+%   \item{Recognize and comply with existing conventions of encoding common data structures in \JSON, in particular (relational) data sets.}
+%   \item{Consistently use a particular schema for a class of objects, including edge cases.}
+%   \item{Avoid R-specific peculiarities to minimize opportunities for misinterpretation.}
+%   \item{Mapping should optimally be reversible, but at least coercible for the standard classes.}
+%   \item{Robustness principle: be strict on output but tolerant on input.}
+% \end{itemize}
+
+
+\section[Converting between JSON data and R classes]{Converting between \JSON data and \R classes}
+
+This section lists examples of how the common \R classes are represented in \JSON. As explained before, the \toJSON function relies on method dispatch, which means that objects get encoded according to their \texttt{class} attribute. If an object has multiple \texttt{class} values, \R uses the first occurring class which has a \toJSON method. If none of the classes of an object has a \toJSON method, an error is raised.
+
+\subsection{Atomic vectors}
+
+The most basic data type in \R is the atomic vector. Atomic vectors hold an ordered, homogeneous set of values of type \texttt{logical} (booleans), \texttt{character} (strings), \texttt{raw} (bytes), \texttt{numeric} (doubles), \texttt{complex} (complex numbers with a real and imaginary part), or \texttt{integer}. Because \R is fully vectorized, there is no user level notion of a primitive: a scalar value is considered a vector of length 1. Atomic vectors map to \JSON arrays:
+
+<<>>=
+x <- c(1, 2, pi)
+toJSON(x)
+@
+
+The \JSON array is the only appropriate structure to encode a vector. Note that vectors in \R are homogeneous, whereas the \JSON array is heterogeneous; \JSON simply does not make this distinction.
+
+\subsubsection{Missing values}
+
+A typical domain-specific problem when working with statistical data is presented by missing values: a concept foreign to many other languages. Besides regular values, each vector type in \R except for \texttt{raw} can hold \texttt{NA} as a value. Vectors of type \texttt{double} and \texttt{complex} define three additional types of non-finite values: \texttt{NaN}, \texttt{Inf} and \texttt{-Inf}. The \JSON format does not natively support any of these types; therefore such values n [...]
+
+<<>>=
+x <- c(TRUE, FALSE, NA)
+toJSON(x)
+@
+
+The other option is to encode missing values as strings by wrapping them in double quotes:
+
+<<>>=
+x <- c(1,2,NA,NaN,Inf,10)
+toJSON(x)
+@
+
+Both methods result in valid \JSON, but both have a limitation: the problem with the \texttt{null} type is that it is impossible to distinguish between different types of missing data, which could be a problem for numeric vectors. The values \texttt{Inf}, \texttt{-Inf}, \texttt{NA} and \texttt{NaN} carry different meanings, and these should not get lost in the encoding. The problem with encoding missing values as strings is that this method cannot be used for character vectors, because  [...]
+
+\begin{itemize}
+ \item Missing values in non-numeric vectors (\texttt{logical}, \texttt{character}) are encoded as \texttt{null}.
+ \item Missing values in numeric vectors (\texttt{double}, \texttt{integer}, \texttt{complex}) are encoded as strings.
+\end{itemize}
+
+We expect that these conventions are most likely to result in the correct interpretation of missing values. Some examples:
+
+<<>>=
+toJSON(c(TRUE, NA, NA, FALSE))
+toJSON(c("FOO", "BAR", NA, "NA"))
+toJSON(c(3.14, NA, NaN, 21, Inf, -Inf))
+
+#Non-default behavior
+toJSON(c(3.14, NA, NaN, 21, Inf, -Inf), na="null")
+@
+
+\subsubsection{Special vector types: dates, times, factor, complex}
+
+Besides missing values, \JSON also lacks native support for some of the basic vector types in \R that frequently appear in data sets. These include vectors of class \texttt{Date}, \texttt{POSIXt} (timestamps), \texttt{factors} and \texttt{complex} vectors. By default, the \jsonlite package coerces these types to strings (using \texttt{as.character}):
+
+<<>>=
+toJSON(Sys.time() + 1:3)
+toJSON(as.Date(Sys.time()) + 1:3)
+toJSON(factor(c("foo", "bar", "foo")))
+toJSON(complex(real=runif(3), imaginary=rnorm(3)))
+@
+
+When parsing such \JSON strings, these values will appear as character vectors. In order to obtain the original types, the user needs to manually coerce them back to the desired type using the corresponding \texttt{as} function, e.g. \code{as.POSIXct}, \code{as.Date}, \code{as.factor} or \code{as.complex}. In this respect, \JSON is subject to the same limitations as text based formats such as \texttt{CSV}.
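+
+For example, a minimal round trip for a \texttt{Date} vector, where the parsed strings are coerced back manually:
+
+<<>>=
+dates <- as.Date("2015-09-06") + 1:2
+out <- fromJSON(toJSON(dates))
+is(out)
+as.Date(out)
+@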
+
+\subsubsection{Special cases: vectors of length 0 or 1}
+
+Two edge cases deserve special attention: vectors of length 0 and vectors of length 1. In \jsonlite these are encoded respectively as an empty array, and an array of length 1:
+
+<<>>=
+#vectors of length 0 and 1
+toJSON(vector())
+toJSON(pi)
+
+#vectors of length 0 and 1 in a named list
+toJSON(list(foo=vector()))
+toJSON(list(foo=pi))
+
+#vectors of length 0 and 1 in an unnamed list
+toJSON(list(vector()))
+toJSON(list(pi))
+@
+
+This might seem obvious, but these cases result in very different behavior between different \JSON packages. This is probably caused by the fact that \R does not have a scalar type, and some package authors decided to treat vectors of length 1 as if they were scalars. For example, in the current implementations, both \RJSONIO and \pkg{rjson} encode a vector of length one as a \JSON primitive when it appears within a list:
+
+<<>>=
+# Other packages make different choices:
+cat(rjson::toJSON(list(n = c(1))))
+cat(rjson::toJSON(list(n = c(1, 2))))
+@
+
+When encoding a single dataset this seems harmless, but in the context of dynamic data this inconsistency is almost guaranteed to cause bugs. For example, imagine an \R web service which lets the user fit a linear model and sends back the fitted parameter estimates as a \JSON array. The client code then parses the \JSON, and iterates over the array of coefficients to display them in a \texttt{GUI}. All goes well, until the user decides to fit a model with only one predictor. If the \JSON [...]
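+
+For cases where a scalar really is intended, \jsonlite provides the \code{unbox} function to explicitly mark a length-one vector as a scalar; a quick sketch:
+
+<<>>=
+#explicitly marked scalar versus the default array of length 1
+toJSON(list(n = unbox(1)))
+toJSON(list(n = 1))
+@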
+
+\subsection{Matrices}
+
+Arguably one of the strongest aspects of \R is its ability to interface libraries for basic linear algebra subprograms \citep{lawson1979basic} such as \texttt{LAPACK} \citep{anderson1999lapack}. These libraries provide well-tuned, high-performance implementations of important linear algebra operations to calculate anything from inner products and eigenvalues to singular value decompositions, which are in turn building blocks of statistical methods such as linear regression or principal co [...]
+
+<<>>=
+x <- matrix(1:12, nrow=3, ncol=4)
+print(x)
+print(x[2,4])
+@
+
+ A matrix is stored in memory as a single atomic vector with an attribute called \texttt{"dim"} defining the dimensions of the matrix. The product of the dimensions is equal to the length of the vector.
+
+<<>>=
+attributes(volcano)
+length(volcano)
+@
+
+ Even though the matrix is stored as a single vector, the way it is printed and indexed makes it conceptually a two-dimensional structure. In \jsonlite a matrix maps to an array of equal-length subarrays:
+
+<<>>=
+x <- matrix(1:12, nrow=3, ncol=4)
+toJSON(x)
+@
+
+We expect this representation will be the most intuitive to interpret, even in languages that do not have a native notion of a matrix. Note that even though \R stores matrices in \emph{column major} order, \jsonlite encodes matrices in \emph{row major} order. This is a more conventional and intuitive way to represent matrices and is consistent with the row-based encoding of data frames discussed in the next section. When the \JSON string is properly indented (recall that white space  [...]
+
+\begin{verbatim}
+[ [ 1, 4, 7, 10 ],
+  [ 2, 5, 8, 11 ],
+  [ 3, 6, 9, 12 ] ]
+\end{verbatim}
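+
+A quick check that this mapping is reversible: parsing an array of equal-length subarrays simplifies back into an \R matrix.
+
+<<>>=
+fromJSON("[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]")
+@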
+
+ Because the matrix is implemented in \R as an atomic vector, it automatically inherits the conventions mentioned earlier with respect to edge cases and missing values:
+
+<<>>=
+x <- matrix(c(1,2,4,NA), nrow=2)
+toJSON(x)
+toJSON(x, na="null")
+toJSON(matrix(pi))
+@
+
+
+\subsubsection{Matrix row and column names}
+
+Besides the \texttt{"dim"} attribute, the matrix class has an additional, optional attribute: \texttt{"dimnames"}. This attribute holds names for the rows and columns in the matrix. However, we decided not to include this information in the default \JSON mapping for matrices for several reasons. First of all, because this attribute is optional, either row or column names or both could be \texttt{NULL}. This makes it difficult to define a practical mapping that covers all cases with and w [...]
+
+When row or column names of a matrix seem to contain vital information, we might want to transform the data into a more appropriate structure. \cite{tidydata} calls this \emph{``tidying''} the data and outlines best practices on storing statistical data in its most appropriate form. He lists the issue where \emph{``column headers are values, not variable names''} as the most common source of untidy data. This often happens when the structure is optimized for presentation (e.g. printing), [...]
+
+<<>>=
+x <- matrix(c(NA,1,2,5,NA,3), nrow=3)
+row.names(x) <- c("Joe", "Jane", "Mary");
+colnames(x) <- c("Treatment A", "Treatment B")
+print(x)
+toJSON(x)
+@
+
+Wickham recommends that the data be \emph{melted} into its \emph{tidy} form. Once the data is tidy, the \JSON encoding will naturally contain the treatment values:
+
+<<>>=
+library(reshape2)
+y <- melt(x, varnames=c("Subject", "Treatment"))
+print(y)
+toJSON(y, pretty=TRUE)
+@
+
+In some other cases, the column headers actually do contain variable names, and melting is inappropriate. For data sets with records consisting of a set of named columns (fields), \R has a more natural and flexible class: the data frame. The \toJSON method for data frames (described later) is more suitable when we want to refer to rows or fields by their name. Any matrix can easily be converted to a data frame using the \code{as.data.frame} function:
+
+<<>>=
+toJSON(as.data.frame(x), pretty=TRUE)
+@
+
+For some cases this results in the desired output, but in this example melting seems more appropriate.
+
+\subsection{Lists}
+
+The \texttt{list} is the most general purpose data structure in \R.  It holds an ordered set of elements, including other lists, each of arbitrary type and size. Two types of lists are distinguished: named lists and unnamed lists. A list is considered a named list if it has an attribute called \texttt{"names"}. In practice, a named list is any list for which we can access an element by its name, whereas elements of an unnamed list can only be accessed using their index number:
+
+<<>>=
+mylist1 <- list("foo" = 123, "bar"= 456)
+print(mylist1$bar)
+mylist2 <- list(123, 456)
+print(mylist2[[2]])
+@
+
+\subsubsection{Unnamed lists}
+
+Just like vectors, an unnamed list maps to a \JSON array:
+
+<<>>=
+toJSON(list(c(1,2), "test", TRUE, list(c(1,2))))
+@
+
+Note that even though both vectors and lists are encoded using \JSON arrays, they can be distinguished by their contents: an \R vector results in a \JSON array containing only primitives, whereas a list results in a \JSON array containing only objects and arrays. This allows the \JSON parser to reconstruct the original type from encoded vectors and arrays:
+
+<<>>=
+x <- list(c(1,2,NA), "test", FALSE, list(foo="bar"))
+identical(fromJSON(toJSON(x)), x)
+@
+
+ The only exceptions are the empty list and the empty vector, which are both encoded as \texttt{[ ]} and are therefore indistinguishable, but this is rarely a problem in practice.
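+
+A minimal illustration of this edge case:
+
+<<>>=
+#both encode as an empty array
+toJSON(vector())
+toJSON(list())
+@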
+
+\subsubsection{Named lists}
+
+A named list in \R maps to a \JSON \emph{object}:
+
+<<>>=
+toJSON(list(foo=c(1,2), bar="test"))
+@
+
+ Because a list can contain other lists, this works recursively:
+
+<<tidy=FALSE>>=
+toJSON(list(foo=list(bar=list(baz=pi))))
+@
+
+ Named lists map almost perfectly to \JSON objects with one exception: list elements can have empty names:
+
+<<>>=
+x <- list(foo=123, "test", TRUE)
+attr(x, "names")
+x$foo
+x[[2]]
+@
+
+ Each element in a \JSON object must have a valid name. To ensure this property, \jsonlite uses the same solution as the \code{print} method, which is to fall back on indices for elements that do not have a proper name:
+
+<<>>=
+x <- list(foo=123, "test", TRUE)
+print(x)
+toJSON(x)
+@
+
+ This behavior ensures that all generated \JSON is valid; however, named lists with empty names should be avoided where possible. When designing \R objects that should be interoperable, it is recommended that each list element be given a proper name.
+
+\subsection{Data frame}
+
+The \texttt{data frame} is perhaps the most central data structure in \R from the user's point of view. This class holds tabular data in which each column is named and (usually) homogeneous. Conceptually it is very similar to a table in relational databases such as \texttt{MySQL}, where \emph{fields} are referred to as \emph{column names}, and \emph{records} are called \emph{rows}. Like a matrix, a data frame can be subsetted with two indices to extract certain rows and columns of the data:
+
+<<>>=
+is(iris)
+names(iris)
+print(iris[1:3, c(1,5)])
+print(iris[1:3, c("Sepal.Width", "Species")])
+@
+
+ For the previously discussed classes such as vectors and matrices, the behavior of \jsonlite is quite similar to that of the other available packages that implement \toJSON and \fromJSON functions, with only minor differences for missing values and edge cases. But when it comes to data frames, \jsonlite takes a completely different approach. The behavior of \jsonlite is designed for compatibility with conventional ways of encoding table-like structures outside the \R community. The implementation  [...]
+
+\subsubsection{Column based versus row based tables}
+
+Generally speaking, tabular data structures can be implemented in two different ways: in a column-based or a row-based fashion. A column-based structure consists of a named collection of equal-length, homogeneous arrays representing the table columns. In a row-based structure, on the other hand, the table is implemented as a set of heterogeneous associative arrays representing table rows with field values for each particular record. Even though most languages provide flexible and abstracte [...]
+
+The data frame class in \R is implemented in a column-based fashion: it consists of a \texttt{named list} of equal-length vectors. Thereby the columns in the data frame naturally inherit the properties of the atomic vectors discussed before, such as homogeneity, missing values, etc. Another argument for a column-based implementation is that statistical methods generally operate on columns. For example, the \code{lm} function fits a \emph{linear regression} by extracting the columns from a [...]
+
+Unfortunately \R is an exception in its preference for column-based storage: most languages, systems, databases, \API's, etc., are optimized for record-based operations. For this reason, the conventional way to store and communicate tabular data in \JSON seems to be almost exclusively row based. This discrepancy presents various complications when converting between data frames and \JSON. The remainder of this section discusses details and challenges of consistently mapping record-based \JSO [...]
+
+\subsubsection{Row based data frame encoding}
+
+The encoding of data frames is one of the major differences between \jsonlite and implementations from other currently available packages. Instead of using the column-based encoding also used for lists, \jsonlite maps data frames by default to an array of records:
+
+<<>>=
+toJSON(iris[1:2,], pretty=TRUE)
+@
+
+ This output looks a bit like a list of named lists. However, there is one major difference: the individual records contain \JSON primitives, whereas lists always contain \JSON objects or arrays:
+
+<<>>=
+toJSON(list(list(Species="Foo", Width=21)), pretty=TRUE)
+@
+
+ This leads to the following convention: when encoding \R objects, \JSON primitives only appear in vectors and data-frame rows. Primitives within a \JSON array indicate a vector, and primitives appearing inside a \JSON object indicate a data-frame row. A \JSON encoded \texttt{list} (named or unnamed) will never contain \JSON primitives. This is a subtle but important convention that helps to distinguish \R classes from their \JSON representation, without explicitly encoding any  [...]
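+
+To see this convention at work when parsing (a small sketch): primitives inside a \JSON object indicate a data-frame row, whereas primitives inside a \JSON array indicate a vector.
+
+<<>>=
+fromJSON('[{"bar": 42}]')  #a data frame
+fromJSON('[42]')           #a vector
+@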
+
+\subsubsection{Missing values in data frames}
+
+The section on atomic vectors discussed two methods of encoding missing data appearing in a vector: either using strings or using the \JSON \texttt{null} type. When a missing value appears in a data frame, there is a third option: simply omit the field from the \JSON record:
+
+<<>>=
+x <- data.frame(foo=c(FALSE, TRUE,NA,NA), bar=c("Aladdin", NA, NA, "Mario"))
+print(x)
+toJSON(x, pretty=TRUE)
+@
+
+ The default behavior of \jsonlite is to omit missing data from records in a data frame. This seems to be the most conventional method used on the web, and we expect this encoding will most likely lead to the correct interpretation of \emph{missingness}, even in languages without an explicit notion of \texttt{NA}.
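+
+If explicit \texttt{null} values are preferred over omitting fields, the \code{na} argument overrides the default (continuing the example above):
+
+<<>>=
+toJSON(x, na = "null", pretty = TRUE)
+@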
+
+\subsubsection{Relational data: nested records}
+
+Nested datasets are somewhat unusual in \R, but frequently encountered in \JSON. Such structures do not really fit the vector-based paradigm, which makes them harder to manipulate in \R.  However, nested structures are too common in \JSON to ignore, and with a little work most cases still map to a data frame quite nicely. The most common scenario is a dataset in which a certain field within each record contains a \emph{subrecord} with additional fields. The \jsonlite implementation maps t [...]
+
+<<tidy=FALSE>>=
+options(stringsAsFactors=FALSE)
+x <- data.frame(driver = c("Bowser", "Peach"), occupation = c("Koopa", "Princess"))
+x$vehicle <- data.frame(model = c("Piranha Prowler", "Royal Racer"))
+x$vehicle$stats <- data.frame(speed = c(55, 34), weight = c(67, 24), drift = c(35, 32))
+str(x)
+toJSON(x, pretty=TRUE)
+myjson <- toJSON(x)
+y <- fromJSON(myjson)
+identical(x,y)
+@
+
+ When encountering \JSON data containing nested records on the web, chances are that these data were generated from a \emph{relational} database. The \JSON field containing a subrecord represents a \emph{foreign key} pointing to a record in an external table. For the purpose of encoding these into a single \JSON structure, the tables were joined into a nested structure. The directly nested subrecord represents a \emph{one-to-one} or \emph{many-to-one} relation between the parent and child  [...]
+
+<<>>=
+y <- fromJSON(myjson, flatten=TRUE)
+str(y)
+@
+
+\subsubsection{Relational data: nested tables}
+
+The one-to-one relation discussed above is relatively easy to store in \R, because each record contains at most one subrecord. Therefore we can use either a nested data frame, or flatten the data frame. However, things get more difficult when \JSON records contain a field with a nested array. Such a structure appears in relational data in the case of a \emph{one-to-many} relation. A standard textbook illustration is the relation between authors and titles. For example, a field can contain an [...]
+
+<<tidy=FALSE>>=
+x <- data.frame(author = c("Homer", "Virgil", "Jeroen"))
+x$poems <- list(c("Iliad", "Odyssey"), c("Eclogues", "Georgics", "Aeneid"), vector());
+names(x)
+toJSON(x, pretty = TRUE)
+@
+
+ As can be seen from the example, the way to store this in a data frame is using a list of character vectors. This works, and although unconventional, we can still create and read such structures in \R relatively easily. However, in practice the one-to-many relation is often more complex. It results in fields containing a \emph{set of records}. In \R, the only way to model this is as a column containing a list of data frames, one separate data frame for each row:
+
+<<tidy=FALSE>>=
+x <- data.frame(author = c("Homer", "Virgil", "Jeroen"))
+x$poems <- list(
+  data.frame(title=c("Iliad", "Odyssey"), year=c(-1194, -800)),
+  data.frame(title=c("Eclogues", "Georgics", "Aeneid"), year=c(-44, -29, -19)),
+  data.frame()
+)
+toJSON(x, pretty=TRUE)
+@
+
+ Because \R doesn't have native support for relational data, there is no natural class to store such structures. The best we can do is a column containing a list of sub-dataframes. This does the job, and allows the \R user to access or generate nested \JSON structures. However, a data frame like this cannot be flattened, and the class does not guarantee that each of the individual nested data frames contains the same fields, as would be the case in an actual relational database.
+
+
+\section{Structural consistency and type safety in dynamic data}
+
+Systems that automatically exchange information over some interface, protocol or \API require a well-defined and unambiguous meaning and arrangement of data. In order to process and interpret input and output, contents must obey a stable structure. Such structures are usually described either informally in documentation or more formally in a schema language. The previous section emphasized the importance of consistency in the mapping between \JSON data and \R classes. This section takes a  [...]
+
+\subsection{Classes, types and data}
+
+Most object-oriented languages are designed with the idea that all objects of a certain class implement the same fields and methods. In strongly typed languages such as \proglang{S4} or \proglang{Java}, the names and types of the fields are formally declared in a class definition. In other languages such as \proglang{S3} or \proglang{JavaScript}, the fields are not enforced by the language but are rather left to the discretion of the programmer. One way or another they assume that members of a certain  [...]
+
+Some data interchange formats such as \texttt{XML} or \texttt{Protocol Buffers} take a formal approach to this matter, and have well established \emph{schema languages} and \emph{interface description languages}. Using such a meta language it is possible to define the exact structure, properties and actions of data interchange in a formal arrangement. However, in \JSON, such formal definitions are relatively uncommon. Some initiatives for \JSON schema languages exist \citep{jsonschema},  [...]
+
+\subsection{Rule 1: Fixed keys}
+
+When using \JSON without a schema, there are no restrictions on the keys (field names) that can appear in a particular object. However, a source of data that returns a different set of keys every time it is called makes it very difficult to write software to process these data. Hence, the first rule is to limit \JSON interfaces to a finite set of keys that are known \emph{a priori} by all parties. It can be helpful to think about this in analogy with, for example, a relational database. He [...]
+
+A beautiful example of this in practice was given by Mike Dewar at the New York Open Statistical Programming Meetup on Jan. 12, 2012 \citep{jsonkeys}. In his talk he emphasizes using \JSON keys only for \emph{names}, and not for \emph{data}. He refers to this principle as the ``golden rule'', and explains how he learned his lesson the hard way. In one of his early applications, time series data were encoded by using the epoch timestamp as the \JSON key. Therefore the keys are different ea [...]
+
+\begin{verbatim}
+[
+  { "1325344443" : 124 },
+  { "1325344456" : 131 },
+  { "1325344478" : 137 }
+]
+\end{verbatim}
+
+ Even though this is valid \JSON, dynamic keys as in the example above are likely to introduce trouble. Most software will have great difficulty processing these values if we cannot specify the keys in the code. Moreover, when documenting the API, either informally or formally using a schema language, we need to describe for each property in the data what the value means and is composed of. Thereby a client or consumer can implement code that interprets and processes each element in the data [...]
+
+\begin{verbatim}
+[
+  { "time": "1325344443" : "price": 124 },
+  { "time": "1325344456" : "price": 131 },
+  { "time": "1325344478" : "price": 137 }
+]
+\end{verbatim}
+
+ This structure will play much nicer with existing software that assumes fixed keys. Moreover, the structure can easily be described in documentation, or captured in a schema. Even when we have no intention of writing documentation or a schema for a dynamic \JSON source, it is still wise to design the structure in such a way that it \emph{could} be described by a schema. When the keys are fixed, a well chosen example can provide all the information required for the consumer to implement c [...]
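+
+As a quick sketch of the payoff in \R: the fixed-key structure above parses directly into a data frame.
+
+<<>>=
+fromJSON('[
+  {"time": "1325344443", "price": 124},
+  {"time": "1325344456", "price": 131},
+  {"time": "1325344478", "price": 137}
+]')
+@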
+
+In the context of \R, consistency of keys is closely related to Wickham's concept of \emph{tidy data} discussed earlier. Wickham states that the most common reason for messy data is column headers containing values instead of variable names. Column headers in tabular datasets become keys when converted to \JSON. Therefore, when headers are actually values, \JSON keys in fact contain data and can become unpredictable. The cure for inconsistent keys is almost always to tidy the data accord [...]
+
+\subsection{Rule 2: Consistent types}
+
+In a strongly typed language, fields declare their class before any values are assigned. Thereby the type of a given field is identical in all objects of a particular class, and arrays only contain objects of a single type. The \proglang{S3} system in \R is weakly typed and puts no formal restrictions on the class of a certain property, or the types of objects that can be combined into a collection. For example, the list below contains a character vector, a numeric vector and a list:
+
+<<>>=
+#Heterogeneous lists are bad!
+x <- list("FOO", 1:3, list("bar"=pi))
+toJSON(x)
+@
+
+ However, even though it is possible to generate such \JSON, it is bad practice. Fields or collections with ambiguous object types are difficult to describe, interpret and process in the context of inter-system communication. When using \JSON to exchange dynamic data, it is important that each property and array is \emph{type consistent}. In dynamically typed languages, the programmer needs to make sure that properties are of the correct type before encoding into \JSON. For \R, this means [...]
+
+ Note that consistency is somewhat subjective as it refers to the \emph{meaning} of the elements; they do not necessarily have precisely the same structure. What is important is to keep in mind that the consumer of the data can interpret and process each element identically, e.g. iterate over the elements in the collection and apply the same method to each of them. To illustrate this, let's take the example of the data frame:
+
+<<>>=
+#conceptually homogeneous array
+x <- data.frame(name=c("Jay", "Mary", NA, NA), gender=c("M", NA, NA, "F"))
+toJSON(x, pretty=TRUE)
+@
+
+The \JSON array above has four elements, each of which is a \JSON object. However, due to the \texttt{NA} values, some records have more fields than others. But as long as they are conceptually the same type (e.g. a person), the consumer can iterate over the elements to process each person in the set according to a predefined action. For example, each element could be used to construct a \texttt{Person} object. A collection of different object classes should be separated and organized using a n [...]
+
+<<tidy=FALSE>>=
+x <- list(
+  humans = data.frame(name = c("Jay", "Mary"), married = c(TRUE, FALSE)),
+  horses = data.frame(name = c("Star", "Dakota"), price = c(5000, 30000))
+)
+toJSON(x, pretty=TRUE)
+@
+
+ This might seem obvious, but dynamic languages such as \R can make it dangerously tempting to generate data containing mixed-type collections. Such inconsistent typing makes it very difficult to consume the data and creates a likely source of nasty bugs. Using consistent field names/types and homogeneous \JSON arrays is a strong convention among public \JSON \API's, for good reasons. We recommend that \R users respect these conventions when generating \JSON data in \R.
+
+
+%references
+\bibliographystyle{plainnat}
+\bibliography{references}
+
+%end
+\end{document}
diff --git a/vignettes/json-mapping.pdf.asis b/vignettes/json-mapping.pdf.asis
new file mode 100644
index 0000000..1b7eb64
--- /dev/null
+++ b/vignettes/json-mapping.pdf.asis
@@ -0,0 +1,6 @@
+%\VignetteIndexEntry{A mapping between JSON data and R objects}
+%\VignetteEngine{R.rsp::asis}
+%\VignetteKeyword{PDF}
+%\VignetteKeyword{HTML}
+%\VignetteKeyword{vignette}
+%\VignetteKeyword{package}
diff --git a/vignettes/json-opencpu.Rnw b/vignettes/json-opencpu.Rnw
new file mode 100644
index 0000000..fae8b9f
--- /dev/null
+++ b/vignettes/json-opencpu.Rnw
@@ -0,0 +1,132 @@
+%\VignetteEngine{knitr::knitr}
+%\VignetteIndexEntry{Simple JSON RPC with OpenCPU}
+
+%This is a template.
+%Actual text goes in sources/content.Rnw
+\documentclass{article}
+\author{Jeroen Ooms}
+
+%useful packages
+\usepackage{url}
+\usepackage{fullpage}
+\usepackage{xspace}
+\usepackage{hyperref}
+\usepackage{fancyvrb}
+
+%for table positioning
+\usepackage{float}
+\restylefloat{table}
+
+%support for accents
+\usepackage[utf8]{inputenc}
+
+%support for ascii art
+\usepackage{pmboxdraw}
+
+%use vspace instead of indentation for paragraphs
+\usepackage{parskip}
+
+%extra line spacing
+\usepackage{setspace}
+\setstretch{1.25}
+
+%knitr style verbatim blocks
+\newenvironment{codeblock}{
+  \VerbatimEnvironment
+  \definecolor{shadecolor}{rgb}{0.95, 0.95, 0.95}\color{fgcolor}
+  \color{black}
+  \begin{kframe}
+  \begin{BVerbatim}
+}{
+  \end{BVerbatim}
+  \end{kframe}
+}
+
+%placeholders for JSS/RJournal
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\file}[1]{\texttt{#1}}
+\newcommand{\dfn}[1]{\emph{#1}}
+\newcommand{\proglang}[1]{\texttt{#1}}
+
+%shorthands
+\newcommand{\JSON}{\texttt{JSON}\xspace}
+\newcommand{\R}{\texttt{R}\xspace}
+\newcommand{\C}{\texttt{C}\xspace}
+\newcommand{\toJSON}{\texttt{toJSON}\xspace}
+\newcommand{\fromJSON}{\texttt{fromJSON}\xspace}
+\newcommand{\XML}{\pkg{XML}\xspace}
+\newcommand{\jsonlite}{\pkg{jsonlite}\xspace}
+\newcommand{\RJSONIO}{\pkg{RJSONIO}\xspace}
+\newcommand{\API}{\texttt{API}\xspace}
+\newcommand{\JavaScript}{\texttt{JavaScript}\xspace}
+
+%trick for using same content file as chapter and article
+\newcommand{\maintitle}[1]{
+  \title{#1}
+  \maketitle
+}
+
+%actual document
+\begin{document}
+
+
+
+\section*{Simple \JSON RPC with OpenCPU}
+
+The \jsonlite package is used by \texttt{OpenCPU} to convert between \JSON data and \R objects. Thereby clients can retrieve \R objects, or remotely call \R functions, using \JSON, where both the function arguments and the function return value are \JSON objects. For example, to download the \texttt{Boston} data from the \texttt{MASS} package:\\
+
+\begin{tabular}{|l|l|}
+  \hline
+     \textbf{Command in R} & \textbf{Example URL on OpenCPU} \\
+  \hline
+     \texttt{toJSON(Boston, digits=4)} & \url{https://demo.ocpu.io/MASS/data/Boston/json?digits=4} \\
+  \hline
+     \texttt{toJSON(Boston, dataframe="col")} & \url{https://demo.ocpu.io/MASS/data/Boston/json?dataframe=col} \\
+  \hline
+     \texttt{toJSON(Boston, pretty=FALSE)} & \url{https://demo.ocpu.io/MASS/data/Boston/json?pretty=false} \\
+  \hline
+\end{tabular}
+\newline
+
+To calculate the variance of the numbers \texttt{1:9} from the command line using \texttt{curl}:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/stats/R/var/json -d "x=[1,2,3,4,5,6,7,8,9]"
+\end{Verbatim}
+
+Or equivalently post the entire body in \JSON format:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/stats/R/var/json -H "Content-Type: application/json" \
+-d "{\"x\":[1,2,3,4,5,6,7,8,9]}"
+\end{Verbatim}
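+
+The same request can also be made from \R itself; a minimal sketch using the \pkg{httr} package (assuming it is installed), which serializes the request body to \JSON via \jsonlite:
+
+<<eval=FALSE>>=
+library(httr)
+req <- POST("https://demo.ocpu.io/stats/R/var/json",
+  body = list(x = 1:9), encode = "json")
+cat(content(req, as = "text"))
+@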
+
+Below is an example where we call the \texttt{melt} function from the \texttt{reshape2} package using some example rows from the \texttt{airquality} data. Here both the input and the output are data frames.
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/reshape2/R/melt/json -d 'id=["Month", "Day"]&data=[
+  { "Ozone" : 41, "Solar.R" : 190, "Wind" : 7.4, "Temp" : 67, "Month" : 5, "Day" : 1 },
+  { "Ozone" : 36, "Solar.R" : 118, "Wind" : 8, "Temp" : 72, "Month" : 5, "Day" : 2 } ]'
+\end{Verbatim}
+
+Or equivalently:
+
+\begin{Verbatim}[frame=single]
+curl https://demo.ocpu.io/reshape2/R/melt/json -H "Content-Type: application/json" \
+  -d '{"id" : ["Month", "Day"], "data" : [
+    { "Ozone" : 41, "Solar.R" : 190, "Wind" : 7.4, "Temp" : 67, "Month" : 5, "Day" : 1 },
+    { "Ozone" : 36, "Solar.R" : 118, "Wind" : 8, "Temp" : 72, "Month" : 5, "Day" : 2 }
+  ] }'
+\end{Verbatim}
+
+This request basically executes the following \R code:
+
+<<eval=FALSE>>=
+mydata <- airquality[1:2,]
+y <- reshape2::melt(data = mydata, id = c("Month", "Day"))
+toJSON(y)
+@
+
+%end
+\end{document}
diff --git a/vignettes/json-paging.Rmd b/vignettes/json-paging.Rmd
new file mode 100644
index 0000000..14860b2
--- /dev/null
+++ b/vignettes/json-paging.Rmd
@@ -0,0 +1,223 @@
+---
+title: "Combining pages of JSON data with jsonlite"
+date: "2015-09-06"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Combining pages of JSON data with jsonlite}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+The [jsonlite](https://cran.r-project.org/package=jsonlite) package is a `JSON` parser/generator for R which is optimized for pipelines and web APIs. It is used by the OpenCPU system and many other packages to get data in and out of R using the `JSON` format.
+
+## A bidirectional mapping
+
+One of the main strengths of `jsonlite` is that it implements a bidirectional [mapping](http://arxiv.org/abs/1403.2805) between JSON and data frames. Thereby it can convert nested collections of JSON records, as they often appear on the web, immediately into the appropriate R structure. For example, to grab some data from ProPublica, we can simply use:
+
+
+```r
+library(jsonlite)
+mydata <- fromJSON("https://projects.propublica.org/forensics/geos.json", flatten = TRUE)
+View(mydata)
+```
+
+The `mydata` object is a data frame which can be used directly for modeling or visualization, without the need for any further complicated data manipulation.
+
+## Paging with jsonlite
+
+A question that comes up frequently is how to combine pages of data. Most web APIs limit the amount of data that can be retrieved per request. If the client needs more data than fits in a single request, it needs to break the data down into multiple requests that each retrieve a fragment (page) of data, not unlike pages in a book. In practice this is often implemented using a `page` parameter in the API. Below is an example from the [ProPublica Nonprofit Explorer API](http://projec [...]
+
+
+```r
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+mydata0 <- fromJSON(paste0(baseurl, "&page=0"), flatten = TRUE)
+mydata1 <- fromJSON(paste0(baseurl, "&page=1"), flatten = TRUE)
+mydata2 <- fromJSON(paste0(baseurl, "&page=2"), flatten = TRUE)
+
+#The actual data is in the filings element
+mydata0$filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+```
+                              organization.sub_name organization.city
+1                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+2                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+3                 KAISER FOUNDATION HEALTH PLAN INC           OAKLAND
+4  DAVIDSON COUNTY COMMUNITY COLLEGE FOUNDATION INC         LEXINGTON
+5                       KAISER FOUNDATION HOSPITALS           OAKLAND
+6                       KAISER FOUNDATION HOSPITALS           OAKLAND
+7                       KAISER FOUNDATION HOSPITALS           OAKLAND
+8                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+9                   PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+10                  PARTNERS HEALTHCARE SYSTEM INC        CHARLESTOWN
+    totrevenue
+1  42346486950
+2  40148558254
+3  37786011714
+4  30821445312
+5  20013171194
+6  18543043972
+7  17980030355
+8  10619215354
+9  10452560305
+10  9636630380
+```
+
+To analyze or visualize these data, we need to combine the pages into a single dataset. We can do this with the `rbind.pages` function. Note that in this example, the actual data is contained in the `filings` field:
+
+
+```r
+#Rows per data frame
+nrow(mydata0$filings)
+```
+
+```
+[1] 25
+```
+
+```r
+#Combine data frames
+filings <- rbind.pages(
+  list(mydata0$filings, mydata1$filings, mydata2$filings)
+)
+
+#Total number of rows
+nrow(filings)
+```
+
+```
+[1] 75
+```
+
+## Automatically combining many pages
+
+We can write a simple loop that automatically downloads and combines many pages. For example, to retrieve pages 0 through 20 of non-profits from the example above:
+
+
+```r
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:20){
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  message("Retrieving page ", i)
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+```
+
+```
+[1] 525
+```
+
+```r
+colnames(filings)
+```
+
+```
+  [1] "tax_prd"               "tax_prd_yr"           
+  [3] "formtype"              "pdf_url"              
+  [5] "updated"               "totrevenue"           
+  [7] "totfuncexpns"          "totassetsend"         
+  [9] "totliabend"            "pct_compnsatncurrofcr"
+ [11] "tax_pd"                "subseccd"             
+ [13] "unrelbusinccd"         "initiationfees"       
+ [15] "grsrcptspublicuse"     "grsincmembers"        
+ [17] "grsincother"           "totcntrbgfts"         
+ [19] "totprgmrevnue"         "invstmntinc"          
+ [21] "txexmptbndsproceeds"   "royaltsinc"           
+ [23] "grsrntsreal"           "grsrntsprsnl"         
+ [25] "rntlexpnsreal"         "rntlexpnsprsnl"       
+ [27] "rntlincreal"           "rntlincprsnl"         
+ [29] "netrntlinc"            "grsalesecur"          
+ [31] "grsalesothr"           "cstbasisecur"         
+ [33] "cstbasisothr"          "gnlsecur"             
+ [35] "gnlsothr"              "netgnls"              
+ [37] "grsincfndrsng"         "lessdirfndrsng"       
+ [39] "netincfndrsng"         "grsincgaming"         
+ [41] "lessdirgaming"         "netincgaming"         
+ [43] "grsalesinvent"         "lesscstofgoods"       
+ [45] "netincsales"           "miscrevtot11e"        
+ [47] "compnsatncurrofcr"     "othrsalwages"         
+ [49] "payrolltx"             "profndraising"        
+ [51] "txexmptbndsend"        "secrdmrtgsend"        
+ [53] "unsecurednotesend"     "retainedearnend"      
+ [55] "totnetassetend"        "nonpfrea"             
+ [57] "gftgrntsrcvd170"       "txrevnuelevied170"    
+ [59] "srvcsval170"           "grsinc170"            
+ [61] "grsrcptsrelated170"    "totgftgrntrcvd509"    
+ [63] "grsrcptsadmissn509"    "txrevnuelevied509"    
+ [65] "srvcsval509"           "subtotsuppinc509"     
+ [67] "totsupp509"            "ein"                  
+ [69] "organization"          "eostatus"             
+ [71] "tax_yr"                "operatingcd"          
+ [73] "assetcdgen"            "transinccd"           
+ [75] "subcd"                 "grscontrgifts"        
+ [77] "intrstrvnue"           "dividndsamt"          
+ [79] "totexcapgn"            "totexcapls"           
+ [81] "grsprofitbus"          "otherincamt"          
+ [83] "compofficers"          "contrpdpbks"          
+ [85] "totrcptperbks"         "totexpnspbks"         
+ [87] "excessrcpts"           "totexpnsexempt"       
+ [89] "netinvstinc"           "totaxpyr"             
+ [91] "adjnetinc"             "invstgovtoblig"       
+ [93] "invstcorpstk"          "invstcorpbnd"         
+ [95] "totinvstsec"           "fairmrktvalamt"       
+ [97] "undistribincyr"        "cmpmininvstret"       
+ [99] "sec4940notxcd"         "sec4940redtxcd"       
+[101] "infleg"                "contractncd"          
+[103] "claimstatcd"           "propexchcd"           
+[105] "brwlndmnycd"           "furngoodscd"          
+[107] "paidcmpncd"            "trnsothasstscd"       
+[109] "agremkpaycd"           "undistrinccd"         
+[111] "dirindirintcd"         "invstjexmptcd"        
+[113] "propgndacd"            "excesshldcd"          
+[115] "grntindivcd"           "nchrtygrntcd"         
+[117] "nreligiouscd"          "grsrents"             
+[119] "costsold"              "totrcptnetinc"        
+[121] "trcptadjnetinc"        "topradmnexpnsa"       
+[123] "topradmnexpnsb"        "topradmnexpnsd"       
+[125] "totexpnsnetinc"        "totexpnsadjnet"       
+[127] "othrcashamt"           "mrtgloans"            
+[129] "othrinvstend"          "fairmrktvaleoy"       
+[131] "mrtgnotespay"          "tfundnworth"          
+[133] "invstexcisetx"         "sect511tx"            
+[135] "subtitleatx"           "esttaxcr"             
+[137] "txwithldsrc"           "txpaidf2758"          
+[139] "erronbkupwthld"        "estpnlty"             
+[141] "balduopt"              "crelamt"              
+[143] "tfairmrktunuse"        "distribamt"           
+[145] "adjnetinccola"         "adjnetinccolb"        
+[147] "adjnetinccolc"         "adjnetinccold"        
+[149] "adjnetinctot"          "qlfydistriba"         
+[151] "qlfydistribb"          "qlfydistribc"         
+[153] "qlfydistribd"          "qlfydistribtot"       
+[155] "valassetscola"         "valassetscolb"        
+[157] "valassetscolc"         "valassetscold"        
+[159] "valassetstot"          "qlfyasseta"           
+[161] "qlfyassetb"            "qlfyassetc"           
+[163] "qlfyassetd"            "qlfyassettot"         
+[165] "endwmntscola"          "endwmntscolb"         
+[167] "endwmntscolc"          "endwmntscold"         
+[169] "endwmntstot"           "totsuprtcola"         
+[171] "totsuprtcolb"          "totsuprtcolc"         
+[173] "totsuprtcold"          "totsuprttot"          
+[175] "pubsuprtcola"          "pubsuprtcolb"         
+[177] "pubsuprtcolc"          "pubsuprtcold"         
+[179] "pubsuprttot"           "grsinvstinca"         
+[181] "grsinvstincb"          "grsinvstincc"         
+[183] "grsinvstincd"          "grsinvstinctot"       
+```
+
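+If the total number of pages is not known in advance, a variation of the loop can keep requesting pages until the API returns an empty result. The sketch below assumes that an empty `filings` element marks the end of the data; check how the particular API signals the last page before relying on this:
+
+```r
+#keep paging until a request returns no data (assumed end-of-data signal)
+pages <- list()
+i <- 0
+repeat{
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  if(length(mydata$filings) == 0 || nrow(mydata$filings) == 0) break
+  pages[[i+1]] <- mydata$filings
+  i <- i + 1
+}
+filings <- rbind.pages(pages)
+```
+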
+From here, we can go straight to analyzing the filings data without any further tedious data manipulation.
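+
+For example, a quick first look at the ten largest filings by total revenue (using column names from the output above):
+
+```r
+#the ten largest filings by total revenue
+top <- filings[order(filings$totrevenue, decreasing = TRUE), ]
+head(top[c("totrevenue", "tax_prd_yr")], 10)
+```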
diff --git a/vignettes/json-paging.Rmd.orig b/vignettes/json-paging.Rmd.orig
new file mode 100644
index 0000000..25b5a8f
--- /dev/null
+++ b/vignettes/json-paging.Rmd.orig
@@ -0,0 +1,92 @@
+---
+title: "Combining pages of JSON data with jsonlite"
+date: "`r Sys.Date()`"
+output:
+  html_document
+vignette: >
+  %\VignetteIndexEntry{Combining pages of JSON data with jsonlite}
+  %\VignetteEngine{knitr::rmarkdown}
+  \usepackage[utf8]{inputenc}
+---
+
+
+```{r echo=FALSE}
+library(knitr)
+opts_chunk$set(comment="")
+
+#this replaces tabs by spaces because latex-verbatim doesn't like tabs
+toJSON <- function(...){
+  gsub("\t", "  ", jsonlite::toJSON(...), fixed=TRUE);
+}
+```
+
+```{r echo=FALSE, message=FALSE}
+library(jsonlite)
+```
+
+The [jsonlite](https://cran.r-project.org/package=jsonlite) package is a `JSON` parser/generator for R that is optimized for pipelines and web APIs. It is used by the OpenCPU system and many other packages to get data in and out of R using the `JSON` format.
+
+## A bidirectional mapping
+
+One of the main strengths of `jsonlite` is that it implements a bidirectional [mapping](http://arxiv.org/abs/1403.2805) between JSON and data frames. This means it can convert nested collections of JSON records, as they often appear on the web, directly into the appropriate R structure. For example, to grab some data from ProPublica, we can simply use:
+
+```{r eval=FALSE}
+library(jsonlite)
+mydata <- fromJSON("https://projects.propublica.org/forensics/geos.json", flatten = TRUE)
+View(mydata)
+```
+
+The `mydata` object is a data frame that can be used directly for modeling or visualization, without any further complicated data manipulation.
+
+## Paging with jsonlite
+
+A question that comes up frequently is how to combine pages of data. Most web APIs limit the amount of data that can be retrieved per request. If the client needs more data than fits in a single request, it must break the retrieval into multiple requests that each fetch a fragment (page) of the data, not unlike pages in a book. In practice this is often implemented with a `page` parameter in the API. Below is an example from the [ProPublica Nonprofit Explorer API](http://projec [...]
+
+```{r}
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+mydata0 <- fromJSON(paste0(baseurl, "&page=0"), flatten = TRUE)
+mydata1 <- fromJSON(paste0(baseurl, "&page=1"), flatten = TRUE)
+mydata2 <- fromJSON(paste0(baseurl, "&page=2"), flatten = TRUE)
+
+#The actual data is in the filings element
+mydata0$filings[1:10, c("organization.sub_name", "organization.city", "totrevenue")]
+```
+
+To analyze or visualize these data, we need to combine the pages into a single dataset. We can do this with the `rbind.pages` function. Note that in this example, the actual data is contained in the `filings` field:
+
+```{r}
+#Rows per data frame
+nrow(mydata0$filings)
+
+#Combine data frames
+filings <- rbind.pages(
+  list(mydata0$filings, mydata1$filings, mydata2$filings)
+)
+
+#Total number of rows
+nrow(filings)
+```
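+
+A useful property of `rbind.pages` is that the pages do not need to have identical columns: fields that are missing from a page are filled with `NA` in the combined data frame. A minimal toy sketch (separate from the API example above) illustrates this:
+
+```{r}
+#rbind.pages fills fields missing from some pages with NA
+df1 <- data.frame(x = 1:2, y = c("a", "b"))
+df2 <- data.frame(x = 3:4, z = c(TRUE, FALSE))
+rbind.pages(list(df1, df2))
+```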
+
+## Automatically combining many pages
+
+We can write a simple loop that automatically downloads and combines many pages. For example, to retrieve the first 21 pages (pages 0 through 20) of non-profits from the example above:
+
+```{r, message=FALSE}
+#store all pages in a list first
+baseurl <- "https://projects.propublica.org/nonprofits/api/v1/search.json?order=revenue&sort_order=desc"
+pages <- list()
+for(i in 0:20){
+  message("Retrieving page ", i)
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  pages[[i+1]] <- mydata$filings
+}
+
+#combine all into one
+filings <- rbind.pages(pages)
+
+#check output
+nrow(filings)
+colnames(filings)
+```
+
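+If the total number of pages is not known in advance, a variation of the loop can keep requesting pages until the API returns an empty result. The sketch below assumes that an empty `filings` element marks the end of the data; check how the particular API signals the last page before relying on this:
+
+```{r, eval=FALSE}
+#keep paging until a request returns no data (assumed end-of-data signal)
+pages <- list()
+i <- 0
+repeat{
+  mydata <- fromJSON(paste0(baseurl, "&page=", i))
+  if(length(mydata$filings) == 0 || nrow(mydata$filings) == 0) break
+  pages[[i+1]] <- mydata$filings
+  i <- i + 1
+}
+filings <- rbind.pages(pages)
+```
+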
+From here, we can go straight to analyzing the filings data without any further tedious data manipulation.
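+
+For example, a quick first look at the ten largest filings by total revenue:
+
+```{r}
+#the ten largest filings by total revenue
+top <- filings[order(filings$totrevenue, decreasing = TRUE), ]
+head(top[c("totrevenue", "tax_prd_yr")], 10)
+```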
diff --git a/vignettes/precompile.R b/vignettes/precompile.R
new file mode 100644
index 0000000..8bb86f7
--- /dev/null
+++ b/vignettes/precompile.R
@@ -0,0 +1,8 @@
+#Vignettes that depend on internet access have been precompiled:
+
+library(knitr)
+knit("vignettes/json-apis.Rmd.orig", "vignettes/json-apis.Rmd")
+knit("vignettes/json-paging.Rmd.orig", "vignettes/json-paging.Rmd")
+
+library(devtools)
+build_vignettes()
diff --git a/vignettes/references.bib b/vignettes/references.bib
new file mode 100644
index 0000000..5e23504
--- /dev/null
+++ b/vignettes/references.bib
@@ -0,0 +1,150 @@
+ at manual{jsonschema,
+  title={{JSON Schema: Core Definitions and Terminology}},
+  organization={Internet Engineering Task Force (IETF)},
+  author={F. Galiegue and K. Zyp},
+  year={2013},
+  url={https://tools.ietf.org/html/draft-zyp-json-schema-04},
+}
+
+ at manual{msgpack,
+  title={{MessagePack: It's Like JSON. But Fast and Small}},
+  author={Sadayuki Furuhashi},
+  year={2014},
+  url={http://msgpack.org/},
+}
+
+ at BOOK{chodorow2013mongodb, 
+  title={MongoDB: The Definitive Guide},
+  author={Kristina Chodorow},
+  publisher={O'Reilly Media},
+  year={2013},
+  month={5},
+  edition={Second},
+  isbn={9781449344689},
+  url={http://amazon.com/o/ASIN/1449344682/},
+  price={$39.99},
+  totalpages={432},
+  timestamp={2014.05.05},
+}
+
+ at misc{jsonkeys,
+  title={First Steps in Data Visualisation Using \texttt{d3.js}},
+  author={Mike Dewar},
+  organization={bit.ly},
+  year={2012},
+  note={New York Open Statistical Programming Meetup on Jan. 12, 2012},
+  url={http://vimeo.com/35005701#t=7m17s}
+}
+
+ at article{lawson1979basic,
+ author = {Lawson, C. L. and Hanson, R. J. and Kincaid, D. R. and Krogh, F. T.},
+ title = {Basic Linear Algebra Subprograms for Fortran Usage},
+ journal = {ACM Transactions on Mathematical Software},
+ issue_date = {Sept. 1979},
+ volume = {5},
+ number = {3},
+ month = sep,
+ year = {1979},
+ issn = {0098-3500},
+ pages = {308--323},
+ numpages = {16},
+ url = {http://doi.acm.org/10.1145/355841.355847},
+ doi = {10.1145/355841.355847},
+ acmid = {355847},
+ publisher = {ACM},
+ address = {New York, NY, USA},
+} 
+
+ at BOOK{anderson1999lapack, 
+  title={LAPACK Users' Guide (Software, Environments and Tools)},
+  author={E. Anderson and Z. Bai and C. Bischof and S. Blackford and J. Demmel and J. Dongarra and J. Du Croz and A. Greenbaum and S. Hammarling and A. McKenney and D. Sorensen},
+  publisher={Society for Industrial and Applied Mathematics},
+  year={1999},
+  month={1},
+  edition={3},
+  isbn={9780898714470},
+  url={http://amazon.com/o/ASIN/0898714478/},
+  price={$65.00},
+  totalpages={429},
+  timestamp={2014.05.05},
+}
+
+ at Manual{R,
+  title = {R: A Language and Environment for Statistical Computing},
+  author = {{R Core Team}},
+  organization = {R Foundation for Statistical Computing},
+  address = {Vienna, Austria},
+  year = {2014},
+  url = {http://www.R-project.org/},
+}
+
+ at Manual{RJSONIO,
+  title = {{\pkg{RJSONIO}: Serialize \R Objects to \JSON, \JavaScript Object Notation}},
+  author = {Duncan {Temple Lang}},
+  year = {2013},
+  note = {\R package version 1.0-3},
+  url = {http://CRAN.R-project.org/package=RJSONIO},
+}
+
+ at Manual{rjson,
+  title = {{\pkg{rjson}: \JSON for \R}},
+  author = {Alex Couture-Beil},
+  year = {2013},
+  note = {\R package version 0.2.13},
+  url = {http://CRAN.R-project.org/package=rjson},
+}
+
+ at Manual{jsonlite,
+  title = {{\pkg{jsonlite}: A Smarter \JSON Encoder for \R}},
+  author = {Jeroen Ooms and Duncan Temple Lang and Jonathan Wallace},
+  note = {\R package version 0.9.8},
+  url = {http://github.com/jeroenooms/jsonlite#readme},
+  year = {2014}
+}
+
+ at misc{crockford2006application,
+  author="D. Crockford",
+  title="{The \texttt{application/json} Media Type for \JavaScript Object Notation (\JSON)}",
+  series="Request for Comments",
+  number="4627",
+  howpublished="RFC 4627 (Informational)",
+  publisher="IETF",
+  organization="Internet Engineering Task Force",
+  year=2006,
+  month=jul,
+  note="Obsoleted by RFCs 7158, 7159",
+  url="http://www.ietf.org/rfc/rfc4627.txt",
+}
+
+ at article{ecma1999262,
+  title={{\proglang{ECMAScript} Language Specification}},
+  author={{Ecma International}},
+  journal={{European Association for Standardizing Information and Communication Systems}},
+  year={1999},
+  url={http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf}
+}
+
+ at article{tidydata,
+  title={{Tidy Data}},
+  author={Wickham, Hadley},
+  journal={Under review},
+  year={2014},
+  url={http://vita.had.co.nz/papers/tidy-data.pdf}
+}
+
+ at inproceedings{crockford2006json,
+  title={{JSON: The Fat-free Alternative to XML}},
+  author={Crockford, Douglas},
+  booktitle={Proceedings of XML},
+  volume={2006},
+  year={2006},
+  url={http://www.json.org/fatfree.html}
+}
+
+ at book{nolan2014xml,
+  title={XML and Web Technologies for Data Sciences with \R},
+  author={Nolan, Deborah and Temple Lang, Duncan},
+  year={2014},
+  publisher={Springer-Verlag},
+  url={http://link.springer.com/book/10.1007/978-1-4614-7900-0}
+}
\ No newline at end of file

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-jsonlite.git



More information about the debian-science-commits mailing list