Update libwebrtc third-party jsoncpp to M78
https://bugs.webkit.org/show_bug.cgi?id=202729

Reviewed by Eric Carlson.

* Source/third_party/jsoncpp: Updated.


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@250972 268f45cc-cd09-0410-ab3c-d52691b4dbfc
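
A note on the API shift behind this import (illustration only, not part of the patch): the imported jsoncpp revision marks the older Json::Reader-style interfaces as deprecated, which is why the updated BUILD.gn temporarily passes -Wno-deprecated-declarations until clients move to the StreamWriter and CharReader classes. A minimal sketch of the newer builder-based API, mirroring what the bundled fuzzers/json_fuzzer.cc does (the wrapper function below is hypothetical):

// Parse a JSON document and re-serialize it with the builder-based API.
#include <memory>
#include <sstream>
#include <string>
#include "third_party/jsoncpp/source/include/json/json.h"

bool RoundTripJson(const std::string& in, std::string* out) {
  Json::CharReaderBuilder reader_builder;
  std::unique_ptr<Json::CharReader> reader(reader_builder.newCharReader());
  Json::Value root;
  std::string errs;
  if (!reader->parse(in.data(), in.data() + in.size(), &root, &errs))
    return false;  // errs carries the parse diagnostics.

  Json::StreamWriterBuilder writer_builder;
  std::unique_ptr<Json::StreamWriter> writer(writer_builder.newStreamWriter());
  std::ostringstream stream;
  writer->write(root, &stream);
  *out = stream.str();
  return true;
}
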
diff --git a/Source/ThirdParty/libwebrtc/CMakeLists.txt b/Source/ThirdParty/libwebrtc/CMakeLists.txt
index 48aea22..c16991f 100644
--- a/Source/ThirdParty/libwebrtc/CMakeLists.txt
+++ b/Source/ThirdParty/libwebrtc/CMakeLists.txt
@@ -286,8 +286,8 @@
     Source/third_party/boringssl/src/ssl/tls_method.cc
     Source/third_party/boringssl/src/ssl/tls_record.cc
     Source/third_party/boringssl/src/third_party/fiat/curve25519.c
-    Source/third_party/jsoncpp/overrides/src/lib_json/json_reader.cpp
-    Source/third_party/jsoncpp/overrides/src/lib_json/json_value.cpp
+    Source/third_party/jsoncpp/source/src/lib_json/json_reader.cpp
+    Source/third_party/jsoncpp/source/src/lib_json/json_value.cpp
     Source/third_party/jsoncpp/source/src/lib_json/json_writer.cpp
     Source/third_party/libyuv/source/compare.cc
     Source/third_party/libyuv/source/compare_common.cc
@@ -1414,6 +1414,7 @@
     Source
     Source/third_party/abseil-cpp
     Source/third_party/boringssl/src/include
+    Source/third_party/jsoncpp/generated
     Source/third_party/jsoncpp/overrides/include
     Source/third_party/jsoncpp/source/include
     Source/third_party/jsoncpp/source/src/lib_json
diff --git a/Source/ThirdParty/libwebrtc/ChangeLog b/Source/ThirdParty/libwebrtc/ChangeLog
index 2083df7..105bca9 100644
--- a/Source/ThirdParty/libwebrtc/ChangeLog
+++ b/Source/ThirdParty/libwebrtc/ChangeLog
@@ -1,3 +1,12 @@
+2019-10-10  Youenn Fablet  <youenn@apple.com>
+
+        Update libwebrtc third-party jsoncpp to M78
+        https://bugs.webkit.org/show_bug.cgi?id=202729
+
+        Reviewed by Eric Carlson.
+
+        * Source/third_party/jsoncpp: Updated.
+
 2019-10-10  youenn fablet  <youenn@apple.com>
 
         Rename yasm-1.3.0 folder to yasm
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/BUILD.gn b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/BUILD.gn
index cf40895..f2f48b9 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/BUILD.gn
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/BUILD.gn
@@ -2,18 +2,26 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//testing/libfuzzer/fuzzer_test.gni")
+
 config("jsoncpp_config") {
   include_dirs = [
-    "overrides/include",
     "source/include",
+    "generated",
   ]
+
+  # TODO(crbug.com/983223): Update JsonCpp BUILD.gn to remove deprecated
+  # declaration flag.
+  # This temporary flag allows clients to update to the new version, and then
+  # migrate to the new StreamWriter and CharReader classes.
+  if (!is_win || is_clang) {
+    cflags_cc = [ "-Wno-deprecated-declarations" ]
+  }
 }
 
 source_set("jsoncpp") {
   sources = [
-    "overrides/include/json/value.h",
-    "overrides/src/lib_json/json_reader.cpp",
-    "overrides/src/lib_json/json_value.cpp",
+    "generated/version.h",
     "source/include/json/assertions.h",
     "source/include/json/autolink.h",
     "source/include/json/config.h",
@@ -21,15 +29,38 @@
     "source/include/json/forwards.h",
     "source/include/json/json.h",
     "source/include/json/reader.h",
+    "source/include/json/value.h",
     "source/include/json/writer.h",
-    "source/src/lib_json/json_batchallocator.h",
+    "source/src/lib_json/json_reader.cpp",
     "source/src/lib_json/json_tool.h",
+    "source/src/lib_json/json_value.cpp",
     "source/src/lib_json/json_writer.cpp",
   ]
 
   public_configs = [ ":jsoncpp_config" ]
 
-  defines = [ "JSON_USE_EXCEPTION=0" ]
+  defines = [
+    "JSON_USE_EXCEPTION=0",
+    "JSON_USE_NULLREF=0",
+  ]
 
   include_dirs = [ "source/src/lib_json" ]
+
+  if (!is_win || is_clang) {
+    cflags_cc = [ "-Wno-implicit-fallthrough" ]
+  }
+}
+
+fuzzer_test("jsoncpp_fuzzer") {
+  sources = [
+    "fuzzers/json_fuzzer.cc",
+  ]
+
+  deps = [
+    ":jsoncpp",
+  ]
+
+  include_dirs = [ "generated" ]
+
+  dict = "//testing/libfuzzer/fuzzers/dicts/json.dict"
 }
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/OWNERS b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/OWNERS
index 4fb2fed..f0e7354 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/OWNERS
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/OWNERS
@@ -1,2 +1,3 @@
-pthatcher@chromium.org
-sbc@chromium.org
+jophba@chromium.org
+
+# COMPONENT: Internals
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/README.chromium b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/README.chromium
index 5e7dac6..48bc543 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/README.chromium
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/README.chromium
@@ -6,12 +6,11 @@
 Security Critical: yes
 
 Description:
-The libjingle/webrtc/liblouis_nacl uses jsoncpp for signaling message
-serialization and deserialization. This library is also shipped in NaCl
-SDK and used in several examples.
+JsonCpp is used by multiple projects for parsing and generating JSON data. This
+project is mirrored here from the public GitHub project, with a custom BUILD.gn
+to allow for building with our Ninja + GN configuration. The main project uses
+Meson or CMake for building.
 
-Local modifications:
-The overrides are used to eliminate static initializers in json_value.cpp and
-json_reader.cpp adds includes for <string> and <istream> to build on libc++
-(https://llvm.org/PR25232), and to change some one-bit bit fields to unsigned
-so that they can hold the '1' values that are stored in them.
+Note: to update this project to a new version, regenerating the version.h header
+is required. This can be done by installing either CMake or Meson, building the
+project, and copying the generated version.h to the generated/ subfolder.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/fuzzers/json_fuzzer.cc b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/fuzzers/json_fuzzer.cc
new file mode 100644
index 0000000..c23b670
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/fuzzers/json_fuzzer.cc
@@ -0,0 +1,73 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// JsonCpp fuzzing wrapper to help with automated fuzz testing.
+
+#include <stdint.h>
+#include <array>
+#include <climits>
+#include <cstdio>
+#include <iostream>
+#include <memory>
+#include "third_party/jsoncpp/source/include/json/json.h"
+
+namespace {
+// JsonCpp has a few different parsing options. The code below makes sure that
+// the most interesting variants are tested.
+enum { kBuilderConfigDefault = 0, kBuilderConfigStrict, kNumBuilderConfig };
+}  // namespace
+
+static const std::array<Json::CharReaderBuilder, kNumBuilderConfig>&
+Initialize() {
+  static std::array<Json::CharReaderBuilder, kNumBuilderConfig> builders{};
+
+  Json::CharReaderBuilder::strictMode(
+      &builders[kBuilderConfigStrict].settings_);
+
+  return builders;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  const auto& reader_builders = Initialize();
+
+  for (const auto& reader_builder : reader_builders) {
+    // Parse Json.
+    auto reader =
+        std::unique_ptr<Json::CharReader>(reader_builder.newCharReader());
+    Json::Value root;
+    bool res = reader->parse(reinterpret_cast<const char*>(data),
+                             reinterpret_cast<const char*>(data + size), &root,
+                             nullptr /* errs */);
+    if (!res) {
+      continue;
+    }
+
+    // Write and re-read json.
+    const Json::StreamWriterBuilder writer_builder;
+    auto writer =
+        std::unique_ptr<Json::StreamWriter>(writer_builder.newStreamWriter());
+    std::stringstream out_stream;
+    writer->write(root, &out_stream);
+    std::string output_json = out_stream.str();
+
+    Json::Value root_again;
+    res = reader->parse(output_json.data(),
+                        output_json.data() + output_json.length(), &root_again,
+                        nullptr /* errs */);
+    if (!res) {
+      continue;
+    }
+
+    // Run equality test.
+    // Note: This actually causes the Json::Value tree to be traversed and all
+    // the values to be dereferenced (until two of them are found not equal),
+    // which is great for detecting memory corruption bugs when compiled with
+    // AddressSanitizer. The result of the comparison is ignored, as it is
+    // expected that both the original and the re-read version will differ from
+    // time to time (e.g. due to floating point accuracy loss).
+    (void)(root == root_again);
+  }
+
+  return 0;
+}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/generated/version.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/generated/version.h
new file mode 100644
index 0000000..9a76082
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/generated/version.h
@@ -0,0 +1,22 @@
+// DO NOT EDIT. This file (and "version") is a template used by the build system
+// (either CMake or Meson) to generate a "version.h" header file.
+#ifndef JSON_VERSION_H_INCLUDED
+#define JSON_VERSION_H_INCLUDED
+
+#define JSONCPP_VERSION_STRING "1.9.0"
+#define JSONCPP_VERSION_MAJOR 1
+#define JSONCPP_VERSION_MINOR 9
+#define JSONCPP_VERSION_PATCH 0
+#define JSONCPP_VERSION_QUALIFIER
+#define JSONCPP_VERSION_HEXA                                       \
+  ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | \
+   (JSONCPP_VERSION_PATCH << 8))
+
+#ifdef JSONCPP_USING_SECURE_MEMORY
+#undef JSONCPP_USING_SECURE_MEMORY
+#endif
+#define JSONCPP_USING_SECURE_MEMORY 0
+// If non-zero, the library zeroes any memory that it has allocated before
+// it frees its memory.
+
+#endif  // JSON_VERSION_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/include/json/value.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/include/json/value.h
deleted file mode 100644
index 5707260..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/include/json/value.h
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-#ifndef CPPTL_JSON_H_INCLUDED
-# define CPPTL_JSON_H_INCLUDED
-
-#if !defined(JSON_IS_AMALGAMATION)
-# include "third_party/jsoncpp/source/include/json/forwards.h"
-#endif // if !defined(JSON_IS_AMALGAMATION)
-# include <string>
-# include <vector>
-
-# ifndef JSON_USE_CPPTL_SMALLMAP
-#  include <map>
-# else
-#  include <cpptl/smallmap.h>
-# endif
-# ifdef JSON_USE_CPPTL
-#  include <cpptl/forwards.h>
-# endif
-
-/** \brief JSON (JavaScript Object Notation).
- */
-namespace Json {
-
-   /** \brief Type of the value held by a Value object.
-    */
-   enum ValueType
-   {
-      nullValue = 0, ///< 'null' value
-      intValue,      ///< signed integer value
-      uintValue,     ///< unsigned integer value
-      realValue,     ///< double value
-      stringValue,   ///< UTF-8 string value
-      booleanValue,  ///< bool value
-      arrayValue,    ///< array value (ordered list)
-      objectValue    ///< object value (collection of name/value pairs).
-   };
-
-   enum CommentPlacement
-   {
-      commentBefore = 0,        ///< a comment placed on the line before a value
-      commentAfterOnSameLine,   ///< a comment just after a value on the same line
-      commentAfter,             ///< a comment on the line after a value (only make sense for root value)
-      numberOfCommentPlacement
-   };
-
-//# ifdef JSON_USE_CPPTL
-//   typedef CppTL::AnyEnumerator<const char *> EnumMemberNames;
-//   typedef CppTL::AnyEnumerator<const Value &> EnumValues;
-//# endif
-
-   /** \brief Lightweight wrapper to tag static string.
-    *
-    * Value constructor and objectValue member assignement takes advantage of the
-    * StaticString and avoid the cost of string duplication when storing the
-    * string or the member name.
-    *
-    * Example of usage:
-    * \code
-    * Json::Value aValue( StaticString("some text") );
-    * Json::Value object;
-    * static const StaticString code("code");
-    * object[code] = 1234;
-    * \endcode
-    */
-   class JSON_API StaticString
-   {
-   public:
-      explicit StaticString( const char *czstring )
-         : str_( czstring )
-      {
-      }
-
-      operator const char *() const
-      {
-         return str_;
-      }
-
-      const char *c_str() const
-      {
-         return str_;
-      }
-
-   private:
-      const char *str_;
-   };
-
-   /** \brief Represents a <a HREF="http://www.json.org">JSON</a> value.
-    *
-    * This class is a discriminated union wrapper that can represents a:
-    * - signed integer [range: Value::minInt - Value::maxInt]
-    * - unsigned integer (range: 0 - Value::maxUInt)
-    * - double
-    * - UTF-8 string
-    * - boolean
-    * - 'null'
-    * - an ordered list of Value
-    * - collection of name/value pairs (javascript object)
-    *
-    * The type of the held value is represented by a #ValueType and 
-    * can be obtained using type().
-    *
-    * values of an #objectValue or #arrayValue can be accessed using operator[]() methods. 
-    * Non const methods will automatically create the a #nullValue element 
-    * if it does not exist. 
-    * The sequence of an #arrayValue will be automatically resize and initialized 
-    * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue.
-    *
-    * The get() methods can be used to obtanis default value in the case the required element
-    * does not exist.
-    *
-    * It is possible to iterate over the list of a #objectValue values using 
-    * the getMemberNames() method.
-    */
-   class JSON_API Value 
-   {
-      friend class ValueIteratorBase;
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      friend class ValueInternalLink;
-      friend class ValueInternalMap;
-# endif
-   public:
-      typedef std::vector<std::string> Members;
-      typedef ValueIterator iterator;
-      typedef ValueConstIterator const_iterator;
-      typedef Json::UInt UInt;
-      typedef Json::Int Int;
-# if defined(JSON_HAS_INT64)
-      typedef Json::UInt64 UInt64;
-      typedef Json::Int64 Int64;
-#endif // defined(JSON_HAS_INT64)
-      typedef Json::LargestInt LargestInt;
-      typedef Json::LargestUInt LargestUInt;
-      typedef Json::ArrayIndex ArrayIndex;
-
-      static const Value& null;
-      /// Minimum signed integer value that can be stored in a Json::Value.
-      static const LargestInt minLargestInt;
-      /// Maximum signed integer value that can be stored in a Json::Value.
-      static const LargestInt maxLargestInt;
-      /// Maximum unsigned integer value that can be stored in a Json::Value.
-      static const LargestUInt maxLargestUInt;
-
-      /// Minimum signed int value that can be stored in a Json::Value.
-      static const Int minInt;
-      /// Maximum signed int value that can be stored in a Json::Value.
-      static const Int maxInt;
-      /// Maximum unsigned int value that can be stored in a Json::Value.
-      static const UInt maxUInt;
-
-# if defined(JSON_HAS_INT64)
-      /// Minimum signed 64 bits int value that can be stored in a Json::Value.
-      static const Int64 minInt64;
-      /// Maximum signed 64 bits int value that can be stored in a Json::Value.
-      static const Int64 maxInt64;
-      /// Maximum unsigned 64 bits int value that can be stored in a Json::Value.
-      static const UInt64 maxUInt64;
-#endif // defined(JSON_HAS_INT64)
-
-   private:
-#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-# ifndef JSON_VALUE_USE_INTERNAL_MAP
-      class CZString 
-      {
-      public:
-         enum DuplicationPolicy 
-         {
-            noDuplication = 0,
-            duplicate,
-            duplicateOnCopy
-         };
-         CZString( ArrayIndex index );
-         CZString( const char *cstr, DuplicationPolicy allocate );
-         CZString( const CZString &other );
-         ~CZString();
-         CZString &operator =( const CZString &other );
-         bool operator<( const CZString &other ) const;
-         bool operator==( const CZString &other ) const;
-         ArrayIndex index() const;
-         const char *c_str() const;
-         bool isStaticString() const;
-      private:
-         void swap( CZString &other );
-         const char *cstr_;
-         ArrayIndex index_;
-      };
-
-   public:
-#  ifndef JSON_USE_CPPTL_SMALLMAP
-      typedef std::map<CZString, Value> ObjectValues;
-#  else
-      typedef CppTL::SmallMap<CZString, Value> ObjectValues;
-#  endif // ifndef JSON_USE_CPPTL_SMALLMAP
-# endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
-#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-   public:
-      /** \brief Create a default Value of the given type.
-
-        This is a very useful constructor.
-        To create an empty array, pass arrayValue.
-        To create an empty object, pass objectValue.
-        Another Value can then be set to this one by assignment.
-    This is useful since clear() and resize() will not alter types.
-
-        Examples:
-    \code
-    Json::Value null_value; // null
-    Json::Value arr_value(Json::arrayValue); // []
-    Json::Value obj_value(Json::objectValue); // {}
-    \endcode
-      */
-      Value( ValueType type = nullValue );
-      Value( Int value );
-      Value( UInt value );
-#if defined(JSON_HAS_INT64)
-      Value( Int64 value );
-      Value( UInt64 value );
-#endif // if defined(JSON_HAS_INT64)
-      Value( double value );
-      Value( const char *value );
-      Value( const char *beginValue, const char *endValue );
-      /** \brief Constructs a value from a static string.
-
-       * Like other value string constructor but do not duplicate the string for
-       * internal storage. The given string must remain alive after the call to this
-       * constructor.
-       * Example of usage:
-       * \code
-       * Json::Value aValue( StaticString("some text") );
-       * \endcode
-       */
-      Value( const StaticString &value );
-      Value( const std::string &value );
-# ifdef JSON_USE_CPPTL
-      Value( const CppTL::ConstString &value );
-# endif
-      Value( bool value );
-      Value( const Value &other );
-      ~Value();
-
-      Value &operator=( const Value &other );
-      /// Swap values.
-      /// \note Currently, comments are intentionally not swapped, for
-      /// both logic and efficiency.
-      void swap( Value &other );
-
-      ValueType type() const;
-
-      bool operator <( const Value &other ) const;
-      bool operator <=( const Value &other ) const;
-      bool operator >=( const Value &other ) const;
-      bool operator >( const Value &other ) const;
-
-      bool operator ==( const Value &other ) const;
-      bool operator !=( const Value &other ) const;
-
-      int compare( const Value &other ) const;
-
-      const char *asCString() const;
-      std::string asString() const;
-# ifdef JSON_USE_CPPTL
-      CppTL::ConstString asConstString() const;
-# endif
-      Int asInt() const;
-      UInt asUInt() const;
-#if defined(JSON_HAS_INT64)
-      Int64 asInt64() const;
-      UInt64 asUInt64() const;
-#endif // if defined(JSON_HAS_INT64)
-      LargestInt asLargestInt() const;
-      LargestUInt asLargestUInt() const;
-      float asFloat() const;
-      double asDouble() const;
-      bool asBool() const;
-
-      bool isNull() const;
-      bool isBool() const;
-      bool isInt() const;
-      bool isInt64() const;
-      bool isUInt() const;
-      bool isUInt64() const;
-      bool isIntegral() const;
-      bool isDouble() const;
-      bool isNumeric() const;
-      bool isString() const;
-      bool isArray() const;
-      bool isObject() const;
-
-      bool isConvertibleTo( ValueType other ) const;
-
-      /// Number of values in array or object
-      ArrayIndex size() const;
-
-      /// \brief Return true if empty array, empty object, or null;
-      /// otherwise, false.
-      bool empty() const;
-
-      /// Return isNull()
-      bool operator!() const;
-
-      /// Remove all object members and array elements.
-      /// \pre type() is arrayValue, objectValue, or nullValue
-      /// \post type() is unchanged
-      void clear();
-
-      /// Resize the array to size elements. 
-      /// New elements are initialized to null.
-      /// May only be called on nullValue or arrayValue.
-      /// \pre type() is arrayValue or nullValue
-      /// \post type() is arrayValue
-      void resize( ArrayIndex size );
-
-      /// Access an array element (zero based index ).
-      /// If the array contains less than index element, then null value are inserted
-      /// in the array so that its size is index+1.
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      Value &operator[]( ArrayIndex index );
-
-      /// Access an array element (zero based index ).
-      /// If the array contains less than index element, then null value are inserted
-      /// in the array so that its size is index+1.
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      Value &operator[]( int index );
-
-      /// Access an array element (zero based index )
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      const Value &operator[]( ArrayIndex index ) const;
-
-      /// Access an array element (zero based index )
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      const Value &operator[]( int index ) const;
-
-      /// If the array contains at least index+1 elements, returns the element value, 
-      /// otherwise returns defaultValue.
-      Value get( ArrayIndex index, 
-                 const Value &defaultValue ) const;
-      /// Return true if index < size().
-      bool isValidIndex( ArrayIndex index ) const;
-      /// \brief Append value to array at the end.
-      ///
-      /// Equivalent to jsonvalue[jsonvalue.size()] = value;
-      Value &append( const Value &value );
-
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const char *key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const char *key ) const;
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const std::string &key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const std::string &key ) const;
-      /** \brief Access an object value by name, create a null member if it does not exist.
-
-       * If the object as no entry for that name, then the member name used to store
-       * the new entry is not duplicated.
-       * Example of use:
-       * \code
-       * Json::Value object;
-       * static const StaticString code("code");
-       * object[code] = 1234;
-       * \endcode
-       */
-      Value &operator[]( const StaticString &key );
-# ifdef JSON_USE_CPPTL
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const CppTL::ConstString &key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const CppTL::ConstString &key ) const;
-# endif
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const char *key, 
-                 const Value &defaultValue ) const;
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const std::string &key,
-                 const Value &defaultValue ) const;
-# ifdef JSON_USE_CPPTL
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const CppTL::ConstString &key,
-                 const Value &defaultValue ) const;
-# endif
-      /// \brief Remove and return the named member.  
-      ///
-      /// Do nothing if it did not exist.
-      /// \return the removed Value, or null.
-      /// \pre type() is objectValue or nullValue
-      /// \post type() is unchanged
-      Value removeMember( const char* key );
-      /// Same as removeMember(const char*)
-      Value removeMember( const std::string &key );
-
-      /// Return true if the object has a member named key.
-      bool isMember( const char *key ) const;
-      /// Return true if the object has a member named key.
-      bool isMember( const std::string &key ) const;
-# ifdef JSON_USE_CPPTL
-      /// Return true if the object has a member named key.
-      bool isMember( const CppTL::ConstString &key ) const;
-# endif
-
-      /// \brief Return a list of the member names.
-      ///
-      /// If null, return an empty list.
-      /// \pre type() is objectValue or nullValue
-      /// \post if type() was nullValue, it remains nullValue
-      Members getMemberNames() const;
-
-//# ifdef JSON_USE_CPPTL
-//      EnumMemberNames enumMemberNames() const;
-//      EnumValues enumValues() const;
-//# endif
-
-      /// Comments must be //... or /* ... */
-      void setComment( const char *comment,
-                       CommentPlacement placement );
-      /// Comments must be //... or /* ... */
-      void setComment( const std::string &comment,
-                       CommentPlacement placement );
-      bool hasComment( CommentPlacement placement ) const;
-      /// Include delimiters and embedded newlines.
-      std::string getComment( CommentPlacement placement ) const;
-
-      std::string toStyledString() const;
-
-      const_iterator begin() const;
-      const_iterator end() const;
-
-      iterator begin();
-      iterator end();
-
-   private:
-      Value &resolveReference( const char *key, 
-                               bool isStatic );
-
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      inline bool isItemAvailable() const
-      {
-         return itemIsUsed_ == 0;
-      }
-
-      inline void setItemUsed( bool isUsed = true )
-      {
-         itemIsUsed_ = isUsed ? 1 : 0;
-      }
-
-      inline bool isMemberNameStatic() const
-      {
-         return memberNameIsStatic_ == 0;
-      }
-
-      inline void setMemberNameIsStatic( bool isStatic )
-      {
-         memberNameIsStatic_ = isStatic ? 1 : 0;
-      }
-# endif // # ifdef JSON_VALUE_USE_INTERNAL_MAP
-
-   private:
-      struct CommentInfo
-      {
-         CommentInfo();
-         ~CommentInfo();
-
-         void setComment( const char *text );
-
-         char *comment_;
-      };
-
-      //struct MemberNamesTransform
-      //{
-      //   typedef const char *result_type;
-      //   const char *operator()( const CZString &name ) const
-      //   {
-      //      return name.c_str();
-      //   }
-      //};
-
-      union ValueHolder
-      {
-         LargestInt int_;
-         LargestUInt uint_;
-         double real_;
-         bool bool_;
-         char *string_;
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-         ValueInternalArray *array_;
-         ValueInternalMap *map_;
-#else
-         ObjectValues *map_;
-# endif
-      } value_;
-      ValueType type_ : 8;
-      // One-bit bitfields must be unsigned to allow storing 1.
-      // They must be 32-bits to share storage with ValueHolder.
-      unsigned int allocated_ : 1;
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      unsigned int itemIsUsed_ : 1;      // used by the ValueInternalMap container.
-      unsigned int memberNameIsStatic_ : 1; // used by the ValueInternalMap container.
-# endif
-      CommentInfo *comments_;
-   };
-
-
-   /** \brief Experimental and untested: represents an element of the "path" to access a node.
-    */
-   class PathArgument
-   {
-   public:
-      friend class Path;
-
-      PathArgument();
-      PathArgument( ArrayIndex index );
-      PathArgument( const char *key );
-      PathArgument( const std::string &key );
-
-   private:
-      enum Kind
-      {
-         kindNone = 0,
-         kindIndex,
-         kindKey
-      };
-      std::string key_;
-      ArrayIndex index_;
-      Kind kind_;
-   };
-
-   /** \brief Experimental and untested: represents a "path" to access a node.
-    *
-    * Syntax:
-    * - "." => root node
-    * - ".[n]" => elements at index 'n' of root node (an array value)
-    * - ".name" => member named 'name' of root node (an object value)
-    * - ".name1.name2.name3"
-    * - ".[0][1][2].name1[3]"
-    * - ".%" => member name is provided as parameter
-    * - ".[%]" => index is provied as parameter
-    */
-   class Path
-   {
-   public:
-      Path( const std::string &path,
-            const PathArgument &a1 = PathArgument(),
-            const PathArgument &a2 = PathArgument(),
-            const PathArgument &a3 = PathArgument(),
-            const PathArgument &a4 = PathArgument(),
-            const PathArgument &a5 = PathArgument() );
-
-      const Value &resolve( const Value &root ) const;
-      Value resolve( const Value &root, 
-                     const Value &defaultValue ) const;
-      /// Creates the "path" to access the specified node and returns a reference on the node.
-      Value &make( Value &root ) const;
-
-   private:
-      typedef std::vector<const PathArgument *> InArgs;
-      typedef std::vector<PathArgument> Args;
-
-      void makePath( const std::string &path,
-                     const InArgs &in );
-      void addPathInArg( const std::string &path, 
-                         const InArgs &in, 
-                         InArgs::const_iterator &itInArg, 
-                         PathArgument::Kind kind );
-      void invalidPath( const std::string &path, 
-                        int location );
-
-      Args args_;
-   };
-
-
-
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   /** \brief Allocator to customize Value internal map.
-    * Below is an example of a simple implementation (default implementation actually
-    * use memory pool for speed).
-    * \code
-      class DefaultValueMapAllocator : public ValueMapAllocator
-      {
-      public: // overridden from ValueMapAllocator
-         virtual ValueInternalMap *newMap()
-         {
-            return new ValueInternalMap();
-         }
-
-         virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other )
-         {
-            return new ValueInternalMap( other );
-         }
-
-         virtual void destructMap( ValueInternalMap *map )
-         {
-            delete map;
-         }
-
-         virtual ValueInternalLink *allocateMapBuckets( unsigned int size )
-         {
-            return new ValueInternalLink[size];
-         }
-
-         virtual void releaseMapBuckets( ValueInternalLink *links )
-         {
-            delete [] links;
-         }
-
-         virtual ValueInternalLink *allocateMapLink()
-         {
-            return new ValueInternalLink();
-         }
-
-         virtual void releaseMapLink( ValueInternalLink *link )
-         {
-            delete link;
-         }
-      };
-    * \endcode
-    */ 
-   class JSON_API ValueMapAllocator
-   {
-   public:
-      virtual ~ValueMapAllocator();
-      virtual ValueInternalMap *newMap() = 0;
-      virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other ) = 0;
-      virtual void destructMap( ValueInternalMap *map ) = 0;
-      virtual ValueInternalLink *allocateMapBuckets( unsigned int size ) = 0;
-      virtual void releaseMapBuckets( ValueInternalLink *links ) = 0;
-      virtual ValueInternalLink *allocateMapLink() = 0;
-      virtual void releaseMapLink( ValueInternalLink *link ) = 0;
-   };
-
-   /** \brief ValueInternalMap hash-map bucket chain link (for internal use only).
-    * \internal previous_ & next_ allows for bidirectional traversal.
-    */
-   class JSON_API ValueInternalLink
-   {
-   public:
-      enum { itemPerLink = 6 };  // sizeof(ValueInternalLink) = 128 on 32 bits architecture.
-      enum InternalFlags { 
-         flagAvailable = 0,
-         flagUsed = 1
-      };
-
-      ValueInternalLink();
-
-      ~ValueInternalLink();
-
-      Value items_[itemPerLink];
-      char *keys_[itemPerLink];
-      ValueInternalLink *previous_;
-      ValueInternalLink *next_;
-   };
-
-
-   /** \brief A linked page based hash-table implementation used internally by Value.
-    * \internal ValueInternalMap is a tradional bucket based hash-table, with a linked
-    * list in each bucket to handle collision. There is an addional twist in that
-    * each node of the collision linked list is a page containing a fixed amount of
-    * value. This provides a better compromise between memory usage and speed.
-    * 
-    * Each bucket is made up of a chained list of ValueInternalLink. The last
-    * link of a given bucket can be found in the 'previous_' field of the following bucket.
-    * The last link of the last bucket is stored in tailLink_ as it has no following bucket.
-    * Only the last link of a bucket may contains 'available' item. The last link always
-    * contains at least one element unless is it the bucket one very first link.
-    */
-   class JSON_API ValueInternalMap
-   {
-      friend class ValueIteratorBase;
-      friend class Value;
-   public:
-      typedef unsigned int HashKey;
-      typedef unsigned int BucketIndex;
-
-# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-      struct IteratorState
-      {
-         IteratorState() 
-            : map_(0)
-            , link_(0)
-            , itemIndex_(0)
-            , bucketIndex_(0) 
-         {
-         }
-         ValueInternalMap *map_;
-         ValueInternalLink *link_;
-         BucketIndex itemIndex_;
-         BucketIndex bucketIndex_;
-      };
-# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-      ValueInternalMap();
-      ValueInternalMap( const ValueInternalMap &other );
-      ValueInternalMap &operator =( const ValueInternalMap &other );
-      ~ValueInternalMap();
-
-      void swap( ValueInternalMap &other );
-
-      BucketIndex size() const;
-
-      void clear();
-
-      bool reserveDelta( BucketIndex growth );
-
-      bool reserve( BucketIndex newItemCount );
-
-      const Value *find( const char *key ) const;
-
-      Value *find( const char *key );
-
-      Value &resolveReference( const char *key, 
-                               bool isStatic );
-
-      void remove( const char *key );
-
-      void doActualRemove( ValueInternalLink *link, 
-                           BucketIndex index,
-                           BucketIndex bucketIndex );
-
-      ValueInternalLink *&getLastLinkInBucket( BucketIndex bucketIndex );
-
-      Value &setNewItem( const char *key, 
-                         bool isStatic, 
-                         ValueInternalLink *link, 
-                         BucketIndex index );
-
-      Value &unsafeAdd( const char *key, 
-                        bool isStatic, 
-                        HashKey hashedKey );
-
-      HashKey hash( const char *key ) const;
-
-      int compare( const ValueInternalMap &other ) const;
-
-   private:
-      void makeBeginIterator( IteratorState &it ) const;
-      void makeEndIterator( IteratorState &it ) const;
-      static bool equals( const IteratorState &x, const IteratorState &other );
-      static void increment( IteratorState &iterator );
-      static void incrementBucket( IteratorState &iterator );
-      static void decrement( IteratorState &iterator );
-      static const char *key( const IteratorState &iterator );
-      static const char *key( const IteratorState &iterator, bool &isStatic );
-      static Value &value( const IteratorState &iterator );
-      static int distance( const IteratorState &x, const IteratorState &y );
-
-   private:
-      ValueInternalLink *buckets_;
-      ValueInternalLink *tailLink_;
-      BucketIndex bucketsSize_;
-      BucketIndex itemCount_;
-   };
-
-   /** \brief A simplified deque implementation used internally by Value.
-   * \internal
-   * It is based on a list of fixed "page", each page contains a fixed number of items.
-   * Instead of using a linked-list, a array of pointer is used for fast item look-up.
-   * Look-up for an element is as follow:
-   * - compute page index: pageIndex = itemIndex / itemsPerPage
-   * - look-up item in page: pages_[pageIndex][itemIndex % itemsPerPage]
-   *
-   * Insertion is amortized constant time (only the array containing the index of pointers
-   * need to be reallocated when items are appended).
-   */
-   class JSON_API ValueInternalArray
-   {
-      friend class Value;
-      friend class ValueIteratorBase;
-   public:
-      enum { itemsPerPage = 8 };    // should be a power of 2 for fast divide and modulo.
-      typedef Value::ArrayIndex ArrayIndex;
-      typedef unsigned int PageIndex;
-
-# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-      struct IteratorState // Must be a POD
-      {
-         IteratorState() 
-            : array_(0)
-            , currentPageIndex_(0)
-            , currentItemIndex_(0) 
-         {
-         }
-         ValueInternalArray *array_;
-         Value **currentPageIndex_;
-         unsigned int currentItemIndex_;
-      };
-# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-      ValueInternalArray();
-      ValueInternalArray( const ValueInternalArray &other );
-      ValueInternalArray &operator =( const ValueInternalArray &other );
-      ~ValueInternalArray();
-      void swap( ValueInternalArray &other );
-
-      void clear();
-      void resize( ArrayIndex newSize );
-
-      Value &resolveReference( ArrayIndex index );
-
-      Value *find( ArrayIndex index ) const;
-
-      ArrayIndex size() const;
-
-      int compare( const ValueInternalArray &other ) const;
-
-   private:
-      static bool equals( const IteratorState &x, const IteratorState &other );
-      static void increment( IteratorState &iterator );
-      static void decrement( IteratorState &iterator );
-      static Value &dereference( const IteratorState &iterator );
-      static Value &unsafeDereference( const IteratorState &iterator );
-      static int distance( const IteratorState &x, const IteratorState &y );
-      static ArrayIndex indexOf( const IteratorState &iterator );
-      void makeBeginIterator( IteratorState &it ) const;
-      void makeEndIterator( IteratorState &it ) const;
-      void makeIterator( IteratorState &it, ArrayIndex index ) const;
-
-      void makeIndexValid( ArrayIndex index );
-
-      Value **pages_;
-      ArrayIndex size_;
-      PageIndex pageCount_;
-   };
-
-   /** \brief Experimental: do not use. Allocator to customize Value internal array.
-    * Below is an example of a simple implementation (actual implementation use
-    * memory pool).
-      \code
-class DefaultValueArrayAllocator : public ValueArrayAllocator
-{
-public: // overridden from ValueArrayAllocator
-   virtual ~DefaultValueArrayAllocator()
-   {
-   }
-
-   virtual ValueInternalArray *newArray()
-   {
-      return new ValueInternalArray();
-   }
-
-   virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other )
-   {
-      return new ValueInternalArray( other );
-   }
-
-   virtual void destruct( ValueInternalArray *array )
-   {
-      delete array;
-   }
-
-   virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                          ValueInternalArray::PageIndex &indexCount,
-                                          ValueInternalArray::PageIndex minNewIndexCount )
-   {
-      ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1;
-      if ( minNewIndexCount > newIndexCount )
-         newIndexCount = minNewIndexCount;
-      void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount );
-      if ( !newIndexes )
-         throw std::bad_alloc();
-      indexCount = newIndexCount;
-      indexes = static_cast<Value **>( newIndexes );
-   }
-   virtual void releaseArrayPageIndex( Value **indexes, 
-                                       ValueInternalArray::PageIndex indexCount )
-   {
-      if ( indexes )
-         free( indexes );
-   }
-
-   virtual Value *allocateArrayPage()
-   {
-      return static_cast<Value *>( malloc( sizeof(Value) * ValueInternalArray::itemsPerPage ) );
-   }
-
-   virtual void releaseArrayPage( Value *value )
-   {
-      if ( value )
-         free( value );
-   }
-};
-      \endcode
-    */ 
-   class JSON_API ValueArrayAllocator
-   {
-   public:
-      virtual ~ValueArrayAllocator();
-      virtual ValueInternalArray *newArray() = 0;
-      virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other ) = 0;
-      virtual void destructArray( ValueInternalArray *array ) = 0;
-      /** \brief Reallocate array page index.
-       * Reallocates an array of pointer on each page.
-       * \param indexes [input] pointer on the current index. May be \c NULL.
-       *                [output] pointer on the new index of at least 
-       *                         \a minNewIndexCount pages. 
-       * \param indexCount [input] current number of pages in the index.
-       *                   [output] number of page the reallocated index can handle.
-       *                            \b MUST be >= \a minNewIndexCount.
-       * \param minNewIndexCount Minimum number of page the new index must be able to
-       *                         handle.
-       */
-      virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                             ValueInternalArray::PageIndex &indexCount,
-                                             ValueInternalArray::PageIndex minNewIndexCount ) = 0;
-      virtual void releaseArrayPageIndex( Value **indexes, 
-                                          ValueInternalArray::PageIndex indexCount ) = 0;
-      virtual Value *allocateArrayPage() = 0;
-      virtual void releaseArrayPage( Value *value ) = 0;
-   };
-#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP
-
-
-   /** \brief base class for Value iterators.
-    *
-    */
-   class ValueIteratorBase
-   {
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef ValueIteratorBase SelfType;
-
-      ValueIteratorBase();
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueIteratorBase( const Value::ObjectValues::iterator &current );
-#else
-      ValueIteratorBase( const ValueInternalArray::IteratorState &state );
-      ValueIteratorBase( const ValueInternalMap::IteratorState &state );
-#endif
-
-      bool operator ==( const SelfType &other ) const
-      {
-         return isEqual( other );
-      }
-
-      bool operator !=( const SelfType &other ) const
-      {
-         return !isEqual( other );
-      }
-
-      difference_type operator -( const SelfType &other ) const
-      {
-         return computeDistance( other );
-      }
-
-      /// Return either the index or the member name of the referenced value as a Value.
-      Value key() const;
-
-      /// Return the index of the referenced Value. -1 if it is not an arrayValue.
-      UInt index() const;
-
-      /// Return the member name of the referenced Value. "" if it is not an objectValue.
-      const char *memberName() const;
-
-   protected:
-      Value &deref() const;
-
-      void increment();
-
-      void decrement();
-
-      difference_type computeDistance( const SelfType &other ) const;
-
-      bool isEqual( const SelfType &other ) const;
-
-      void copy( const SelfType &other );
-
-   private:
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      Value::ObjectValues::iterator current_;
-      // Indicates that iterator is for a null value.
-      bool isNull_;
-#else
-      union
-      {
-         ValueInternalArray::IteratorState array_;
-         ValueInternalMap::IteratorState map_;
-      } iterator_;
-      bool isArray_;
-#endif
-   };
-
-   /** \brief const iterator for object and array value.
-    *
-    */
-   class ValueConstIterator : public ValueIteratorBase
-   {
-      friend class Value;
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef const Value &reference;
-      typedef const Value *pointer;
-      typedef ValueConstIterator SelfType;
-
-      ValueConstIterator();
-   private:
-      /*! \internal Use by Value to create an iterator.
-       */
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueConstIterator( const Value::ObjectValues::iterator &current );
-#else
-      ValueConstIterator( const ValueInternalArray::IteratorState &state );
-      ValueConstIterator( const ValueInternalMap::IteratorState &state );
-#endif
-   public:
-      SelfType &operator =( const ValueIteratorBase &other );
-
-      SelfType operator++( int )
-      {
-         SelfType temp( *this );
-         ++*this;
-         return temp;
-      }
-
-      SelfType operator--( int )
-      {
-         SelfType temp( *this );
-         --*this;
-         return temp;
-      }
-
-      SelfType &operator--()
-      {
-         decrement();
-         return *this;
-      }
-
-      SelfType &operator++()
-      {
-         increment();
-         return *this;
-      }
-
-      reference operator *() const
-      {
-         return deref();
-      }
-   };
-
-
-   /** \brief Iterator for object and array value.
-    */
-   class ValueIterator : public ValueIteratorBase
-   {
-      friend class Value;
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef Value &reference;
-      typedef Value *pointer;
-      typedef ValueIterator SelfType;
-
-      ValueIterator();
-      ValueIterator( const ValueConstIterator &other );
-      ValueIterator( const ValueIterator &other );
-   private:
-      /*! \internal Use by Value to create an iterator.
-       */
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueIterator( const Value::ObjectValues::iterator &current );
-#else
-      ValueIterator( const ValueInternalArray::IteratorState &state );
-      ValueIterator( const ValueInternalMap::IteratorState &state );
-#endif
-   public:
-
-      SelfType &operator =( const SelfType &other );
-
-      SelfType operator++( int )
-      {
-         SelfType temp( *this );
-         ++*this;
-         return temp;
-      }
-
-      SelfType operator--( int )
-      {
-         SelfType temp( *this );
-         --*this;
-         return temp;
-      }
-
-      SelfType &operator--()
-      {
-         decrement();
-         return *this;
-      }
-
-      SelfType &operator++()
-      {
-         increment();
-         return *this;
-      }
-
-      reference operator *() const
-      {
-         return deref();
-      }
-   };
-
-
-} // namespace Json
-
-
-#endif // CPPTL_JSON_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_reader.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_reader.cpp
deleted file mode 100644
index f8cfad7..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_reader.cpp
+++ /dev/null
@@ -1,920 +0,0 @@
-// Copyright 2007-2011 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-#if !defined(JSON_IS_AMALGAMATION)
-# include <json/assertions.h>
-# include <json/reader.h>
-# include <json/value.h>
-# include "json_tool.h"
-#endif // if !defined(JSON_IS_AMALGAMATION)
-#include <utility>
-#include <cstdio>
-#include <cassert>
-#include <cstring>
-#include <stdexcept>
-#include <string>
-#include <istream>
-
-#if _MSC_VER >= 1400 // VC++ 8.0
-#pragma warning( disable : 4996 )   // disable warning about strdup being deprecated.
-#endif
-
-namespace Json {
-
-// Implementation of class Features
-// ////////////////////////////////
-
-Features::Features()
-   : allowComments_( true )
-   , strictRoot_( false )
-{
-}
-
-
-Features 
-Features::all()
-{
-   return Features();
-}
-
-
-Features 
-Features::strictMode()
-{
-   Features features;
-   features.allowComments_ = false;
-   features.strictRoot_ = true;
-   return features;
-}
-
-// Implementation of class Reader
-// ////////////////////////////////
-
-
-static inline bool 
-in( Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4 )
-{
-   return c == c1  ||  c == c2  ||  c == c3  ||  c == c4;
-}
-
-static inline bool 
-in( Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4, Reader::Char c5 )
-{
-   return c == c1  ||  c == c2  ||  c == c3  ||  c == c4  ||  c == c5;
-}
-
-
-static bool 
-containsNewLine( Reader::Location begin, 
-                 Reader::Location end )
-{
-   for ( ;begin < end; ++begin )
-      if ( *begin == '\n'  ||  *begin == '\r' )
-         return true;
-   return false;
-}
-
-
-// Class Reader
-// //////////////////////////////////////////////////////////////////
-
-Reader::Reader()
-    : errors_(),
-      document_(),
-      begin_(),
-      end_(),
-      current_(),
-      lastValueEnd_(),
-      lastValue_(),
-      commentsBefore_(),
-      features_( Features::all() ),
-      collectComments_()
-{
-}
-
-
-Reader::Reader( const Features &features )
-    : errors_(),
-      document_(),
-      begin_(),
-      end_(),
-      current_(),
-      lastValueEnd_(),
-      lastValue_(),
-      commentsBefore_(),
-      features_( features ),
-      collectComments_()
-{
-}
-
-
-bool
-Reader::parse( const std::string &document, 
-               Value &root,
-               bool collectComments )
-{
-   document_ = document;
-   const char *begin = document_.c_str();
-   const char *end = begin + document_.length();
-   return parse( begin, end, root, collectComments );
-}
-
-
-bool
-Reader::parse( std::istream& sin,
-               Value &root,
-               bool collectComments )
-{
-   //std::istream_iterator<char> begin(sin);
-   //std::istream_iterator<char> end;
-   // Those would allow streamed input from a file, if parse() were a
-   // template function.
-
-   // Since std::string is reference-counted, this at least does not
-   // create an extra copy.
-   std::string doc;
-   std::getline(sin, doc, (char)EOF);
-   return parse( doc, root, collectComments );
-}
-
-bool 
-Reader::parse( const char *beginDoc, const char *endDoc, 
-               Value &root,
-               bool collectComments )
-{
-   if ( !features_.allowComments_ )
-   {
-      collectComments = false;
-   }
-
-   begin_ = beginDoc;
-   end_ = endDoc;
-   collectComments_ = collectComments;
-   current_ = begin_;
-   lastValueEnd_ = 0;
-   lastValue_ = 0;
-   commentsBefore_ = "";
-   errors_.clear();
-   while ( !nodes_.empty() )
-      nodes_.pop();
-   nodes_.push( &root );
-   
-   bool successful = readValue();
-   Token token;
-   skipCommentTokens( token );
-   if ( collectComments_  &&  !commentsBefore_.empty() )
-      root.setComment( commentsBefore_, commentAfter );
-   if ( features_.strictRoot_ )
-   {
-      if ( !root.isArray()  &&  !root.isObject() )
-      {
-         // Set error location to start of doc, ideally should be first token found in doc
-         token.type_ = tokenError;
-         token.start_ = beginDoc;
-         token.end_ = endDoc;
-         addError( "A valid JSON document must be either an array or an object value.",
-                   token );
-         return false;
-      }
-   }
-   return successful;
-}
-
-
-bool
-Reader::readValue()
-{
-   Token token;
-   skipCommentTokens( token );
-   bool successful = true;
-
-   if ( collectComments_  &&  !commentsBefore_.empty() )
-   {
-      currentValue().setComment( commentsBefore_, commentBefore );
-      commentsBefore_ = "";
-   }
-
-
-   switch ( token.type_ )
-   {
-   case tokenObjectBegin:
-      successful = readObject( token );
-      break;
-   case tokenArrayBegin:
-      successful = readArray( token );
-      break;
-   case tokenNumber:
-      successful = decodeNumber( token );
-      break;
-   case tokenString:
-      successful = decodeString( token );
-      break;
-   case tokenTrue:
-      currentValue() = true;
-      break;
-   case tokenFalse:
-      currentValue() = false;
-      break;
-   case tokenNull:
-      currentValue() = Value();
-      break;
-   default:
-      return addError( "Syntax error: value, object or array expected.", token );
-   }
-
-   if ( collectComments_ )
-   {
-      lastValueEnd_ = current_;
-      lastValue_ = &currentValue();
-   }
-
-   return successful;
-}
-
-
-void 
-Reader::skipCommentTokens( Token &token )
-{
-   if ( features_.allowComments_ )
-   {
-      do
-      {
-         readToken( token );
-      }
-      while ( token.type_ == tokenComment );
-   }
-   else
-   {
-      readToken( token );
-   }
-}
-
-
-bool 
-Reader::expectToken( TokenType type, Token &token, const char *message )
-{
-   readToken( token );
-   if ( token.type_ != type )
-      return addError( message, token );
-   return true;
-}
-
-
-bool 
-Reader::readToken( Token &token )
-{
-   skipSpaces();
-   token.start_ = current_;
-   Char c = getNextChar();
-   bool ok = true;
-   switch ( c )
-   {
-   case '{':
-      token.type_ = tokenObjectBegin;
-      break;
-   case '}':
-      token.type_ = tokenObjectEnd;
-      break;
-   case '[':
-      token.type_ = tokenArrayBegin;
-      break;
-   case ']':
-      token.type_ = tokenArrayEnd;
-      break;
-   case '"':
-      token.type_ = tokenString;
-      ok = readString();
-      break;
-   case '/':
-      token.type_ = tokenComment;
-      ok = readComment();
-      break;
-   case '0':
-   case '1':
-   case '2':
-   case '3':
-   case '4':
-   case '5':
-   case '6':
-   case '7':
-   case '8':
-   case '9':
-   case '-':
-      token.type_ = tokenNumber;
-      readNumber();
-      break;
-   case 't':
-      token.type_ = tokenTrue;
-      ok = match( "rue", 3 );
-      break;
-   case 'f':
-      token.type_ = tokenFalse;
-      ok = match( "alse", 4 );
-      break;
-   case 'n':
-      token.type_ = tokenNull;
-      ok = match( "ull", 3 );
-      break;
-   case ',':
-      token.type_ = tokenArraySeparator;
-      break;
-   case ':':
-      token.type_ = tokenMemberSeparator;
-      break;
-   case 0:
-      token.type_ = tokenEndOfStream;
-      break;
-   default:
-      ok = false;
-      break;
-   }
-   if ( !ok )
-      token.type_ = tokenError;
-   token.end_ = current_;
-   return true;
-}
-
-
-void 
-Reader::skipSpaces()
-{
-   while ( current_ != end_ )
-   {
-      Char c = *current_;
-      if ( c == ' '  ||  c == '\t'  ||  c == '\r'  ||  c == '\n' )
-         ++current_;
-      else
-         break;
-   }
-}
-
-
-bool 
-Reader::match( Location pattern, 
-               int patternLength )
-{
-   if ( end_ - current_ < patternLength )
-      return false;
-   int index = patternLength;
-   while ( index-- )
-      if ( current_[index] != pattern[index] )
-         return false;
-   current_ += patternLength;
-   return true;
-}
-
-
-bool
-Reader::readComment()
-{
-   Location commentBegin = current_ - 1;
-   Char c = getNextChar();
-   bool successful = false;
-   if ( c == '*' )
-      successful = readCStyleComment();
-   else if ( c == '/' )
-      successful = readCppStyleComment();
-   if ( !successful )
-      return false;
-
-   if ( collectComments_ )
-   {
-      CommentPlacement placement = commentBefore;
-      if ( lastValueEnd_  &&  !containsNewLine( lastValueEnd_, commentBegin ) )
-      {
-         if ( c != '*'  ||  !containsNewLine( commentBegin, current_ ) )
-            placement = commentAfterOnSameLine;
-      }
-
-      addComment( commentBegin, current_, placement );
-   }
-   return true;
-}
-
-
-void 
-Reader::addComment( Location begin, 
-                    Location end, 
-                    CommentPlacement placement )
-{
-   assert( collectComments_ );
-   if ( placement == commentAfterOnSameLine )
-   {
-      assert( lastValue_ != 0 );
-      lastValue_->setComment( std::string( begin, end ), placement );
-   }
-   else
-   {
-      if ( !commentsBefore_.empty() )
-         commentsBefore_ += "\n";
-      commentsBefore_ += std::string( begin, end );
-   }
-}
-
-
-bool 
-Reader::readCStyleComment()
-{
-   while ( current_ != end_ )
-   {
-      Char c = getNextChar();
-      if ( c == '*'  &&  *current_ == '/' )
-         break;
-   }
-   return getNextChar() == '/';
-}
-
-
-bool 
-Reader::readCppStyleComment()
-{
-   while ( current_ != end_ )
-   {
-      Char c = getNextChar();
-      if (  c == '\r'  ||  c == '\n' )
-         break;
-   }
-   return true;
-}
-
-
-void 
-Reader::readNumber()
-{
-   while ( current_ != end_ )
-   {
-      if ( !(*current_ >= '0'  &&  *current_ <= '9')  &&
-           !in( *current_, '.', 'e', 'E', '+', '-' ) )
-         break;
-      ++current_;
-   }
-}
-
-bool
-Reader::readString()
-{
-   Char c = 0;
-   while ( current_ != end_ )
-   {
-      c = getNextChar();
-      if ( c == '\\' )
-         getNextChar();
-      else if ( c == '"' )
-         break;
-   }
-   return c == '"';
-}
-
-
-bool 
-Reader::readObject( Token &/*tokenStart*/ )
-{
-   Token tokenName;
-   std::string name;
-   currentValue() = Value( objectValue );
-   while ( readToken( tokenName ) )
-   {
-      bool initialTokenOk = true;
-      while ( tokenName.type_ == tokenComment  &&  initialTokenOk )
-         initialTokenOk = readToken( tokenName );
-      if  ( !initialTokenOk )
-         break;
-      if ( tokenName.type_ == tokenObjectEnd  &&  name.empty() )  // empty object
-         return true;
-      if ( tokenName.type_ != tokenString )
-         break;
-      
-      name = "";
-      if ( !decodeString( tokenName, name ) )
-         return recoverFromError( tokenObjectEnd );
-
-      Token colon;
-      if ( !readToken( colon ) ||  colon.type_ != tokenMemberSeparator )
-      {
-         return addErrorAndRecover( "Missing ':' after object member name", 
-                                    colon, 
-                                    tokenObjectEnd );
-      }
-      Value &value = currentValue()[ name ];
-      nodes_.push( &value );
-      bool ok = readValue();
-      nodes_.pop();
-      if ( !ok ) // error already set
-         return recoverFromError( tokenObjectEnd );
-
-      Token comma;
-      if ( !readToken( comma )
-            ||  ( comma.type_ != tokenObjectEnd  &&  
-                  comma.type_ != tokenArraySeparator &&
-                  comma.type_ != tokenComment ) )
-      {
-         return addErrorAndRecover( "Missing ',' or '}' in object declaration", 
-                                    comma, 
-                                    tokenObjectEnd );
-      }
-      bool finalizeTokenOk = true;
-      while ( comma.type_ == tokenComment &&
-              finalizeTokenOk )
-         finalizeTokenOk = readToken( comma );
-      if ( comma.type_ == tokenObjectEnd )
-         return true;
-   }
-   return addErrorAndRecover( "Missing '}' or object member name", 
-                              tokenName, 
-                              tokenObjectEnd );
-}
-
-
-bool 
-Reader::readArray( Token &/*tokenStart*/ )
-{
-   currentValue() = Value( arrayValue );
-   skipSpaces();
-   if ( *current_ == ']' ) // empty array
-   {
-      Token endArray;
-      readToken( endArray );
-      return true;
-   }
-   int index = 0;
-   for (;;)
-   {
-      Value &value = currentValue()[ index++ ];
-      nodes_.push( &value );
-      bool ok = readValue();
-      nodes_.pop();
-      if ( !ok ) // error already set
-         return recoverFromError( tokenArrayEnd );
-
-      Token token;
-      // Accept Comment after last item in the array.
-      ok = readToken( token );
-      while ( token.type_ == tokenComment  &&  ok )
-      {
-         ok = readToken( token );
-      }
-      bool badTokenType = ( token.type_ != tokenArraySeparator  &&
-                            token.type_ != tokenArrayEnd );
-      if ( !ok  ||  badTokenType )
-      {
-         return addErrorAndRecover( "Missing ',' or ']' in array declaration", 
-                                    token, 
-                                    tokenArrayEnd );
-      }
-      if ( token.type_ == tokenArrayEnd )
-         break;
-   }
-   return true;
-}
-
-
-bool 
-Reader::decodeNumber( Token &token )
-{
-   bool isDouble = false;
-   for ( Location inspect = token.start_; inspect != token.end_; ++inspect )
-   {
-      isDouble = isDouble  
-                 ||  in( *inspect, '.', 'e', 'E', '+' )  
-                 ||  ( *inspect == '-'  &&  inspect != token.start_ );
-   }
-   if ( isDouble )
-      return decodeDouble( token );
-   // Attempts to parse the number as an integer. If the number is
-   // larger than the maximum supported value of an integer then
-   // we decode the number as a double.
-   Location current = token.start_;
-   bool isNegative = *current == '-';
-   if ( isNegative )
-      ++current;
-   Value::LargestUInt maxIntegerValue = isNegative ? Value::LargestUInt(-Value::minLargestInt) 
-                                                   : Value::maxLargestUInt;
-   Value::LargestUInt threshold = maxIntegerValue / 10;
-   Value::LargestUInt value = 0;
-   while ( current < token.end_ )
-   {
-      Char c = *current++;
-      if ( c < '0'  ||  c > '9' )
-         return addError( "'" + std::string( token.start_, token.end_ ) + "' is not a number.", token );
-      Value::UInt digit(c - '0');
-      if ( value >= threshold )
-      {
-         // We've hit or exceeded the max value divided by 10 (rounded down). If
-         // a) we've only just touched the limit, b) this is the last digit, and
-         // c) it's small enough to fit in that rounding delta, we're okay.
-         // Otherwise treat this number as a double to avoid overflow.
-         if (value > threshold ||
-             current != token.end_ ||
-             digit > maxIntegerValue % 10)
-         {
-            return decodeDouble( token );
-         }
-      }
-      value = value * 10 + digit;
-   }
-   if ( isNegative )
-      currentValue() = -Value::LargestInt( value );
-   else if ( value <= Value::LargestUInt(Value::maxInt) )
-      currentValue() = Value::LargestInt( value );
-   else
-      currentValue() = value;
-   return true;
-}
-
-
-bool 
-Reader::decodeDouble( Token &token )
-{
-   double value = 0;
-   const int bufferSize = 32;
-   int count;
-   int length = int(token.end_ - token.start_);
-
-   // Sanity check to avoid buffer overflow exploits.
-   if (length < 0) {
-      return addError( "Unable to parse token length", token );
-   }
-
-   // Avoid using a string constant for the format control string given to
-   // sscanf, as this can cause hard-to-debug crashes on OS X. See here for more
-   // info:
-   //
-   //     http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
-   char format[] = "%lf";
-
-   if ( length <= bufferSize )
-   {
-      Char buffer[bufferSize+1];
-      memcpy( buffer, token.start_, length );
-      buffer[length] = 0;
-      count = sscanf( buffer, format, &value );
-   }
-   else
-   {
-      std::string buffer( token.start_, token.end_ );
-      count = sscanf( buffer.c_str(), format, &value );
-   }
-
-   if ( count != 1 )
-      return addError( "'" + std::string( token.start_, token.end_ ) + "' is not a number.", token );
-   currentValue() = value;
-   return true;
-}
-
-
-bool 
-Reader::decodeString( Token &token )
-{
-   std::string decoded;
-   if ( !decodeString( token, decoded ) )
-      return false;
-   currentValue() = decoded;
-   return true;
-}
-
-
-bool 
-Reader::decodeString( Token &token, std::string &decoded )
-{
-   decoded.reserve( token.end_ - token.start_ - 2 );
-   Location current = token.start_ + 1; // skip '"'
-   Location end = token.end_ - 1;      // do not include '"'
-   while ( current != end )
-   {
-      Char c = *current++;
-      if ( c == '"' )
-         break;
-      else if ( c == '\\' )
-      {
-         if ( current == end )
-            return addError( "Empty escape sequence in string", token, current );
-         Char escape = *current++;
-         switch ( escape )
-         {
-         case '"': decoded += '"'; break;
-         case '/': decoded += '/'; break;
-         case '\\': decoded += '\\'; break;
-         case 'b': decoded += '\b'; break;
-         case 'f': decoded += '\f'; break;
-         case 'n': decoded += '\n'; break;
-         case 'r': decoded += '\r'; break;
-         case 't': decoded += '\t'; break;
-         case 'u':
-            {
-               unsigned int unicode;
-               if ( !decodeUnicodeCodePoint( token, current, end, unicode ) )
-                  return false;
-               decoded += codePointToUTF8(unicode);
-            }
-            break;
-         default:
-            return addError( "Bad escape sequence in string", token, current );
-         }
-      }
-      else
-      {
-         decoded += c;
-      }
-   }
-   return true;
-}
-
-bool
-Reader::decodeUnicodeCodePoint( Token &token, 
-                                     Location &current, 
-                                     Location end, 
-                                     unsigned int &unicode )
-{
-
-   if ( !decodeUnicodeEscapeSequence( token, current, end, unicode ) )
-      return false;
-   if (unicode >= 0xD800 && unicode <= 0xDBFF)
-   {
-      // surrogate pairs
-      if (end - current < 6)
-         return addError( "additional six characters expected to parse unicode surrogate pair.", token, current );
-      unsigned int surrogatePair;
-      if (*(current++) == '\\' && *(current++)== 'u')
-      {
-         if (decodeUnicodeEscapeSequence( token, current, end, surrogatePair ))
-         {
-            unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
-         } 
-         else
-            return false;
-      } 
-      else
-         return addError( "expecting another \\u token to begin the second half of a unicode surrogate pair", token, current );
-   }
-   return true;
-}
-
-bool 
-Reader::decodeUnicodeEscapeSequence( Token &token, 
-                                     Location &current, 
-                                     Location end, 
-                                     unsigned int &unicode )
-{
-   if ( end - current < 4 )
-      return addError( "Bad unicode escape sequence in string: four digits expected.", token, current );
-   unicode = 0;
-   for ( int index =0; index < 4; ++index )
-   {
-      Char c = *current++;
-      unicode *= 16;
-      if ( c >= '0'  &&  c <= '9' )
-         unicode += c - '0';
-      else if ( c >= 'a'  &&  c <= 'f' )
-         unicode += c - 'a' + 10;
-      else if ( c >= 'A'  &&  c <= 'F' )
-         unicode += c - 'A' + 10;
-      else
-         return addError( "Bad unicode escape sequence in string: hexadecimal digit expected.", token, current );
-   }
-   return true;
-}
-
-
-bool 
-Reader::addError( const std::string &message, 
-                  Token &token,
-                  Location extra )
-{
-   ErrorInfo info;
-   info.token_ = token;
-   info.message_ = message;
-   info.extra_ = extra;
-   errors_.push_back( info );
-   return false;
-}
-
-
-bool 
-Reader::recoverFromError( TokenType skipUntilToken )
-{
-   int errorCount = int(errors_.size());
-   Token skip;
-   for (;;)
-   {
-      if ( !readToken(skip) )
-         errors_.resize( errorCount ); // discard errors caused by recovery
-      if ( skip.type_ == skipUntilToken  ||  skip.type_ == tokenEndOfStream )
-         break;
-   }
-   errors_.resize( errorCount );
-   return false;
-}
-
-
-bool 
-Reader::addErrorAndRecover( const std::string &message, 
-                            Token &token,
-                            TokenType skipUntilToken )
-{
-   addError( message, token );
-   return recoverFromError( skipUntilToken );
-}
-
-
-Value &
-Reader::currentValue()
-{
-   return *(nodes_.top());
-}
-
-
-Reader::Char 
-Reader::getNextChar()
-{
-   if ( current_ == end_ )
-      return 0;
-   return *current_++;
-}
-
-
-void 
-Reader::getLocationLineAndColumn( Location location,
-                                  int &line,
-                                  int &column ) const
-{
-   Location current = begin_;
-   Location lastLineStart = current;
-   line = 0;
-   while ( current < location  &&  current != end_ )
-   {
-      Char c = *current++;
-      if ( c == '\r' )
-      {
-         if ( *current == '\n' )
-            ++current;
-         lastLineStart = current;
-         ++line;
-      }
-      else if ( c == '\n' )
-      {
-         lastLineStart = current;
-         ++line;
-      }
-   }
-   // column & line start at 1
-   column = int(location - lastLineStart) + 1;
-   ++line;
-}
-
-
-std::string
-Reader::getLocationLineAndColumn( Location location ) const
-{
-   int line, column;
-   getLocationLineAndColumn( location, line, column );
-   char buffer[18+16+16+1];
-   sprintf( buffer, "Line %d, Column %d", line, column );
-   return buffer;
-}
-
-
-// Deprecated. Preserved for backward compatibility
-std::string 
-Reader::getFormatedErrorMessages() const
-{
-    return getFormattedErrorMessages();
-}
-
-
-std::string 
-Reader::getFormattedErrorMessages() const
-{
-   std::string formattedMessage;
-   for ( Errors::const_iterator itError = errors_.begin();
-         itError != errors_.end();
-         ++itError )
-   {
-      const ErrorInfo &error = *itError;
-      formattedMessage += "* " + getLocationLineAndColumn( error.token_.start_ ) + "\n";
-      formattedMessage += "  " + error.message_ + "\n";
-      if ( error.extra_ )
-         formattedMessage += "See " + getLocationLineAndColumn( error.extra_ ) + " for detail.\n";
-   }
-   return formattedMessage;
-}
-
-
-std::istream& operator>>( std::istream &sin, Value &root )
-{
-    Json::Reader reader;
-    bool ok = reader.parse(sin, root, true);
-    if (!ok) {
-      fprintf(
-          stderr,
-          "Error from reader: %s",
-          reader.getFormattedErrorMessages().c_str());
-
-      JSON_FAIL_MESSAGE("reader error");
-    }
-    return sin;
-}
-
-
-} // namespace Json
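For context, the override reader deleted above implements the classic Json::Reader front end: a hand-written tokenizer and recursive-descent parser with comment collection and error recovery. The following is an illustrative sketch only (not build input for this patch) of how that API is typically driven, using just the <json/json.h> umbrella header and the parse() and getFormattedErrorMessages() entry points defined above:

    // Illustrative only: exercises the Json::Reader/Json::Value API shown above
    // (parse(), getFormattedErrorMessages(), operator[] and the as*() accessors).
    #include <json/json.h>

    #include <iostream>
    #include <sstream>
    #include <string>

    int main()
    {
       const std::string document =
          "{ \"encoding\" : \"UTF-8\", \"indent\" : { \"length\" : 3 } }";

       Json::Value root;
       Json::Reader reader;
       // Same overload the operator>> above uses: parse(stream, root, collectComments).
       std::istringstream stream( document );
       if ( !reader.parse( stream, root, /*collectComments=*/true ) )
       {
          std::cerr << reader.getFormattedErrorMessages();
          return 1;
       }

       std::cout << root["encoding"].asString() << "\n";       // "UTF-8"
       std::cout << root["indent"]["length"].asInt() << "\n";  // 3
       return 0;
    }
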
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_value.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_value.cpp
deleted file mode 100644
index a2a4a67..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/overrides/src/lib_json/json_value.cpp
+++ /dev/null
@@ -1,1930 +0,0 @@
-// Copyright 2011 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-#if !defined(JSON_IS_AMALGAMATION)
-# include <json/assertions.h>
-# include <json/value.h>
-# include <json/writer.h>
-# ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-#  include "json_batchallocator.h"
-# endif // #ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-#endif // if !defined(JSON_IS_AMALGAMATION)
-#include <math.h>
-#include <sstream>
-#include <utility>
-#include <stdexcept>
-#include <cstring>
-#include <cassert>
-#ifdef JSON_USE_CPPTL
-# include <cpptl/conststring.h>
-#endif
-#include <cstddef>    // size_t
-
-#define JSON_ASSERT_UNREACHABLE assert( false )
-
-namespace Json {
-
-// This is a workaround to avoid the static initialization of Value::null.
-// kNull must be word-aligned to avoid crashing on ARM.  We use an alignment of
-// 8 (instead of 4) as a bit of future-proofing.
-#if defined(__ARMEL__)
-#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
-#else
-#define ALIGNAS(byte_alignment)
-#endif
-static const unsigned char ALIGNAS(8) kNull[sizeof(Value)] = {0};
-const Value& Value::null = reinterpret_cast<const Value&>(kNull);
-
-const Int Value::minInt = Int( ~(UInt(-1)/2) );
-const Int Value::maxInt = Int( UInt(-1)/2 );
-const UInt Value::maxUInt = UInt(-1);
-# if defined(JSON_HAS_INT64)
-const Int64 Value::minInt64 = Int64( ~(UInt64(-1)/2) );
-const Int64 Value::maxInt64 = Int64( UInt64(-1)/2 );
-const UInt64 Value::maxUInt64 = UInt64(-1);
-// The constant is hard-coded because some compilers have trouble
-// converting Value::maxUInt64 to a double correctly (AIX/xlC).
-// Assumes that UInt64 is a 64-bit integer.
-static const double maxUInt64AsDouble = 18446744073709551615.0;
-#endif // defined(JSON_HAS_INT64)
-const LargestInt Value::minLargestInt = LargestInt( ~(LargestUInt(-1)/2) );
-const LargestInt Value::maxLargestInt = LargestInt( LargestUInt(-1)/2 );
-const LargestUInt Value::maxLargestUInt = LargestUInt(-1);
-
-
-/// Unknown size marker
-static const unsigned int unknown = (unsigned)-1;
-
-#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-template <typename T, typename U>
-static inline bool InRange(double d, T min, U max) {
-   return d >= min && d <= max;
-}
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-static inline double integerToDouble( Json::UInt64 value )
-{
-    return static_cast<double>( Int64(value/2) ) * 2.0 + Int64(value & 1);
-}
-
-template<typename T>
-static inline double integerToDouble( T value )
-{
-    return static_cast<double>( value );
-}
-
-template <typename T, typename U>
-static inline bool InRange(double d, T min, U max) {
-   return d >= integerToDouble(min) && d <= integerToDouble(max);
-}
-#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-
-
-/** Duplicates the specified string value.
- * @param value Pointer to the string to duplicate. Must be zero-terminated if
- *              length is "unknown".
- * @param length Length of the value. If equal to unknown, it is
- *               computed using strlen(value).
- * @return Pointer to the duplicated copy of the string.
- */
-static inline char *
-duplicateStringValue( const char *value, 
-                      unsigned int length = unknown )
-{
-   if ( length == unknown )
-      length = (unsigned int)strlen(value);
-
-   // Avoid an integer overflow in the call to malloc below by limiting length
-   // to a sane value.
-   if (length >= (unsigned)Value::maxInt)
-      length = Value::maxInt - 1;
-
-   char *newString = static_cast<char *>( malloc( length + 1 ) );
-   JSON_ASSERT_MESSAGE( newString != 0, "Failed to allocate string value buffer" );
-   memcpy( newString, value, length );
-   newString[length] = 0;
-   return newString;
-}
-
-
-/** Free the string duplicated by duplicateStringValue().
- */
-static inline void 
-releaseStringValue( char *value )
-{
-   if ( value )
-      free( value );
-}
-
-} // namespace Json
-
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// ValueInternals...
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-#if !defined(JSON_IS_AMALGAMATION)
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-#  include "json_internalarray.inl"
-#  include "json_internalmap.inl"
-# endif // JSON_VALUE_USE_INTERNAL_MAP
-
-# include "json_valueiterator.inl"
-#endif // if !defined(JSON_IS_AMALGAMATION)
-
-namespace Json {
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class Value::CommentInfo
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-
-Value::CommentInfo::CommentInfo()
-   : comment_( 0 )
-{
-}
-
-Value::CommentInfo::~CommentInfo()
-{
-   if ( comment_ )
-      releaseStringValue( comment_ );
-}
-
-
-void 
-Value::CommentInfo::setComment( const char *text )
-{
-   if ( comment_ )
-      releaseStringValue( comment_ );
-   JSON_ASSERT( text != 0 );
-   JSON_ASSERT_MESSAGE( text[0]=='\0' || text[0]=='/', "Comments must start with /");
-   // It seems that /**/ style comments are acceptable as well.
-   comment_ = duplicateStringValue( text );
-}
-
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class Value::CZString
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-# ifndef JSON_VALUE_USE_INTERNAL_MAP
-
-// Notes: when a string is stored, index_ records whether the string
-// was duplicated (and therefore must be freed).
-
-Value::CZString::CZString( ArrayIndex index )
-   : cstr_( 0 )
-   , index_( index )
-{
-}
-
-Value::CZString::CZString( const char *cstr, DuplicationPolicy allocate )
-   : cstr_( allocate == duplicate ? duplicateStringValue(cstr) 
-                                  : cstr )
-   , index_( allocate )
-{
-}
-
-Value::CZString::CZString( const CZString &other )
-: cstr_( other.index_ != noDuplication &&  other.cstr_ != 0
-                ?  duplicateStringValue( other.cstr_ )
-                : other.cstr_ )
-   , index_( other.cstr_ ? (other.index_ == noDuplication ? noDuplication : duplicate)
-                         : other.index_ )
-{
-}
-
-Value::CZString::~CZString()
-{
-   if ( cstr_  &&  index_ == duplicate )
-      releaseStringValue( const_cast<char *>( cstr_ ) );
-}
-
-void 
-Value::CZString::swap( CZString &other )
-{
-   std::swap( cstr_, other.cstr_ );
-   std::swap( index_, other.index_ );
-}
-
-Value::CZString &
-Value::CZString::operator =( const CZString &other )
-{
-   CZString temp( other );
-   swap( temp );
-   return *this;
-}
-
-bool 
-Value::CZString::operator<( const CZString &other ) const 
-{
-   if ( cstr_ )
-      return strcmp( cstr_, other.cstr_ ) < 0;
-   return index_ < other.index_;
-}
-
-bool 
-Value::CZString::operator==( const CZString &other ) const 
-{
-   if ( cstr_ )
-      return strcmp( cstr_, other.cstr_ ) == 0;
-   return index_ == other.index_;
-}
-
-
-ArrayIndex 
-Value::CZString::index() const
-{
-   return index_;
-}
-
-
-const char *
-Value::CZString::c_str() const
-{
-   return cstr_;
-}
-
-bool 
-Value::CZString::isStaticString() const
-{
-   return index_ == noDuplication;
-}
-
-#endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
-
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class Value::Value
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-/*! \internal Default constructor initialization must be equivalent to:
- * memset( this, 0, sizeof(Value) )
- * This optimization is used in ValueInternalMap fast allocator.
- */
-Value::Value( ValueType type )
-   : type_( type )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   switch ( type )
-   {
-   case nullValue:
-      break;
-   case intValue:
-   case uintValue:
-      value_.int_ = 0;
-      break;
-   case realValue:
-      value_.real_ = 0.0;
-      break;
-   case stringValue:
-      value_.string_ = 0;
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_ = new ObjectValues();
-      break;
-#else
-   case arrayValue:
-      value_.array_ = arrayAllocator()->newArray();
-      break;
-   case objectValue:
-      value_.map_ = mapAllocator()->newMap();
-      break;
-#endif
-   case booleanValue:
-      value_.bool_ = false;
-      break;
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-}
-
-
-Value::Value( UInt value )
-   : type_( uintValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.uint_ = value;
-}
-
-Value::Value( Int value )
-   : type_( intValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.int_ = value;
-}
-
-
-# if defined(JSON_HAS_INT64)
-Value::Value( Int64 value )
-   : type_( intValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.int_ = value;
-}
-
-
-Value::Value( UInt64 value )
-   : type_( uintValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.uint_ = value;
-}
-#endif // defined(JSON_HAS_INT64)
-
-Value::Value( double value )
-   : type_( realValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.real_ = value;
-}
-
-Value::Value( const char *value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value );
-}
-
-
-Value::Value( const char *beginValue, 
-              const char *endValue )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( beginValue, 
-                                          (unsigned int)(endValue - beginValue) );
-}
-
-
-Value::Value( const std::string &value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value.c_str(), 
-                                          (unsigned int)value.length() );
-
-}
-
-Value::Value( const StaticString &value )
-   : type_( stringValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = const_cast<char *>( value.c_str() );
-}
-
-
-# ifdef JSON_USE_CPPTL
-Value::Value( const CppTL::ConstString &value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value, value.length() );
-}
-# endif
-
-Value::Value( bool value )
-   : type_( booleanValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.bool_ = value;
-}
-
-
-Value::Value( const Value &other )
-   : type_( other.type_ )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-      value_ = other.value_;
-      break;
-   case stringValue:
-      if ( other.value_.string_ )
-      {
-         value_.string_ = duplicateStringValue( other.value_.string_ );
-         allocated_ = true;
-      }
-      else
-         value_.string_ = 0;
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_ = new ObjectValues( *other.value_.map_ );
-      break;
-#else
-   case arrayValue:
-      value_.array_ = arrayAllocator()->newArrayCopy( *other.value_.array_ );
-      break;
-   case objectValue:
-      value_.map_ = mapAllocator()->newMapCopy( *other.value_.map_ );
-      break;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   if ( other.comments_ )
-   {
-      comments_ = new CommentInfo[numberOfCommentPlacement];
-      for ( int comment =0; comment < numberOfCommentPlacement; ++comment )
-      {
-         const CommentInfo &otherComment = other.comments_[comment];
-         if ( otherComment.comment_ )
-            comments_[comment].setComment( otherComment.comment_ );
-      }
-   }
-}
-
-
-Value::~Value()
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-      break;
-   case stringValue:
-      if ( allocated_ )
-         releaseStringValue( value_.string_ );
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      delete value_.map_;
-      break;
-#else
-   case arrayValue:
-      arrayAllocator()->destructArray( value_.array_ );
-      break;
-   case objectValue:
-      mapAllocator()->destructMap( value_.map_ );
-      break;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-
-   if ( comments_ )
-      delete[] comments_;
-}
-
-Value &
-Value::operator=( const Value &other )
-{
-   Value temp( other );
-   swap( temp );
-   return *this;
-}
-
-void 
-Value::swap( Value &other )
-{
-   ValueType temp = type_;
-   type_ = other.type_;
-   other.type_ = temp;
-   std::swap( value_, other.value_ );
-   int temp2 = allocated_;
-   allocated_ = other.allocated_;
-   other.allocated_ = temp2;
-}
-
-ValueType 
-Value::type() const
-{
-   return type_;
-}
-
-
-int 
-Value::compare( const Value &other ) const
-{
-   if ( *this < other )
-      return -1;
-   if ( *this > other )
-      return 1;
-   return 0;
-}
-
-
-bool 
-Value::operator <( const Value &other ) const
-{
-   int typeDelta = type_ - other.type_;
-   if ( typeDelta )
-      return typeDelta < 0 ? true : false;
-   switch ( type_ )
-   {
-   case nullValue:
-      return false;
-   case intValue:
-      return value_.int_ < other.value_.int_;
-   case uintValue:
-      return value_.uint_ < other.value_.uint_;
-   case realValue:
-      return value_.real_ < other.value_.real_;
-   case booleanValue:
-      return value_.bool_ < other.value_.bool_;
-   case stringValue:
-      return ( value_.string_ == 0  &&  other.value_.string_ )
-             || ( other.value_.string_  
-                  &&  value_.string_  
-                  && strcmp( value_.string_, other.value_.string_ ) < 0 );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      {
-         int delta = int( value_.map_->size() - other.value_.map_->size() );
-         if ( delta )
-            return delta < 0;
-         return (*value_.map_) < (*other.value_.map_);
-      }
-#else
-   case arrayValue:
-      return value_.array_->compare( *(other.value_.array_) ) < 0;
-   case objectValue:
-      return value_.map_->compare( *(other.value_.map_) ) < 0;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   return false;  // unreachable
-}
-
-bool 
-Value::operator <=( const Value &other ) const
-{
-   return !(other < *this);
-}
-
-bool 
-Value::operator >=( const Value &other ) const
-{
-   return !(*this < other);
-}
-
-bool 
-Value::operator >( const Value &other ) const
-{
-   return other < *this;
-}
-
-bool 
-Value::operator ==( const Value &other ) const
-{
-   //if ( type_ != other.type_ )
-   // GCC 2.95.3 says:
-   // attempt to take address of bit-field structure member `Json::Value::type_'
-   // Beats me, but a temp solves the problem.
-   int temp = other.type_;
-   if ( type_ != temp )
-      return false;
-   switch ( type_ )
-   {
-   case nullValue:
-      return true;
-   case intValue:
-      return value_.int_ == other.value_.int_;
-   case uintValue:
-      return value_.uint_ == other.value_.uint_;
-   case realValue:
-      return value_.real_ == other.value_.real_;
-   case booleanValue:
-      return value_.bool_ == other.value_.bool_;
-   case stringValue:
-      return ( value_.string_ == other.value_.string_ )
-             || ( other.value_.string_  
-                  &&  value_.string_  
-                  && strcmp( value_.string_, other.value_.string_ ) == 0 );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      return value_.map_->size() == other.value_.map_->size()
-             && (*value_.map_) == (*other.value_.map_);
-#else
-   case arrayValue:
-      return value_.array_->compare( *(other.value_.array_) ) == 0;
-   case objectValue:
-      return value_.map_->compare( *(other.value_.map_) ) == 0;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   return false;  // unreachable
-}
-
-bool 
-Value::operator !=( const Value &other ) const
-{
-   return !( *this == other );
-}
-
-const char *
-Value::asCString() const
-{
-   JSON_ASSERT( type_ == stringValue );
-   return value_.string_;
-}
-
-
-std::string 
-Value::asString() const
-{
-   switch ( type_ )
-   {
-   case nullValue:
-      return "";
-   case stringValue:
-      return value_.string_ ? value_.string_ : "";
-   case booleanValue:
-      return value_.bool_ ? "true" : "false";
-   case intValue:
-      return valueToString( value_.int_ );
-   case uintValue:
-      return valueToString( value_.uint_ );
-   case realValue:
-      return valueToString( value_.real_ );
-   default:
-      JSON_FAIL_MESSAGE( "Type is not convertible to string" );
-   }
-}
-
-# ifdef JSON_USE_CPPTL
-CppTL::ConstString 
-Value::asConstString() const
-{
-   return CppTL::ConstString( asString().c_str() );
-}
-# endif
-
-
-Value::Int 
-Value::asInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isInt(), "LargestInt out of Int range");
-      return Int(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isInt(), "LargestUInt out of Int range");
-      return Int(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt, maxInt), "double out of Int range");
-      return Int(value_.real_);
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to Int.");
-}
-
-
-Value::UInt 
-Value::asUInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isUInt(), "LargestInt out of UInt range");
-      return UInt(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isUInt(), "LargestUInt out of UInt range");
-      return UInt(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt), "double out of UInt range");
-      return UInt( value_.real_ );
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to UInt.");
-}
-
-
-# if defined(JSON_HAS_INT64)
-
-Value::Int64
-Value::asInt64() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return Int64(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isInt64(), "LargestUInt out of Int64 range");
-      return Int64(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt64, maxInt64), "double out of Int64 range");
-      return Int64(value_.real_);
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to Int64.");
-}
-
-
-Value::UInt64
-Value::asUInt64() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isUInt64(), "LargestInt out of UInt64 range");
-      return UInt64(value_.int_);
-   case uintValue:
-      return UInt64(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt64), "double out of UInt64 range");
-      return UInt64( value_.real_ );
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to UInt64.");
-}
-# endif // if defined(JSON_HAS_INT64)
-
-
-LargestInt 
-Value::asLargestInt() const
-{
-#if defined(JSON_NO_INT64)
-    return asInt();
-#else
-    return asInt64();
-#endif
-}
-
-
-LargestUInt 
-Value::asLargestUInt() const
-{
-#if defined(JSON_NO_INT64)
-    return asUInt();
-#else
-    return asUInt64();
-#endif
-}
-
-
-double 
-Value::asDouble() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return static_cast<double>( value_.int_ );
-   case uintValue:
-#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return static_cast<double>( value_.uint_ );
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return integerToDouble( value_.uint_ );
-#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-   case realValue:
-      return value_.real_;
-   case nullValue:
-      return 0.0;
-   case booleanValue:
-      return value_.bool_ ? 1.0 : 0.0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to double.");
-}
-
-float
-Value::asFloat() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return static_cast<float>( value_.int_ );
-   case uintValue:
-#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return static_cast<float>( value_.uint_ );
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return integerToDouble( value_.uint_ );
-#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-   case realValue:
-      return static_cast<float>( value_.real_ );
-   case nullValue:
-      return 0.0;
-   case booleanValue:
-      return value_.bool_ ? 1.0f : 0.0f;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to float.");
-}
-
-bool 
-Value::asBool() const
-{
-   switch ( type_ )
-   {
-   case booleanValue:
-      return value_.bool_;
-   case nullValue:
-      return false;
-   case intValue:
-      return value_.int_ ? true : false;
-   case uintValue:
-      return value_.uint_ ? true : false;
-   case realValue:
-      return value_.real_ ? true : false;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to bool.");
-}
-
-
-bool 
-Value::isConvertibleTo( ValueType other ) const
-{
-   switch ( other )
-   {
-   case nullValue:
-      return ( isNumeric() && asDouble() == 0.0 )
-             || ( type_ == booleanValue && value_.bool_ == false )
-             || ( type_ == stringValue && asString() == "" )
-             || ( type_ == arrayValue && value_.map_->size() == 0 )
-             || ( type_ == objectValue && value_.map_->size() == 0 )
-             || type_ == nullValue;
-   case intValue:
-      return isInt()
-             || (type_ == realValue && InRange(value_.real_, minInt, maxInt))
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case uintValue:
-      return isUInt()
-             || (type_ == realValue && InRange(value_.real_, 0, maxUInt))
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case realValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case booleanValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case stringValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == stringValue
-             || type_ == nullValue;
-   case arrayValue:
-      return type_ == arrayValue
-             || type_ == nullValue;
-   case objectValue:
-      return type_ == objectValue
-             || type_ == nullValue;
-   }
-   JSON_ASSERT_UNREACHABLE;
-   return false;
-}
-
-
-/// Number of values in array or object
-ArrayIndex 
-Value::size() const
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-   case stringValue:
-      return 0;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:  // size of the array is highest index + 1
-      if ( !value_.map_->empty() )
-      {
-         ObjectValues::const_iterator itLast = value_.map_->end();
-         --itLast;
-         return (*itLast).first.index()+1;
-      }
-      return 0;
-   case objectValue:
-      return ArrayIndex( value_.map_->size() );
-#else
-   case arrayValue:
-      return Int( value_.array_->size() );
-   case objectValue:
-      return Int( value_.map_->size() );
-#endif
-   }
-   JSON_ASSERT_UNREACHABLE;
-   return 0; // unreachable;
-}
-
-
-bool 
-Value::empty() const
-{
-   if ( isNull() || isArray() || isObject() )
-      return size() == 0u;
-   else
-      return false;
-}
-
-
-bool
-Value::operator!() const
-{
-   return isNull();
-}
-
-
-void 
-Value::clear()
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue  || type_ == objectValue );
-
-   switch ( type_ )
-   {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_->clear();
-      break;
-#else
-   case arrayValue:
-      value_.array_->clear();
-      break;
-   case objectValue:
-      value_.map_->clear();
-      break;
-#endif
-   default:
-      break;
-   }
-}
-
-void 
-Value::resize( ArrayIndex newSize )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      *this = Value( arrayValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   ArrayIndex oldSize = size();
-   if ( newSize == 0 )
-      clear();
-   else if ( newSize > oldSize )
-      (*this)[ newSize - 1 ];
-   else
-   {
-      for ( ArrayIndex index = newSize; index < oldSize; ++index )
-      {
-         value_.map_->erase( index );
-      }
-      assert( size() == newSize );
-   }
-#else
-   value_.array_->resize( newSize );
-#endif
-}
-
-
-Value &
-Value::operator[]( ArrayIndex index )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      *this = Value( arrayValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString key( index );
-   ObjectValues::iterator it = value_.map_->lower_bound( key );
-   if ( it != value_.map_->end()  &&  (*it).first == key )
-      return (*it).second;
-
-   ObjectValues::value_type defaultValue( key, null );
-   it = value_.map_->insert( it, defaultValue );
-   return (*it).second;
-#else
-   return value_.array_->resolveReference( index );
-#endif
-}
-
-
-Value &
-Value::operator[]( int index )
-{
-   JSON_ASSERT( index >= 0 );
-   return (*this)[ ArrayIndex(index) ];
-}
-
-
-const Value &
-Value::operator[]( ArrayIndex index ) const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString key( index );
-   ObjectValues::const_iterator it = value_.map_->find( key );
-   if ( it == value_.map_->end() )
-      return null;
-   return (*it).second;
-#else
-   Value *value = value_.array_->find( index );
-   return value ? *value : null;
-#endif
-}
-
-
-const Value &
-Value::operator[]( int index ) const
-{
-   JSON_ASSERT( index >= 0 );
-   return (*this)[ ArrayIndex(index) ];
-}
-
-
-Value &
-Value::operator[]( const char *key )
-{
-   return resolveReference( key, false );
-}
-
-
-Value &
-Value::resolveReference( const char *key, 
-                         bool isStatic )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      *this = Value( objectValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, isStatic ? CZString::noDuplication 
-                                     : CZString::duplicateOnCopy );
-   ObjectValues::iterator it = value_.map_->lower_bound( actualKey );
-   if ( it != value_.map_->end()  &&  (*it).first == actualKey )
-      return (*it).second;
-
-   ObjectValues::value_type defaultValue( actualKey, null );
-   it = value_.map_->insert( it, defaultValue );
-   Value &value = (*it).second;
-   return value;
-#else
-   return value_.map_->resolveReference( key, isStatic );
-#endif
-}
-
-
-Value 
-Value::get( ArrayIndex index, 
-            const Value &defaultValue ) const
-{
-   const Value *value = &((*this)[index]);
-   return value == &null ? defaultValue : *value;
-}
-
-
-bool 
-Value::isValidIndex( ArrayIndex index ) const
-{
-   return index < size();
-}
-
-
-
-const Value &
-Value::operator[]( const char *key ) const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, CZString::noDuplication );
-   ObjectValues::const_iterator it = value_.map_->find( actualKey );
-   if ( it == value_.map_->end() )
-      return null;
-   return (*it).second;
-#else
-   const Value *value = value_.map_->find( key );
-   return value ? *value : null;
-#endif
-}
-
-
-Value &
-Value::operator[]( const std::string &key )
-{
-   return (*this)[ key.c_str() ];
-}
-
-
-const Value &
-Value::operator[]( const std::string &key ) const
-{
-   return (*this)[ key.c_str() ];
-}
-
-Value &
-Value::operator[]( const StaticString &key )
-{
-   return resolveReference( key, true );
-}
-
-
-# ifdef JSON_USE_CPPTL
-Value &
-Value::operator[]( const CppTL::ConstString &key )
-{
-   return (*this)[ key.c_str() ];
-}
-
-
-const Value &
-Value::operator[]( const CppTL::ConstString &key ) const
-{
-   return (*this)[ key.c_str() ];
-}
-# endif
-
-
-Value &
-Value::append( const Value &value )
-{
-   return (*this)[size()] = value;
-}
-
-
-Value 
-Value::get( const char *key, 
-            const Value &defaultValue ) const
-{
-   const Value *value = &((*this)[key]);
-   return value == &null ? defaultValue : *value;
-}
-
-
-Value 
-Value::get( const std::string &key,
-            const Value &defaultValue ) const
-{
-   return get( key.c_str(), defaultValue );
-}
-
-Value
-Value::removeMember( const char* key )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, CZString::noDuplication );
-   ObjectValues::iterator it = value_.map_->find( actualKey );
-   if ( it == value_.map_->end() )
-      return null;
-   Value old(it->second);
-   value_.map_->erase(it);
-   return old;
-#else
-   Value *value = value_.map_->find( key );
-   if (value){
-      Value old(*value);
-      value_.map_.remove( key );
-      return old;
-   } else {
-      return null;
-   }
-#endif
-}
-
-Value
-Value::removeMember( const std::string &key )
-{
-   return removeMember( key.c_str() );
-}
-
-# ifdef JSON_USE_CPPTL
-Value 
-Value::get( const CppTL::ConstString &key,
-            const Value &defaultValue ) const
-{
-   return get( key.c_str(), defaultValue );
-}
-# endif
-
-bool 
-Value::isMember( const char *key ) const
-{
-   const Value *value = &((*this)[key]);
-   return value != &null;
-}
-
-
-bool 
-Value::isMember( const std::string &key ) const
-{
-   return isMember( key.c_str() );
-}
-
-
-# ifdef JSON_USE_CPPTL
-bool 
-Value::isMember( const CppTL::ConstString &key ) const
-{
-   return isMember( key.c_str() );
-}
-#endif
-
-Value::Members 
-Value::getMemberNames() const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-       return Value::Members();
-   Members members;
-   members.reserve( value_.map_->size() );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   ObjectValues::const_iterator it = value_.map_->begin();
-   ObjectValues::const_iterator itEnd = value_.map_->end();
-   for ( ; it != itEnd; ++it )
-      members.push_back( std::string( (*it).first.c_str() ) );
-#else
-   ValueInternalMap::IteratorState it;
-   ValueInternalMap::IteratorState itEnd;
-   value_.map_->makeBeginIterator( it );
-   value_.map_->makeEndIterator( itEnd );
-   for ( ; !ValueInternalMap::equals( it, itEnd ); ValueInternalMap::increment(it) )
-      members.push_back( std::string( ValueInternalMap::key( it ) ) );
-#endif
-   return members;
-}
-//
-//# ifdef JSON_USE_CPPTL
-//EnumMemberNames
-//Value::enumMemberNames() const
-//{
-//   if ( type_ == objectValue )
-//   {
-//      return CppTL::Enum::any(  CppTL::Enum::transform(
-//         CppTL::Enum::keys( *(value_.map_), CppTL::Type<const CZString &>() ),
-//         MemberNamesTransform() ) );
-//   }
-//   return EnumMemberNames();
-//}
-//
-//
-//EnumValues 
-//Value::enumValues() const
-//{
-//   if ( type_ == objectValue  ||  type_ == arrayValue )
-//      return CppTL::Enum::anyValues( *(value_.map_), 
-//                                     CppTL::Type<const Value &>() );
-//   return EnumValues();
-//}
-//
-//# endif
-
-static bool IsIntegral(double d) {
-  double integral_part;
-  return modf(d, &integral_part) == 0.0;
-}
-
-
-bool
-Value::isNull() const
-{
-   return type_ == nullValue;
-}
-
-
-bool 
-Value::isBool() const
-{
-   return type_ == booleanValue;
-}
-
-
-bool 
-Value::isInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return value_.int_ >= minInt  &&  value_.int_ <= maxInt;
-   case uintValue:
-      return value_.uint_ <= UInt(maxInt);
-   case realValue:
-      return value_.real_ >= minInt &&
-             value_.real_ <= maxInt &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-   return false;
-}
-
-
-bool 
-Value::isUInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return value_.int_ >= 0 && LargestUInt(value_.int_) <= LargestUInt(maxUInt);
-   case uintValue:
-      return value_.uint_ <= maxUInt;
-   case realValue:
-      return value_.real_ >= 0 &&
-             value_.real_ <= maxUInt &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-   return false;
-}
-
-bool 
-Value::isInt64() const
-{
-# if defined(JSON_HAS_INT64)
-   switch ( type_ )
-   {
-   case intValue:
-     return true;
-   case uintValue:
-      return value_.uint_ <= UInt64(maxInt64);
-   case realValue:
-      // Note that maxInt64 (= 2^63 - 1) is not exactly representable as a
-      // double, so double(maxInt64) will be rounded up to 2^63. Therefore we
-      // require the value to be strictly less than the limit.
-      return value_.real_ >= double(minInt64) &&
-             value_.real_ < double(maxInt64) &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-# endif  // JSON_HAS_INT64
-   return false;
-}
-
-bool 
-Value::isUInt64() const
-{
-# if defined(JSON_HAS_INT64)
-   switch ( type_ )
-   {
-   case intValue:
-     return value_.int_ >= 0;
-   case uintValue:
-      return true;
-   case realValue:
-      // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a
-      // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we
-      // require the value to be strictly less than the limit.
-      return value_.real_ >= 0 &&
-             value_.real_ < maxUInt64AsDouble &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-# endif  // JSON_HAS_INT64
-   return false;
-}
-
-
-bool 
-Value::isIntegral() const
-{
-#if defined(JSON_HAS_INT64)
-  return isInt64() || isUInt64();
-#else
-  return isInt() || isUInt();
-#endif
-}
-
-
-bool 
-Value::isDouble() const
-{
-   return type_ == realValue || isIntegral();
-}
-
-
-bool 
-Value::isNumeric() const
-{
-   return isIntegral() || isDouble();
-}
-
-
-bool 
-Value::isString() const
-{
-   return type_ == stringValue;
-}
-
-
-bool 
-Value::isArray() const
-{
-   return type_ == arrayValue;
-}
-
-
-bool 
-Value::isObject() const
-{
-   return type_ == objectValue;
-}
-
-
-void 
-Value::setComment( const char *comment,
-                   CommentPlacement placement )
-{
-   if ( !comments_ )
-      comments_ = new CommentInfo[numberOfCommentPlacement];
-   comments_[placement].setComment( comment );
-}
-
-
-void 
-Value::setComment( const std::string &comment,
-                   CommentPlacement placement )
-{
-   setComment( comment.c_str(), placement );
-}
-
-
-bool 
-Value::hasComment( CommentPlacement placement ) const
-{
-   return comments_ != 0  &&  comments_[placement].comment_ != 0;
-}
-
-std::string 
-Value::getComment( CommentPlacement placement ) const
-{
-   if ( hasComment(placement) )
-      return comments_[placement].comment_;
-   return "";
-}
-
-
-std::string 
-Value::toStyledString() const
-{
-   StyledWriter writer;
-   return writer.write( *this );
-}
-
-
-Value::const_iterator 
-Value::begin() const
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeBeginIterator( it );
-         return const_iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeBeginIterator( it );
-         return const_iterator( it );
-      }
-      break;
-#else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return const_iterator( value_.map_->begin() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return const_iterator();
-}
-
-Value::const_iterator 
-Value::end() const
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeEndIterator( it );
-         return const_iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeEndIterator( it );
-         return const_iterator( it );
-      }
-      break;
-#else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return const_iterator( value_.map_->end() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return const_iterator();
-}
-
-
-Value::iterator 
-Value::begin()
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeBeginIterator( it );
-         return iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeBeginIterator( it );
-         return iterator( it );
-      }
-      break;
-#else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return iterator( value_.map_->begin() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return iterator();
-}
-
-Value::iterator 
-Value::end()
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeEndIterator( it );
-         return iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeEndIterator( it );
-         return iterator( it );
-      }
-      break;
-#else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return iterator( value_.map_->end() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return iterator();
-}
-
-
-// class PathArgument
-// //////////////////////////////////////////////////////////////////
-
-PathArgument::PathArgument()
-   : key_()
-   , index_()
-   , kind_( kindNone )
-{
-}
-
-
-PathArgument::PathArgument( ArrayIndex index )
-   : key_()
-   , index_( index )
-   , kind_( kindIndex )
-{
-}
-
-
-PathArgument::PathArgument( const char *key )
-   : key_( key )
-   , index_()
-   , kind_( kindKey )
-{
-}
-
-
-PathArgument::PathArgument( const std::string &key )
-   : key_( key.c_str() )
-   , index_()
-   , kind_( kindKey )
-{
-}
-
-// class Path
-// //////////////////////////////////////////////////////////////////
-
-Path::Path( const std::string &path,
-            const PathArgument &a1,
-            const PathArgument &a2,
-            const PathArgument &a3,
-            const PathArgument &a4,
-            const PathArgument &a5 )
-{
-   InArgs in;
-   in.push_back( &a1 );
-   in.push_back( &a2 );
-   in.push_back( &a3 );
-   in.push_back( &a4 );
-   in.push_back( &a5 );
-   makePath( path, in );
-}
-
-
-void 
-Path::makePath( const std::string &path,
-                const InArgs &in )
-{
-   const char *current = path.c_str();
-   const char *end = current + path.length();
-   InArgs::const_iterator itInArg = in.begin();
-   while ( current != end )
-   {
-      if ( *current == '[' )
-      {
-         ++current;
-         if ( *current == '%' )
-            addPathInArg( path, in, itInArg, PathArgument::kindIndex );
-         else
-         {
-            ArrayIndex index = 0;
-            for ( ; current != end && *current >= '0'  &&  *current <= '9'; ++current )
-               index = index * 10 + ArrayIndex(*current - '0');
-            args_.push_back( index );
-         }
-         if ( current == end  ||  *current++ != ']' )
-            invalidPath( path, int(current - path.c_str()) );
-      }
-      else if ( *current == '%' )
-      {
-         addPathInArg( path, in, itInArg, PathArgument::kindKey );
-         ++current;
-      }
-      else if ( *current == '.' )
-      {
-         ++current;
-      }
-      else
-      {
-         const char *beginName = current;
-         while ( current != end  &&  !strchr( "[.", *current ) )
-            ++current;
-         args_.push_back( std::string( beginName, current ) );
-      }
-   }
-}
-
-
-void 
-Path::addPathInArg( const std::string &path, 
-                    const InArgs &in, 
-                    InArgs::const_iterator &itInArg, 
-                    PathArgument::Kind kind )
-{
-   if ( itInArg == in.end() )
-   {
-      // Error: missing argument %d
-   }
-   else if ( (*itInArg)->kind_ != kind )
-   {
-      // Error: bad argument type
-   }
-   else
-   {
-      args_.push_back( **itInArg );
-   }
-}
-
-
-void 
-Path::invalidPath( const std::string &path, 
-                   int location )
-{
-   // Error: invalid path.
-}
-
-
-const Value &
-Path::resolve( const Value &root ) const
-{
-   const Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray()  ||  !node->isValidIndex( arg.index_ ) )
-         {
-            // Error: unable to resolve path (array value expected at position...
-         }
-         node = &((*node)[arg.index_]);
-      }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-         {
-            // Error: unable to resolve path (object value expected at position...)
-         }
-         node = &((*node)[arg.key_]);
-         if ( node == &Value::null )
-         {
-            // Error: unable to resolve path (object has no member named '' at position...)
-         }
-      }
-   }
-   return *node;
-}
-
-
-Value 
-Path::resolve( const Value &root, 
-               const Value &defaultValue ) const
-{
-   const Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray()  ||  !node->isValidIndex( arg.index_ ) )
-            return defaultValue;
-         node = &((*node)[arg.index_]);
-      }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-            return defaultValue;
-         node = &((*node)[arg.key_]);
-         if ( node == &Value::null )
-            return defaultValue;
-      }
-   }
-   return *node;
-}
-
-
-Value &
-Path::make( Value &root ) const
-{
-   Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray() )
-         {
-            // Error: node is not an array at position ...
-         }
-         node = &((*node)[arg.index_]);
-      }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-         {
-            // Error: node is not an object at position...
-         }
-         node = &((*node)[arg.key_]);
-      }
-   }
-   return *node;
-}
-
-
-} // namespace Json
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/patches/value.h.diff b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/patches/value.h.diff
deleted file mode 100644
index bce57b2..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/patches/value.h.diff
+++ /dev/null
@@ -1,37 +0,0 @@
-diff --git "a/source\\include\\json\\value.h" "b/overrides\\include\\json\\value.h"
-index b013c9b..5707260 100644
---- "a/source\\include\\json\\value.h"
-+++ "b/overrides\\include\\json\\value.h"
-@@ -7,7 +7,7 @@
- # define CPPTL_JSON_H_INCLUDED
- 
- #if !defined(JSON_IS_AMALGAMATION)
--# include "forwards.h"
-+# include "third_party/jsoncpp/source/include/json/forwards.h"
- #endif // if !defined(JSON_IS_AMALGAMATION)
- # include <string>
- # include <vector>
-@@ -136,7 +136,7 @@ namespace Json {
-       typedef Json::LargestUInt LargestUInt;
-       typedef Json::ArrayIndex ArrayIndex;
- 
--      static const Value null;
-+      static const Value& null;
-       /// Minimum signed integer value that can be stored in a Json::Value.
-       static const LargestInt minLargestInt;
-       /// Maximum signed integer value that can be stored in a Json::Value.
-@@ -496,10 +496,12 @@ namespace Json {
- # endif
-       } value_;
-       ValueType type_ : 8;
--      int allocated_ : 1;     // Notes: if declared as bool, bitfield is useless.
-+      // One-bit bitfields must be unsigned to allow storing 1.
-+      // They must be 32-bits to share storage with ValueHolder.
-+      unsigned int allocated_ : 1;
- # ifdef JSON_VALUE_USE_INTERNAL_MAP
-       unsigned int itemIsUsed_ : 1;      // used by the ValueInternalMap container.
--      int memberNameIsStatic_ : 1;       // used by the ValueInternalMap container.
-+      unsigned int memberNameIsStatic_ : 1; // used by the ValueInternalMap container.
- # endif
-       CommentInfo *comments_;
-    };
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.clang-format b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.clang-format
new file mode 100644
index 0000000..2a372fc
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.clang-format
@@ -0,0 +1,47 @@
+---
+# BasedOnStyle:  LLVM
+AccessModifierOffset: -2
+ConstructorInitializerIndentWidth: 4
+AlignEscapedNewlinesLeft: false
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakTemplateDeclarations: false
+AlwaysBreakBeforeMultilineStrings: false
+BreakBeforeBinaryOperators: false
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BinPackParameters: false
+ColumnLimit:     80
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+DerivePointerBinding: false
+ExperimentalAutoDetectBinPacking: false
+IndentCaseLabels: false
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 60
+PenaltyBreakString: 1000
+PenaltyBreakFirstLessLess: 120
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerBindsToType: true
+SpacesBeforeTrailingComments: 1
+Cpp11BracedListStyle: true
+Standard:        Cpp11
+IndentWidth:     2
+TabWidth:        8
+UseTab:          Never
+BreakBeforeBraces: Attach
+IndentFunctionDeclarationAfterType: false
+SpacesInParentheses: false
+SpacesInAngles:  false
+SpaceInEmptyParentheses: false
+SpacesInCStyleCastParentheses: false
+SpaceAfterControlStatementKeyword: true
+SpaceBeforeAssignmentOperators: true
+ContinuationIndentWidth: 4
+...
+
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitattributes b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitattributes
new file mode 100644
index 0000000..22d2b7a
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitattributes
@@ -0,0 +1,11 @@
+*           text=auto
+*.h         text
+*.cpp       text
+*.json      text
+*.in        text
+*.sh        eol=lf
+*.bat       eol=crlf
+*.vcproj    eol=crlf
+*.vcxproj   eol=crlf
+*.sln       eol=crlf
+devtools/agent_vm* eol=crlf
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/bug_report.md b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..3547709
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,26 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1.
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Meson version
+ - Ninja version
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/feature_request.md b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..bbcbbe7
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitignore b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitignore
new file mode 100644
index 0000000..7420697
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.gitignore
@@ -0,0 +1,55 @@
+/build/
+/build-*/
+*.pyc
+*.swp
+*.actual
+*.actual-rewrite
+*.process-output
+*.rewrite
+/bin/
+/libs/
+/doc/doxyfile
+/dist/
+#/version
+#/include/json/version.h
+
+# MSVC project files:
+*.sln
+*.vcxproj
+*.filters
+*.user
+*.sdf
+*.opensdf
+*.suo
+
+# MSVC build files:
+*.lib
+*.obj
+*.tlog/
+*.pdb
+
+# CMake-generated files:
+CMakeFiles/
+*.cmake
+/pkg-config/jsoncpp.pc
+jsoncpp_lib_static.dir/
+
+# In case someone runs cmake in the root-dir:
+/CMakeCache.txt
+/Makefile
+/include/Makefile
+/src/Makefile
+/src/jsontestrunner/Makefile
+/src/jsontestrunner/jsontestrunner_exe
+/src/lib_json/Makefile
+/src/test_lib_json/Makefile
+/src/test_lib_json/jsoncpp_test
+*.a
+
+# eclipse project files
+.project
+.cproject
+/.settings/
+
+# DS_Store
+.DS_Store
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis.yml b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis.yml
new file mode 100644
index 0000000..b649b46
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis.yml
@@ -0,0 +1,65 @@
+# Build matrix / environment variables are explained on:
+# http://about.travis-ci.org/docs/user/build-configuration/
+# This file can be validated on: http://www.yamllint.com/
+# Or using the Ruby-based travis command line tool:
+# gem install travis --no-rdoc --no-ri
+# travis lint .travis.yml
+language: cpp
+sudo: false
+addons:
+  homebrew:
+    packages:
+    - meson
+    - ninja
+    update: false # do not update homebrew by default
+  apt:
+    sources:
+    - ubuntu-toolchain-r-test
+    - llvm-toolchain-xenial-8
+    packages:
+    - clang-8
+    - valgrind
+matrix:
+  allow_failures:
+    - os: osx
+  include:
+    - name: Mac clang meson static release testing
+      os: osx
+      osx_image: xcode10.2
+      compiler: clang
+      env:
+         CXX="clang++"
+         CC="clang"
+         LIB_TYPE=static
+         BUILD_TYPE=release
+      script: ./.travis_scripts/meson_builder.sh
+    - name: Linux xenial clang meson static release testing
+      os: linux
+      dist: xenial
+      compiler: clang
+      env:
+         CXX="clang++"
+         CC="clang"
+         LIB_TYPE=static
+         BUILD_TYPE=release
+      # before_install and install steps only needed for linux meson builds
+      before_install:
+          - source ./.travis_scripts/travis.before_install.${TRAVIS_OS_NAME}.sh
+      install:
+           - source ./.travis_scripts/travis.install.${TRAVIS_OS_NAME}.sh
+      script: ./.travis_scripts/meson_builder.sh
+    - name: Linux xenial gcc cmake coverage
+      os: linux
+      dist: xenial
+      compiler: gcc
+      env:
+        CXX=g++
+        CC=gcc
+        DO_Coverage=ON
+        BUILD_TOOL="Unix Makefiles"
+        BUILD_TYPE=Debug
+        LIB_TYPE=shared
+        DESTDIR=/tmp/cmake_json_cpp
+      script: ./.travis_scripts/cmake_builder.sh
+notifications:
+  email: false
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/cmake_builder.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/cmake_builder.sh
new file mode 100755
index 0000000..ccb3331
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/cmake_builder.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env sh
+# This script can be used on the command line directly to configure several
+# different build environments.
+# This is called by `.travis.yml` via Travis CI.
+# Travis supplies $TRAVIS_OS_NAME.
+#  http://docs.travis-ci.com/user/multi-os/
+# Our .travis.yml also defines:
+
+#   - BUILD_TYPE=Release/Debug
+#   - LIB_TYPE=static/shared
+#
+# Optional environmental variables
+#   - DESTDIR <- used for setting the install prefix
+#   - BUILD_TOOL=["Unix Makefile"|"Ninja"]
+#   - BUILDNAME <- how to identify this build on the dashboard
+#   - DO_MemCheck <- if set, try to use valgrind
+#   - DO_Coverage <- if set, try to do dashboard coverage testing
+#
+
+env_set=1
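+# Note: ${VAR+false} expands to "false" when VAR is set (skipping the warning below)
+# and to nothing when VAR is unset, so each check only fires for a missing variable.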
+if ${BUILD_TYPE+false}; then
+  echo "BUILD_TYPE not set in environment."
+  env_set=0
+fi
+if ${LIB_TYPE+false}; then
+  echo "LIB_TYPE not set in environment."
+  env_set=0
+fi
+if ${CXX+false}; then
+  echo "CXX not set in environment."
+  env_set=0
+fi
+
+
+if [ ${env_set} -eq 0 ]; then
+  echo "USAGE:  CXX=$(which clang++)  BUILD_TYPE=[Release|Debug] LIB_TYPE=[static|shared] $0"
+  echo ""
+  echo "Examples:"
+  echo "           CXX=$(which clang++) BUILD_TYPE=Release LIB_TYPE=shared DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=Debug   LIB_TYPE=shared DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=Release LIB_TYPE=static DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=Debug   LIB_TYPE=static DESTDIR=/tmp/cmake_json_cpp $0"
+
+  echo "           CXX=$(which g++)     BUILD_TYPE=Release LIB_TYPE=shared DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=Debug   LIB_TYPE=shared DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=Release LIB_TYPE=static DESTDIR=/tmp/cmake_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=Debug   LIB_TYPE=static DESTDIR=/tmp/cmake_json_cpp $0"
+
+  exit -1
+fi
+
+if ${DESTDIR+false}; then
+  DESTDIR="/usr/local"
+fi
+
+# -e: fail on error
+# -v: show commands
+# -x: show expanded commands
+set -vex
+
+env | sort
+
+which cmake
+cmake --version
+
+echo ${CXX}
+${CXX} --version
+_COMPILER_NAME=`basename ${CXX}`
+if [ "${LIB_TYPE}" = "shared" ]; then
+  _CMAKE_BUILD_SHARED_LIBS=ON
+else
+  _CMAKE_BUILD_SHARED_LIBS=OFF
+fi
+
+CTEST_TESTING_OPTION="-D ExperimentalTest"
+#   - DO_MemCheck <- if set, try to use valgrind
+if ! ${DO_MemCheck+false}; then
+   valgrind --version
+   CTEST_TESTING_OPTION="-D ExperimentalMemCheck"
+else
+#   - DO_Coverage <- if set, try to do dashboard coverage testing
+  if ! ${DO_Coverage+false}; then
+     export CXXFLAGS="-fprofile-arcs -ftest-coverage"
+     export LDFLAGS="-fprofile-arcs -ftest-coverage"
+     CTEST_TESTING_OPTION="-D ExperimentalTest -D ExperimentalCoverage"
+     #gcov --version
+  fi
+fi
+
+#  Ninja                        = Generates build.ninja files.
+if ${BUILD_TOOL+false}; then
+  BUILD_TOOL="Ninja"
+  export _BUILD_EXE=ninja
+  which ninja
+  ninja --version
+else
+#  Unix Makefiles               = Generates standard UNIX makefiles.
+  export _BUILD_EXE=make
+fi
+
+_BUILD_DIR_NAME="build-cmake_${BUILD_TYPE}_${LIB_TYPE}_${_COMPILER_NAME}_${_BUILD_EXE}"
+mkdir -p ${_BUILD_DIR_NAME}
+cd "${_BUILD_DIR_NAME}"
+  if ${BUILDNAME+false}; then
+     _HOSTNAME=`hostname -s`
+     BUILDNAME="${_HOSTNAME}_${BUILD_TYPE}_${LIB_TYPE}_${_COMPILER_NAME}_${_BUILD_EXE}"
+  fi
+  cmake \
+    -G "${BUILD_TOOL}" \
+    -DBUILDNAME:STRING="${BUILDNAME}" \
+    -DCMAKE_CXX_COMPILER:PATH=${CXX} \
+    -DCMAKE_BUILD_TYPE:STRING=${BUILD_TYPE} \
+    -DBUILD_SHARED_LIBS:BOOL=${_CMAKE_BUILD_SHARED_LIBS} \
+    -DCMAKE_INSTALL_PREFIX:PATH=${DESTDIR} \
+    ../
+
+  ctest -C ${BUILD_TYPE} -D ExperimentalStart -D ExperimentalConfigure -D ExperimentalBuild ${CTEST_TESTING_OPTION} -D ExperimentalSubmit
+  # Final step is to verify that installation succeeds
+  cmake --build . --config ${BUILD_TYPE} --target install
+
+  if [ "${DESTDIR}" != "/usr/local" ]; then
+     ${_BUILD_EXE} install
+  fi
+cd -
+
+if ${CLEANUP+false}; then
+  echo "Skipping cleanup: build directory will persist."
+else
+  rm -r "${_BUILD_DIR_NAME}"
+fi
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/meson_builder.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/meson_builder.sh
new file mode 100755
index 0000000..dbf03cb
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/meson_builder.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env sh
+# This script can be used on the command line directly to configure several
+# different build environments.
+# This is called by `.travis.yml` via Travis CI.
+# Travis supplies $TRAVIS_OS_NAME.
+#  http://docs.travis-ci.com/user/multi-os/
+# Our .travis.yml also defines:
+
+#   - BUILD_TYPE=release/debug
+#   - LIB_TYPE=static/shared
+
+env_set=1
+if ${BUILD_TYPE+false}; then
+  echo "BUILD_TYPE not set in environment."
+  env_set=0
+fi
+if ${LIB_TYPE+false}; then
+  echo "LIB_TYPE not set in environment."
+  env_set=0
+fi
+if ${CXX+false}; then
+  echo "CXX not set in environment."
+  env_set=0
+fi
+
+
+if [ ${env_set} -eq 0 ]; then
+  echo "USAGE:  CXX=$(which clang++)  BUILD_TYPE=[release|debug] LIB_TYPE=[static|shared] $0"
+  echo ""
+  echo "Examples:"
+  echo "           CXX=$(which clang++) BUILD_TYPE=release LIB_TYPE=shared DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=debug   LIB_TYPE=shared DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=release LIB_TYPE=static DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which clang++) BUILD_TYPE=debug   LIB_TYPE=static DESTDIR=/tmp/meson_json_cpp $0"
+
+  echo "           CXX=$(which g++)     BUILD_TYPE=release LIB_TYPE=shared DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=debug   LIB_TYPE=shared DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=release LIB_TYPE=static DESTDIR=/tmp/meson_json_cpp $0"
+  echo "           CXX=$(which g++)     BUILD_TYPE=debug   LIB_TYPE=static DESTDIR=/tmp/meson_json_cpp $0"
+
+  exit -1
+fi
+
+if ${DESTDIR+false}; then
+  DESTDIR="/usr/local"
+fi
+
+# -e: fail on error
+# -v: show commands
+# -x: show expanded commands
+set -vex
+
+
+env | sort
+
+which python3
+which meson
+which ninja
+echo ${CXX}
+${CXX} --version
+python3 --version
+meson --version
+ninja --version
+_COMPILER_NAME=`basename ${CXX}`
+_BUILD_DIR_NAME="build-${BUILD_TYPE}_${LIB_TYPE}_${_COMPILER_NAME}"
+meson --buildtype ${BUILD_TYPE} --default-library ${LIB_TYPE} . "${_BUILD_DIR_NAME}"
+ninja -v -j 2 -C "${_BUILD_DIR_NAME}"
+#ninja -v -j 2 -C "${_BUILD_DIR_NAME}" test
+cd "${_BUILD_DIR_NAME}"
+  meson test --no-rebuild --print-errorlogs
+
+  if [ "${DESTDIR}" != "/usr/local" ]; then
+     ninja install
+  fi
+cd -
+
+if ${CLEANUP+false}; then
+  echo "Skipping cleanup:  build directory will persist."
+else
+  rm -r "${_BUILD_DIR_NAME}"
+fi
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.linux.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.linux.sh
new file mode 100644
index 0000000..9b556de
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.linux.sh
@@ -0,0 +1,8 @@
+set -vex
+
+# Preinstalled versions of python are dependent on which Ubuntu distribution
+# you are running. The below version needs to be updated whenever we roll
+# the Ubuntu version used in Travis.
+# https://docs.travis-ci.com/user/languages/python/
+
+pyenv global 3.7.1
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.osx.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.osx.sh
new file mode 100644
index 0000000..5d83c0c
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.before_install.osx.sh
@@ -0,0 +1 @@
+# NOTHING TO DO HERE
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.linux.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.linux.sh
new file mode 100644
index 0000000..7c5846f
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.linux.sh
@@ -0,0 +1,10 @@
+set -vex
+
+wget https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip
+unzip -q ninja-linux.zip -d build
+
+pip3 install meson
+echo ${PATH}
+ls /usr/local
+ls /usr/local/bin
+export PATH="${PWD}"/build:/usr/local/bin:/usr/bin:${PATH}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.osx.sh b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.osx.sh
new file mode 100644
index 0000000..5d83c0c
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/.travis_scripts/travis.install.osx.sh
@@ -0,0 +1 @@
+# NOTHING TO DO HERE
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/AUTHORS b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/AUTHORS
index c0fbbee..3723d54 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/AUTHORS
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/AUTHORS
@@ -1 +1,113 @@
 Baptiste Lepilleur <blep@users.sourceforge.net>
+
+Aaron Jacobs <aaronjjacobs@gmail.com>
+Aaron Jacobs <jacobsa@google.com>
+Adam Boseley <ABoseley@agjunction.com>
+Adam Boseley <adam.boseley@gmail.com>
+Aleksandr Derbenev <13alexac@gmail.com>
+Alexander Gazarov <DrMetallius@users.noreply.github.com>
+Alexander V. Brezgin <abrezgin@appliedtech.ru>
+Alexandr Brezgin <albrezgin@mail.ru>
+Alexey Kruchinin <alexey@mopals.com>
+Anton Indrawan <anton.indrawan@gmail.com>
+Baptiste Jonglez <git@bitsofnetworks.org>
+Baptiste Lepilleur <baptiste.lepilleur@gmail.com>
+Baruch Siach <baruch@tkos.co.il>
+Ben Boeckel <mathstuf@gmail.com>
+Benjamin Knecht <bknecht@logitech.com>
+Bernd Kuhls <bernd.kuhls@t-online.de>
+Billy Donahue <billydonahue@google.com>
+Braden McDorman <bmcdorman@gmail.com>
+Brandon Myers <bmyers1788@gmail.com>
+Brendan Drew <brendan.drew@daqri.com>
+chason <cxchao802@gmail.com>
+Chris Gilling <cgilling@iparadigms.com>
+Christopher Dawes <christopher.dawes.1981@googlemail.com>
+Christopher Dunn <cdunn2001@gmail.com>
+Chuck Atkins <chuck.atkins@kitware.com>
+Cody P Schafer <dev@codyps.com>
+Connor Manning <connor@hobu.co>
+Cory Quammen <cory.quammen@kitware.com>
+Cristóvão B da Cruz e Silva <CrisXed@gmail.com>
+Daniel Krügler <daniel.kruegler@gmail.com>
+Dani-Hub <daniel.kruegler@googlemail.com>
+Dan Liu <gzliudan>
+datadiode <datadiode@users.noreply.github.com>
+datadiode <jochen.neubeck@vodafone.de>
+David Seifert <soap@gentoo.org>
+David West <david-west@idexx.com>
+dawesc <chris.dawes@eftlab.co.uk>
+Devin Jeanpierre <jeanpierreda@google.com>
+Dmitry Marakasov <amdmi3@amdmi3.ru>
+dominicpezzuto <dom@dompezzuto.com>
+Don Milham <dmilham@gmail.com>
+drgler <daniel.kruegler@gmail.com>
+ds283 <D.Seery@sussex.ac.uk>
+Egor Tensin <Egor.Tensin@gmail.com>
+eightnoteight <mr.eightnoteight@gmail.com>
+Evince <baneyue@gmail.com>
+filipjs <filipjs@users.noreply.github.com>
+findblar <ft@finbarr.ca>
+Florian Meier <florian.meier@koalo.de>
+Gaëtan Lehmann <gaetan.lehmann@gmail.com>
+Gaurav <g.gupta@samsung.com>
+Gergely Nagy <ngg@ngg.hu>
+Gida Pataki <gida.pataki@prezi.com>
+I3ck <buckmartin@buckmartin.de>
+Iñaki Baz Castillo <ibc@aliax.net>
+Jacco <jacco@geul.net>
+Jean-Christophe Fillion-Robin <jchris.fillionr@kitware.com>
+Jonas Platte <mail@jonasplatte.de>
+Jordan Bayles <bayles.jordan@gmail.com>
+Jörg Krause <joerg.krause@embedded.rocks>
+Keith Lea <keith@whamcitylights.com>
+Kevin Grant <kbradleygrant@gmail.com>
+Kirill V. Lyadvinsky <jia3ep@gmail.com>
+Kirill V. Lyadvinsky <mail@codeatcpp.com>
+Kobi Gurkan <kobigurk@gmail.com>
+Magnus Bjerke Vik <mbvett@gmail.com>
+Malay Shah <malays@users.sourceforge.net>
+Mara Kim <hacker.root@gmail.com>
+Marek Kotewicz <marek.kotewicz@gmail.com>
+Mark Lakata <mark@lakata.org>
+Mark Zeren <mzeren@vmware.com>
+Martin Buck <buckmartin@buckmartin.de>
+Martyn Gigg <martyn.gigg@gmail.com>
+Mattes D <github@xoft.cz>
+Matthias Loy <matthias.loy@hbm.com>
+Merlyn Morgan-Graham <kavika@gmail.com>
+Michael Shields <mshields@google.com>
+Michał Górny <mgorny@gentoo.org>
+Mike Naberezny <mike@naberezny.com>
+mloy <matthias.loy@googlemail.com>
+Motti <lanzkron@gmail.com>
+nnkur <nnkur@mail.ru>
+Omkar Wagh <owagh@owaghlinux.ny.tower-research.com>
+paulo <paulobrizolara@users.noreply.github.com>
+pavel.pimenov <pavel.pimenov@gmail.com>
+Paweł Bylica <chfast@gmail.com>
+Péricles Lopes Machado <pericles.raskolnikoff@gmail.com>
+Peter Spiess-Knafl <psk@autistici.org>
+pffang <pffang@vip.qq.com>
+Rémi Verschelde <remi@verschelde.fr>
+renu555 <renu.tyagi@samsung.com>
+Robert Dailey <rcdailey@gmail.com>
+Sam Clegg <sbc@chromium.org>
+selaselah <selah@outlook.com>
+Sergiy80 <sil2004@gmail.com>
+sergzub <sergzub@gmail.com>
+Stefan Schweter <stefan@schweter.it>
+Steffen Kieß <Steffen.Kiess@ipvs.uni-stuttgart.de>
+Steven Hahn <hahnse@ornl.gov>
+Stuart Eichert <stuart@fivemicro.com>
+SuperManitu <supermanitu@gmail.com>
+Techwolf <dring@g33kworld.net>
+Tengiz Sharafiev <btolfa+github@gmail.com>
+Tomasz Maciejewski <tmaciejewsk@gmail.com>
+Vicente Olivert Riera <Vincent.Riera@imgtec.com>
+xiaoyur347 <xiaoyur347@gmail.com>
+ycqiu <429148848@qq.com>
+yiqiju <fred_ju@selinc.com>
+Yu Xiaolei <dreifachstein@gmail.com>
+
+Google Inc.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CMakeLists.txt
new file mode 100644
index 0000000..381d08b
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CMakeLists.txt
@@ -0,0 +1,186 @@
+# vim: et ts=4 sts=4 sw=4 tw=0
+
+# ==== Define cmake build policies that affect compilation and linkage default behaviors
+#
+# Set the JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION string to the newest cmake version
+# policies that provide successful builds. By setting JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION
+# to a value greater than the oldest policies, all policies between
+# JSONCPP_OLDEST_VALIDATED_POLICIES_VERSION and CMAKE_VERSION (used for this build)
+# are set to their NEW behavior, thereby suppressing policy warnings related to policies
+# between the JSONCPP_OLDEST_VALIDATED_POLICIES_VERSION and CMAKE_VERSION.
+#
+# CMake versions greater than JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION will
+# continue to generate policy warnings "CMake Warning (dev)...Policy CMP0XXX is not set:"
+#
+set(JSONCPP_OLDEST_VALIDATED_POLICIES_VERSION "3.8.0")
+set(JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION "3.13.2")
+cmake_minimum_required(VERSION ${JSONCPP_OLDEST_VALIDATED_POLICIES_VERSION})
+if("${CMAKE_VERSION}" VERSION_LESS "${JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION}")
+    #Set and use the newest available cmake policies that are validated to work
+    set(JSONCPP_CMAKE_POLICY_VERSION "${CMAKE_VERSION}")
+else()
+    set(JSONCPP_CMAKE_POLICY_VERSION "${JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION}")
+endif()
+cmake_policy(VERSION ${JSONCPP_CMAKE_POLICY_VERSION})
+#
+# Now enumerate specific policies newer than JSONCPP_NEWEST_VALIDATED_POLICIES_VERSION
+# that may need to be individually set to NEW/OLD
+#
+foreach(pnew "") # Currently Empty
+    if(POLICY ${pnew})
+        cmake_policy(SET ${pnew} NEW)
+    endif()
+endforeach()
+foreach(pold "") # Currently Empty
+    if(POLICY ${pold})
+        cmake_policy(SET ${pold} OLD)
+    endif()
+endforeach()
+
+# ==== Define language standard configurations requiring at least c++11 standard
+if(CMAKE_CXX_STANDARD EQUAL "98" )
+    message(FATAL_ERROR "CMAKE_CXX_STANDARD:STRING=98 is not supported.")
+endif()
+
+#####
+##  Set the default target properties
+if(NOT CMAKE_CXX_STANDARD)
+    set(CMAKE_CXX_STANDARD 11) # Supported values are ``11``, ``14``, and ``17``.
+endif()
+if(NOT CMAKE_CXX_STANDARD_REQUIRED)
+    set(CMAKE_CXX_STANDARD_REQUIRED ON)
+endif()
+if(NOT CMAKE_CXX_EXTENSIONS)
+    set(CMAKE_CXX_EXTENSIONS OFF)
+endif()
+
+# ====
+
+# Ensures that CMAKE_BUILD_TYPE has a default value
+if(NOT DEFINED CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE Release CACHE STRING
+        "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
+endif()
+
+project(JSONCPP
+        VERSION 1.9.0 # <major>[.<minor>[.<patch>[.<tweak>]]]
+        LANGUAGES CXX)
+
+message(STATUS "JsonCpp Version: ${JSONCPP_VERSION_MAJOR}.${JSONCPP_VERSION_MINOR}.${JSONCPP_VERSION_PATCH}")
+set( JSONCPP_SOVERSION 21 )
+
+option(JSONCPP_WITH_TESTS "Compile and (for jsoncpp_check) run JsonCpp test executables" ON)
+option(JSONCPP_WITH_POST_BUILD_UNITTEST "Automatically run unit-tests as a post build step" ON)
+option(JSONCPP_WITH_WARNING_AS_ERROR "Force compilation to fail if a warning occurs" OFF)
+option(JSONCPP_WITH_STRICT_ISO "Issue all the warnings demanded by strict ISO C and ISO C++" ON)
+option(JSONCPP_WITH_PKGCONFIG_SUPPORT "Generate and install .pc files" ON)
+option(JSONCPP_WITH_CMAKE_PACKAGE "Generate and install cmake package files" ON)
+option(BUILD_SHARED_LIBS "Build jsoncpp_lib as a shared library." OFF)
+
+# Enable runtime search path support for dynamic libraries on OSX
+if(APPLE)
+    set(CMAKE_MACOSX_RPATH 1)
+endif()
+
+# Adhere to GNU filesystem layout conventions
+include(GNUInstallDirs)
+
+set(DEBUG_LIBNAME_SUFFIX "" CACHE STRING "Optional suffix to append to the library name for a debug build")
+
+set(JSONCPP_USE_SECURE_MEMORY "0" CACHE STRING "-D...=1 to use memory-wiping allocator for STL" )
+
+# File version.h is only regenerated on CMake configure step
+configure_file( "${PROJECT_SOURCE_DIR}/src/lib_json/version.h.in"
+                "${PROJECT_BINARY_DIR}/include/json/version.h"
+                NEWLINE_STYLE UNIX )
+configure_file( "${PROJECT_SOURCE_DIR}/version.in"
+                "${PROJECT_BINARY_DIR}/version"
+                NEWLINE_STYLE UNIX )
+
+macro(UseCompilationWarningAsError)
+    if(MSVC)
+        # Only enabled in debug because some old versions of VS STL generate
+        # warnings when compiled in release configuration.
+        add_compile_options($<$<CONFIG:Debug>:/WX>)
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        add_compile_options(-Werror)
+        if(JSONCPP_WITH_STRICT_ISO)
+            add_compile_options(-pedantic-errors)
+        endif()
+    endif()
+endmacro()
+
+# Include our configuration header
+include_directories( ${jsoncpp_SOURCE_DIR}/include )
+
+if(MSVC)
+    # Only enabled in debug because some old versions of VS STL generate
+    # unreachable code warning when compiled in release configuration.
+    add_compile_options($<$<CONFIG:Debug>:/W4>)
+endif()
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    # using regular Clang or AppleClang
+    add_compile_options(-Wall -Wconversion -Wshadow -Werror=conversion -Werror=sign-compare)
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    # using GCC
+    add_compile_options(-Wall -Wconversion -Wshadow -Wextra)
+    # not yet ready for -Wsign-conversion
+
+    if(JSONCPP_WITH_STRICT_ISO)
+        add_compile_options(-pedantic)
+    endif()
+    if(JSONCPP_WITH_WARNING_AS_ERROR)
+        add_compile_options(-Werror=conversion)
+    endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+    # using Intel compiler
+    add_compile_options(-Wall -Wconversion -Wshadow -Wextra -Werror=conversion)
+
+    if(JSONCPP_WITH_STRICT_ISO AND NOT JSONCPP_WITH_WARNING_AS_ERROR)
+        add_compile_options(-pedantic)
+    endif()
+endif()
+
+find_program(CCACHE_FOUND ccache)
+if(CCACHE_FOUND)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+endif(CCACHE_FOUND)
+
+if(JSONCPP_WITH_WARNING_AS_ERROR)
+    UseCompilationWarningAsError()
+endif()
+
+if(JSONCPP_WITH_PKGCONFIG_SUPPORT)
+    configure_file(
+        "pkg-config/jsoncpp.pc.in"
+        "pkg-config/jsoncpp.pc"
+        @ONLY)
+    install(FILES "${CMAKE_CURRENT_BINARY_DIR}/pkg-config/jsoncpp.pc"
+        DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
+endif()
+
+if(JSONCPP_WITH_CMAKE_PACKAGE)
+        include (CMakePackageConfigHelpers)
+        install(EXPORT jsoncpp
+                DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/jsoncpp
+                FILE        jsoncppConfig.cmake)
+        write_basic_package_version_file ("${CMAKE_CURRENT_BINARY_DIR}/jsoncppConfigVersion.cmake"
+                VERSION ${PROJECT_VERSION}
+                COMPATIBILITY SameMajorVersion)
+        install(FILES ${CMAKE_CURRENT_BINARY_DIR}/jsoncppConfigVersion.cmake
+                DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/jsoncpp)
+endif()
+
+if(JSONCPP_WITH_TESTS)
+  enable_testing()
+  include(CTest)
+endif()
+
+# Build the different applications
+add_subdirectory( src )
+
+#install the includes
+add_subdirectory( include )
+
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CONTRIBUTING.md b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CONTRIBUTING.md
new file mode 100644
index 0000000..b7e4ab7
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/CONTRIBUTING.md
@@ -0,0 +1,145 @@
+# Contributing to JsonCpp
+
+## Building
+
+Both CMake and Meson can generate a variety of build environments for your preferred development setup.
+Using cmake or meson you can generate an Xcode, Visual Studio, Unix Makefile, Ninja, or other environment that fits your needs.
+
+An example of a common Meson/Ninja environment is described next.
+
+## Building and testing with Meson/Ninja
+Thanks to David Seifert (@SoapGentoo), we (the maintainers) now use
+[meson](http://mesonbuild.com/) and [ninja](https://ninja-build.org/) to build
+for debugging, as well as for continuous integration (see
+[`./.travis_scripts/meson_builder.sh`](./.travis_scripts/meson_builder.sh)). Other systems may work, but minor
+things like version strings might break.
+
+First, install both meson (which requires Python3) and ninja.
+If you wish to install to a directory other than /usr/local, set an environment variable called DESTDIR with the desired path:
+    DESTDIR=/path/to/install/dir
+
+Then,
+
+    cd jsoncpp/
+    BUILD_TYPE=debug
+    #BUILD_TYPE=release
+    LIB_TYPE=shared
+    #LIB_TYPE=static
+    meson --buildtype ${BUILD_TYPE} --default-library ${LIB_TYPE} . build-${LIB_TYPE}
+    #ninja -v -C build-${LIB_TYPE} test # This stopped working on my Mac.
+    ninja -v -C build-${LIB_TYPE}
+    cd build-${LIB_TYPE}
+    meson test --no-rebuild --print-errorlogs
+    sudo ninja install
+
+## Building and testing with other build systems
+See https://github.com/open-source-parsers/jsoncpp/wiki/Building
+
+## Running the tests manually
+
+You need to run tests manually only if you are troubleshooting an issue.
+
+In the instructions below, replace `path/to/jsontest` with the path of the
+`jsontest` executable that was compiled on your platform.
+
+    cd test
+    # This will run the Reader/Writer tests
+    python runjsontests.py path/to/jsontest
+
+    # This will run the Reader/Writer tests, using JSONChecker test suite
+    # (http://www.json.org/JSON_checker/).
+    # Notes: not all tests pass: JsonCpp is too lenient (for example,
+    # it allows an integer to start with '0'). The goal is to improve
+    # strict mode parsing to get all tests to pass.
+    python runjsontests.py --with-json-checker path/to/jsontest
+
+    # This will run the unit tests (mostly Value)
+    python rununittests.py path/to/test_lib_json
+
+    # You can run the tests using valgrind:
+    python rununittests.py --valgrind path/to/test_lib_json
+
+## Building the documentation
+
+Run the Python script `doxybuild.py` from the top directory:
+
+    python doxybuild.py --doxygen=$(which doxygen) --open --with-dot
+
+See `doxybuild.py --help` for options.
+
+## Adding a reader/writer test
+
+To add a test, you need to create two files in test/data:
+
+* a `TESTNAME.json` file that contains the input document in JSON format.
+* a `TESTNAME.expected` file that contains a flattened representation of the
+  input document.
+
+The `TESTNAME.expected` file format is as follows:
+
+* Each line represents a JSON element of the element tree represented by the
+  input document.
+* Each line has two parts: the path to access the element separated from the
+  element value by `=`. Array and object values are always empty (i.e.
+  represented by either `[]` or `{}`).
+* Element path `.` represents the root element, and is used to separate object
+  members. `[N]` is used to specify the value of an array element at index `N`.
+
+See the examples `test_complex_01.json` and `test_complex_01.expected` to better understand element paths.
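+
+For instance, a hypothetical `test_demo.json` containing `{"name": "foo", "items": [1, 2]}`
+would flatten along these lines (member ordering and string quoting here are illustrative;
+compare against the existing `test_*.expected` files):
+
+    .={}
+    .items=[]
+    .items[0]=1
+    .items[1]=2
+    .name="foo"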
+
+## Understanding reader/writer test output
+
+When a test is run, output files are generated beside the input test files. Below is a short description of the content of each file:
+
+* `test_complex_01.json`: input JSON document.
+* `test_complex_01.expected`: flattened JSON element tree used to check if
+  parsing was correct.
+* `test_complex_01.actual`: flattened JSON element tree produced by `jsontest`
+  from reading `test_complex_01.json`.
+* `test_complex_01.rewrite`: JSON document written by `jsontest` using the
+  `Json::Value` parsed from `test_complex_01.json` and serialized using
+  `Json::StyledWriter`.
+* `test_complex_01.actual-rewrite`: flattened JSON element tree produced by
+  `jsontest` from reading `test_complex_01.rewrite`.
+* `test_complex_01.process-output`: `jsontest` output, typically useful for
+  understanding parsing errors.
+
+## Versioning rules
+
+Consumers of this library require a strict approach to incrementing the version of the JsonCpp library. Currently, we follow the set of rules below:
+
+* Any new public symbols require a minor version bump.
+* Any alteration or removal of public symbols requires a major version bump, including changing the size of a class. This is necessary for
+consumers to do dependency injection properly.
+
+## Preparing code for submission
+
+Generally, JsonCpp's style guide has been pretty relaxed, with the following common themes:
+
+* Variables and function names use lower camel case (e.g. parseValue or collectComments).
+* Classes use camel case (e.g. OurReader).
+* Member variables have a trailing underscore.
+* Prefer `nullptr` over `NULL`.
+* Passing by non-const reference is allowed.
+* Single statement if blocks may omit brackets.
+* Generally prefer less space over more space.
+
+For an example:
+
+```c++
+bool Reader::decodeNumber(Token& token) {
+  Value decoded;
+  if (!decodeNumber(token, decoded))
+    return false;
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
+```
+
+Before submitting your code, ensure that you meet the versioning requirements above, follow the style guide of the file you are modifying (or the above rules for new files), and run clang-format. Meson exposes clang-format with the following command:
+
+```
+ninja -v -C build-${LIB_TYPE}/ clang-format
+```
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/LICENSE b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/LICENSE
index ca2bfe1..89280a6 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/LICENSE
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/LICENSE
@@ -2,13 +2,13 @@
 tests and demonstration applications, are licensed under the following
 conditions...
 
-The author (Baptiste Lepilleur) explicitly disclaims copyright in all 
+Baptiste Lepilleur and The JsonCpp Authors explicitly disclaim copyright in all 
 jurisdictions which recognize such a disclaimer. In such jurisdictions, 
 this software is released into the Public Domain.
 
 In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
-2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is
-released under the terms of the MIT License (see below).
+2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur and
+The JsonCpp Authors, and is released under the terms of the MIT License (see below).
 
 In jurisdictions which recognize Public Domain property, the user of this 
 software may choose to accept it either as 1) Public Domain, 2) under the 
@@ -23,7 +23,7 @@
 The full text of the MIT License follows:
 
 ========================================================================
-Copyright (c) 2007-2010 Baptiste Lepilleur
+Copyright (c) 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 
 Permission is hereby granted, free of charge, to any person
 obtaining a copy of this software and associated documentation
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/NEWS.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/NEWS.txt
deleted file mode 100644
index 8316ff6..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/NEWS.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-New in SVN
-----------
-
- *  Updated the type system's behavior, in order to better support backwards
-    compatibility with code that was written before 64-bit integer support was
-    introduced. Here's how it works now:
-
-     *  isInt, isInt64, isUInt, and isUInt64 return true if and only if the
-        value can be exactly represented as that type. In particular, a value
-        constructed with a double like 17.0 will now return true for all of
-        these methods.
-
-     *  isDouble and isFloat now return true for all numeric values, since all
-        numeric values can be converted to a double or float without
-        truncation. Note however that the conversion may not be exact -- for
-        example, doubles cannot exactly represent all integers above 2^53 + 1.
-
-     *  isBool, isNull, isString, isArray, and isObject now return true if and
-        only if the value is of that type.
-
-     *  isConvertibleTo(fooValue) indicates that it is safe to call asFoo.
-        (For each type foo, isFoo always implies isConvertibleTo(fooValue).)
-        asFoo returns an approximate or exact representation as appropriate.
-        For example, a double value may be truncated when asInt is called.
-
-     *  For backwards compatibility with old code, isConvertibleTo(intValue)
-        may return false even if type() == intValue. This is because the value
-        may have been constructed with a 64-bit integer larger than maxInt,
-        and calling asInt() would cause an exception. If you're writing new
-        code, use isInt64 to find out whether the value is exactly
-        representable using an Int64, or asDouble() combined with minInt64 and
-        maxInt64 to figure out whether it is approximately representable.
-
-
-  New in JsonCpp 0.6.0:
-  ---------------------
-
-* Compilation
-
-  - LD_LIBRARY_PATH and LIBRARY_PATH environment variables are now 
-    propagated to the build environment as this is required for some 
-    compiler installation.
-
-  - Added support for Microsoft Visual Studio 2008 (bug #2930462): 
-    The platform "msvc90" has been added.
-
-    Notes: you need to setup the environment by running vcvars32.bat 
-    (e.g. MSVC 2008 command prompt in start menu) before running scons.
-    
-  - Added support for amalgamated source and header generation (a la sqlite).
-    Refer to README.txt section "Generating amalgamated source and header"
-    for detail.
-    
-* Value
-
-  - Removed experimental ValueAllocator, it caused static 
-    initialization/destruction order issues (bug #2934500). 
-    The DefaultValueAllocator has been inlined in code.
-    
-  - Added support for 64 bits integer:
-  
-    Types Json::Int64 and Json::UInt64 have been added. They are aliased
-	to 64 bits integers on system that support them (based on __int64 on 
-	Microsoft Visual Studio platform, and long long on other platforms).
-	
-	Types Json::LargestInt and Json::LargestUInt have been added. They are
-	aliased to the largest integer type supported: 
-	either Json::Int/Json::UInt or Json::Int64/Json::UInt64 respectively.
-	
-	Json::Value::asInt() and Json::Value::asUInt() still returns plain
-	"int" based types, but asserts if an attempt is made to retrieve
-	a 64 bits value that can not represented as the return type.
-	
-	Json::Value::asInt64() and Json::Value::asUInt64() have been added
-	to obtain the 64 bits integer value.
-	
-	Json::Value::asLargestInt() and Json::Value::asLargestUInt() returns
-	the integer as a LargestInt/LargestUInt respectively. Those functions
-	functions are typically used when implementing writer.
-	
-	The reader attempts to read number as 64 bits integer, and fall back
-	to reading a double if the number is not in the range of 64 bits 
-	integer.
-    
-    Warning: Json::Value::asInt() and Json::Value::asUInt() now returns
-    long long. This changes break code that was passing the return value
-    to *printf() function.
-  
-    Support for 64 bits integer can be disabled by defining the macro 
-	JSON_NO_INT64 (uncomment it in json/config.h for example), though
-	it should have no impact on existing usage.
-    
-  - The type Json::ArrayIndex is used for indexes of a JSON value array. It
-    is an unsigned int (typically 32 bits).
-	
-  - Array index can be passed as int to operator[], allowing use of literal:
-    Json::Value array;
-	array.append( 1234 );
-	int value = array[0].asInt();  // did not compile previously
-
-  - Added float Json::Value::asFloat() to obtain a floating point value as a
-    float (avoid lost of precision warning caused by used of asDouble() 
-    to initialize a float).
-
-* Reader
-
-  - Renamed Reader::getFormatedErrorMessages() to getFormattedErrorMessages.
-    Bug #3023708 (Formatted has 2 't'). The old member function is deprecated
-    but still present for backward compatibility.
-    
-* Tests
-
-  - Added test to ensure that the escape sequence "\/" is corrected handled 
-    by the parser.
-
-* Bug fixes
-
-  - Bug #3139677: JSON [1 2 3] was incorrectly parsed as [1, 3]. Error is now 
-    correctly detected.
-    
-  - Bug #3139678: stack buffer overflow when parsing a double with a
-    length of 32 characters.
-	
-  - Fixed Value::operator <= implementation (had the semantic of operator >=).
-    Found when adding unit tests for comparison operators.
-    
-  - Value::compare() is now const and has an actual implementation with
-    unit tests.
-
-  - Bug #2407932: strpbrk() can fail for NULL pointer.
-
-  - Bug #3306345: Fixed minor typo in Path::resolve().
-
-  - Bug #3314841/#3306896: errors in amalgamate.py
-
-  - Fixed some Coverity warnings and line-endings.
-    
-* License
-  
-  - See file LICENSE for details. Basically JsonCpp is now licensed under 
-    MIT license, or public domain if desired and recognized in your jurisdiction.
-    Thanks to Stephan G. Beal [http://wanderinghorse.net/home/stephan/]) who
-	helped figuring out the solution to the public domain issue.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.md b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.md
new file mode 100644
index 0000000..8c85870
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.md
@@ -0,0 +1,46 @@
+# JsonCpp
+
+[![badge](https://img.shields.io/badge/conan.io-jsoncpp%2F1.8.0-green.svg?logo=data:image/png;base64%2CiVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAMAAAAolt3jAAAA1VBMVEUAAABhlctjlstkl8tlmMtlmMxlmcxmmcxnmsxpnMxpnM1qnc1sn85voM91oM11oc1xotB2oc56pNF6pNJ2ptJ8ptJ8ptN9ptN8p9N5qNJ9p9N9p9R8qtOBqdSAqtOAqtR%2BrNSCrNJ/rdWDrNWCsNWCsNaJs9eLs9iRvNuVvdyVv9yXwd2Zwt6axN6dxt%2Bfx%2BChyeGiyuGjyuCjyuGly%2BGlzOKmzOGozuKoz%2BKqz%2BOq0OOv1OWw1OWw1eWx1eWy1uay1%2Baz1%2Baz1%2Bez2Oe02Oe12ee22ujUGwH3AAAAAXRSTlMAQObYZgAAAAFiS0dEAIgFHUgAAAAJcEhZcwAACxMAAAsTAQCanBgAAAAHdElNRQfgBQkREyOxFIh/AAAAiklEQVQI12NgAAMbOwY4sLZ2NtQ1coVKWNvoc/Eq8XDr2wB5Ig62ekza9vaOqpK2TpoMzOxaFtwqZua2Bm4makIM7OzMAjoaCqYuxooSUqJALjs7o4yVpbowvzSUy87KqSwmxQfnsrPISyFzWeWAXCkpMaBVIC4bmCsOdgiUKwh3JojLgAQ4ZCE0AMm2D29tZwe6AAAAAElFTkSuQmCC)](https://bintray.com/theirix/conan-repo/jsoncpp%3Atheirix)
+
+[JSON][json-org] is a lightweight data-interchange format. It can represent
+numbers, strings, ordered sequences of values, and collections of name/value
+pairs.
+
+[json-org]: http://json.org/
+
+JsonCpp is a C++ library that allows manipulating JSON values, including
+serialization and deserialization to and from strings. It can also preserve
+existing comments during serialization and deserialization, making it a convenient
+format to store user input files.
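+
+A minimal sketch of reading and writing a document through the `CharReaderBuilder` and
+`StreamWriterBuilder` interfaces (error handling trimmed to the bare minimum):
+
+```c++
+#include <json/json.h>
+
+#include <iostream>
+#include <sstream>
+
+int main() {
+  Json::CharReaderBuilder reader;
+  Json::Value root;
+  std::string errs;
+  std::istringstream doc(R"({"name": "demo", "count": 3})");
+  if (!Json::parseFromStream(reader, doc, &root, &errs)) { // parse from any std::istream
+    std::cerr << errs << '\n';
+    return 1;
+  }
+  root["enabled"] = true; // values can be read and modified in place
+  Json::StreamWriterBuilder writer;
+  std::cout << Json::writeString(writer, root) << '\n'; // serialize back to a string
+  return 0;
+}
+```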
+
+
+## Documentation
+
+[JsonCpp documentation][JsonCpp-documentation] is generated using [Doxygen][].
+
+[JsonCpp-documentation]: http://open-source-parsers.github.io/jsoncpp-docs/doxygen/index.html
+[Doxygen]: http://www.doxygen.org
+
+
+## A note on backward-compatibility
+
+* `1.y.z` is built with C++11.
+* `0.y.z` can be used with older compilers.
+* Major versions maintain binary-compatibility.
+
+
+## Using JsonCpp in your project
+
+### Amalgamated source
+https://github.com/open-source-parsers/jsoncpp/wiki/Amalgamated
+
+### The Meson Build System
+If you are using the [Meson Build System](http://mesonbuild.com), then you can get a wrap file by downloading it from [Meson WrapDB](https://wrapdb.mesonbuild.com/jsoncpp), or simply use `meson wrap install jsoncpp`.
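+
+A consumer `meson.build` can then use the wrap as a subproject fallback; the snippet below
+assumes the wrap exports the `jsoncpp_dep` dependency variable declared by upstream's
+`meson.build`:
+
+```meson
+jsoncpp_dep = dependency('jsoncpp',
+                         fallback : ['jsoncpp', 'jsoncpp_dep'])
+executable('demo', 'demo.cpp', dependencies : jsoncpp_dep)
+```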
+
+### Other ways
+If you have trouble, see the Wiki, or post a question as an Issue.
+
+## License
+
+See the `LICENSE` file for details. In summary, JsonCpp is licensed under the
+MIT license, or public domain if desired and recognized in your jurisdiction.
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.txt
deleted file mode 100644
index 88c1178..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/README.txt
+++ /dev/null
@@ -1,173 +0,0 @@
-* Introduction:
-  =============
-
-JSON (JavaScript Object Notation) is a lightweight data-interchange format. 
-It can represent integer, real number, string, an ordered sequence of 
-value, and a collection of name/value pairs.
-
-JsonCpp (http://jsoncpp.sourceforge.net/) is a simple API to manipulate 
-JSON value, handle serialization and unserialization to string.
-
-It can also preserve existing comment in unserialization/serialization steps,
-making it a convenient format to store user input files.
-
-Unserialization parsing is user friendly and provides precise error reports.
-
-
-* Building/Testing:
-  =================
-
-JsonCpp uses Scons (http://www.scons.org) as a build system. Scons requires
-python to be installed (http://www.python.org).
-
-You download scons-local distribution from the following url:
-http://sourceforge.net/projects/scons/files/scons-local/1.2.0/
-
-Unzip it in the directory where you found this README file. scons.py Should be 
-at the same level as README.
-
-python scons.py platform=PLTFRM [TARGET]
-where PLTFRM may be one of:
-	suncc Sun C++ (Solaris)
-	vacpp Visual Age C++ (AIX)
-	mingw 
-	msvc6 Microsoft Visual Studio 6 service pack 5-6
-	msvc70 Microsoft Visual Studio 2002
-	msvc71 Microsoft Visual Studio 2003
-	msvc80 Microsoft Visual Studio 2005
-	msvc90 Microsoft Visual Studio 2008
-	linux-gcc Gnu C++ (linux, also reported to work for Mac OS X)
-
-Notes: if you are building with Microsoft Visual Studio 2008, you need to 
-setup the environment by running vcvars32.bat (e.g. MSVC 2008 command prompt)
-before running scons.
-	
-Adding platform is fairly simple. You need to change the Sconstruct file 
-to do so.
-	
-and TARGET may be:
-	check: build library and run unit tests.
-
-    
-* Running the test manually:
-  ==========================
-
-Notes that test can be run by scons using the 'check' target (see above).
-
-You need to run test manually only if you are troubleshooting an issue.
-
-In the instruction below, replace "path to jsontest.exe" with the path
-of the 'jsontest' executable that was compiled on your platform.
-  
-cd test
-# This will run the Reader/Writer tests
-python runjsontests.py "path to jsontest.exe"
-
-# This will run the Reader/Writer tests, using JSONChecker test suite
-# (http://www.json.org/JSON_checker/).
-# Notes: not all tests pass: JsonCpp is too lenient (for example,
-# it allows an integer to start with '0'). The goal is to improve
-# strict mode parsing to get all tests to pass.
-python runjsontests.py --with-json-checker "path to jsontest.exe"
-
-# This will run the unit tests (mostly Value)
-python rununittests.py "path to test_lib_json.exe"
-
-You can run the tests using valgrind:
-python rununittests.py --valgrind "path to test_lib_json.exe"
-
-
-* Building the documentation:
-  ===========================
-
-Run the python script doxybuild.py from the top directory:
-
-python doxybuild.py --open --with-dot
-
-See doxybuild.py --help for options. 
-
-Notes that the documentation is also available for download as a tarball. 
-The documentation of the latest release is available online at:
-http://jsoncpp.sourceforge.net/
-
-* Generating amalgamated source and header
-  ========================================
-
-JsonCpp is provided with a script to generate a single header and a single
-source file to ease inclusion in an existing project.
-
-The amalgamated source can be generated at any time by running the following
-command from the top-directory (requires python 2.6):
-
-python amalgamate.py
-
-It is possible to specify header name. See -h options for detail. By default,
-the following files are generated:
-- dist/jsoncpp.cpp: source file that need to be added to your project
-- dist/json/json.h: header file corresponding to use in your project. It is
-equivalent to including json/json.h in non-amalgamated source. This header
-only depends on standard headers. 
-- dist/json/json-forwards.h: header the provides forward declaration
-of all JsonCpp types. This typically what should be included in headers to
-speed-up compilation.
-
-The amalgamated sources are generated by concatenating JsonCpp source in the
-correct order and defining macro JSON_IS_AMALGAMATION to prevent inclusion
-of other headers.
-
-* Using json-cpp in your project:
-  ===============================
-
-include/ should be added to your compiler include path. jsoncpp headers 
-should be included as follow:
-
-#include <json/json.h>
-  
-
-* Adding a reader/writer test:
-  ============================
-
-To add a test, you need to create two files in test/data:
-- a TESTNAME.json file that contains the input document in JSON format.
-- a TESTNAME.expected file that contains a flattened representation of
-  the input document.
-
-TESTNAME.expected file format:
-- each line represents a JSON element of the element tree represented
-  by the input document.
-- each line has two parts: the path used to access the element, separated
-  from the element value by '='. Array and object values are always empty
-  (i.e. represented by either [] or {}).
-- element path: '.' represents the root element and is used to separate
-  object members. [N] is used to specify the value of the array element
-  at index N.
-See test_complex_01.json and test_complex_01.expected to better understand
-element path.
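-
-As a hypothetical illustration of these conventions (not an actual test in
-test/data), an input file containing:
-
-{ "name": "demo", "items": [ 10, 20 ] }
-
-would have a flattened .expected file along the lines of:
-
-.={}
-.name="demo"
-.items=[]
-.items[0]=10
-.items[1]=20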
-
-
-* Understanding reader/writer test output:
-  ========================================
-
-When a test is run, output files are generated alongside the input test files.
-Below is a short description of the content of each file:
-
-- test_complex_01.json: input JSON document
-- test_complex_01.expected: flattened JSON element tree used to check whether
-    parsing was correct.
-
-- test_complex_01.actual: flattened JSON element tree produced by
-    jsontest.exe from reading test_complex_01.json
-- test_complex_01.rewrite: JSON document written by jsontest.exe using the
-    Json::Value parsed from test_complex_01.json and serialized using
-    Json::StyledWriter.
-- test_complex_01.actual-rewrite: flattened JSON element tree produced by
-    jsontest.exe from reading test_complex_01.rewrite.
-- test_complex_01.process-output: jsontest.exe output, typically useful for
-    understanding parsing errors.
-
-* License
-  =======
-  
-See the file LICENSE for details. Basically, JsonCpp is licensed under the
-MIT license, or public domain if desired and recognized in your jurisdiction.
-
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/SConstruct b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/SConstruct
deleted file mode 100644
index 23225cb..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/SConstruct
+++ /dev/null
@@ -1,248 +0,0 @@
-"""
-Notes: 
-- shared library support is buggy: it assumes that a static and dynamic library can be built from the same object files. This is not true on many platforms. For this reason it is only enabled on linux-gcc at the current time.
-
-To add a platform:
-- add its name in options allowed_values below
-- add tool initialization for this platform. Search for "if platform == 'suncc'" as an example.
-"""
-
-import os
-import os.path
-import sys
-
-JSONCPP_VERSION = open(File('#version').abspath,'rt').read().strip()
-DIST_DIR = '#dist'
-
-options = Variables()
-options.Add( EnumVariable('platform',
-                        'Platform (compiler/stl) used to build the project',
-                        'msvc71',
-                        allowed_values='suncc vacpp mingw msvc6 msvc7 msvc71 msvc80 msvc90 linux-gcc'.split(),
-                        ignorecase=2) )
-
-try:
-    platform = ARGUMENTS['platform']
-    if platform == 'linux-gcc':
-        CXX = 'g++' # not quite right, but env is not yet available.
-        import commands
-        version = commands.getoutput('%s -dumpversion' %CXX)
-        platform = 'linux-gcc-%s' %version
-        print "Using platform '%s'" %platform
-        LD_LIBRARY_PATH = os.environ.get('LD_LIBRARY_PATH', '')
-        LD_LIBRARY_PATH = "%s:libs/%s" %(LD_LIBRARY_PATH, platform)
-        os.environ['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH
-        print "LD_LIBRARY_PATH =", LD_LIBRARY_PATH
-except KeyError:
-    print 'You must specify a "platform"'
-    sys.exit(2)
-
-print "Building using PLATFORM =", platform
-
-rootbuild_dir = Dir('#buildscons')
-build_dir = os.path.join( '#buildscons', platform )
-bin_dir = os.path.join( '#bin', platform )
-lib_dir = os.path.join( '#libs', platform )
-sconsign_dir_path = Dir(build_dir).abspath
-sconsign_path = os.path.join( sconsign_dir_path, '.sconsign.dbm' )
-
-# Ensure the build directory exists (SConsignFile fails otherwise!)
-if not os.path.exists( sconsign_dir_path ):
-    os.makedirs( sconsign_dir_path )
-
-# Store all dependencies signature in a database
-SConsignFile( sconsign_path )
-
-def make_environ_vars():
-	"""Returns a dictionnary with environment variable to use when compiling."""
-	# PATH is required to find the compiler
-	# TEMP is required for at least mingw
-    # LD_LIBRARY_PATH & co is required on some system for the compiler
-	vars = {}
-	for name in ('PATH', 'TEMP', 'TMP', 'LD_LIBRARY_PATH', 'LIBRARY_PATH'):
-		if name in os.environ:
-			vars[name] = os.environ[name]
-	return vars
-	
-
-env = Environment( ENV = make_environ_vars(),
-                   toolpath = ['scons-tools'],
-                   tools=[] ) #, tools=['default'] )
-
-if platform == 'suncc':
-    env.Tool( 'sunc++' )
-    env.Tool( 'sunlink' )
-    env.Tool( 'sunar' )
-    env.Append( CCFLAGS = ['-mt'] )
-elif platform == 'vacpp':
-    env.Tool( 'default' )
-    env.Tool( 'aixcc' )
-    env['CXX'] = 'xlC_r'   #scons does not pick-up the correct one !
-    # using xlC_r ensure multi-threading is enabled:
-    # http://publib.boulder.ibm.com/infocenter/pseries/index.jsp?topic=/com.ibm.vacpp7a.doc/compiler/ref/cuselect.htm
-    env.Append( CCFLAGS = '-qrtti=all',
-                LINKFLAGS='-bh:5' )  # -bh:5 remove duplicate symbol warning
-elif platform == 'msvc6':
-    env['MSVS_VERSION']='6.0'
-    for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
-        env.Tool( tool )
-    env['CXXFLAGS']='-GR -GX /nologo /MT'
-elif platform == 'msvc70':
-    env['MSVS_VERSION']='7.0'
-    for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
-        env.Tool( tool )
-    env['CXXFLAGS']='-GR -GX /nologo /MT'
-elif platform == 'msvc71':
-    env['MSVS_VERSION']='7.1'
-    for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
-        env.Tool( tool )
-    env['CXXFLAGS']='-GR -GX /nologo /MT'
-elif platform == 'msvc80':
-    env['MSVS_VERSION']='8.0'
-    for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
-        env.Tool( tool )
-    env['CXXFLAGS']='-GR -EHsc /nologo /MT'
-elif platform == 'msvc90':
-    env['MSVS_VERSION']='9.0'
-    # Scons 1.2 fails to detect the correct location of the platform SDK.
-    # So we propagate those from the environment. This requires that the
-    # user run vcvars32.bat before compiling.
-    if 'INCLUDE' in os.environ:
-        env['ENV']['INCLUDE'] = os.environ['INCLUDE']
-    if 'LIB' in os.environ:
-        env['ENV']['LIB'] = os.environ['LIB']
-    for tool in ['msvc', 'msvs', 'mslink', 'masm', 'mslib']:
-        env.Tool( tool )
-    env['CXXFLAGS']='-GR -EHsc /nologo /MT'
-elif platform == 'mingw':
-    env.Tool( 'mingw' )
-    env.Append( CPPDEFINES=[ "WIN32", "NDEBUG", "_MT" ] )
-elif platform.startswith('linux-gcc'):
-    env.Tool( 'default' )
-    env.Append( LIBS = ['pthread'], CCFLAGS = "-Wall" )
-    env['SHARED_LIB_ENABLED'] = True
-else:
-    print "UNSUPPORTED PLATFORM."
-    env.Exit(1)
-
-env.Tool('targz')
-env.Tool('srcdist')
-env.Tool('globtool')
-
-env.Append( CPPPATH = ['#include'],
-            LIBPATH = lib_dir )
-short_platform = platform
-if short_platform.startswith('msvc'):
-    short_platform = short_platform[2:]
-# Note: on Windows you need to rebuild the source for each variant.
-# The build script does not support that yet, so we only build static libraries.
-# This also fails on AIX because both the dynamic and static libraries end with
-# the extension .a.
-env['SHARED_LIB_ENABLED'] = env.get('SHARED_LIB_ENABLED', False)
-env['LIB_PLATFORM'] = short_platform
-env['LIB_LINK_TYPE'] = 'lib'    # static
-env['LIB_CRUNTIME'] = 'mt'
-env['LIB_NAME_SUFFIX'] = '${LIB_PLATFORM}_${LIB_LINK_TYPE}${LIB_CRUNTIME}'  # must match autolink naming convention
-env['JSONCPP_VERSION'] = JSONCPP_VERSION
-env['BUILD_DIR'] = env.Dir(build_dir)
-env['ROOTBUILD_DIR'] = env.Dir(rootbuild_dir)
-env['DIST_DIR'] = DIST_DIR
-if 'TarGz' in env['BUILDERS']:
-	class SrcDistAdder:
-		def __init__( self, env ):
-			self.env = env
-		def __call__( self, *args, **kw ):
-			apply( self.env.SrcDist, (self.env['SRCDIST_TARGET'],) + args, kw )
-	env['SRCDIST_BUILDER'] = env.TarGz
-else: # If tarfile module is missing
-	class SrcDistAdder:
-		def __init__( self, env ):
-			pass
-		def __call__( self, *args, **kw ):
-			pass
-env['SRCDIST_ADD'] = SrcDistAdder( env )
-env['SRCDIST_TARGET'] = os.path.join( DIST_DIR, 'jsoncpp-src-%s.tar.gz' % env['JSONCPP_VERSION'] )
-                      
-env_testing = env.Clone( )
-env_testing.Append( LIBS = ['json_${LIB_NAME_SUFFIX}'] )
-
-def buildJSONExample( env, target_sources, target_name ):
-    env = env.Clone()
-    env.Append( CPPPATH = ['#'] )
-    exe = env.Program( target=target_name,
-                       source=target_sources )
-    env['SRCDIST_ADD']( source=[target_sources] )
-    global bin_dir
-    return env.Install( bin_dir, exe )
-
-def buildJSONTests( env, target_sources, target_name ):
-    jsontests_node = buildJSONExample( env, target_sources, target_name )
-    check_alias_target = env.Alias( 'check', jsontests_node, RunJSONTests( jsontests_node, jsontests_node ) )
-    env.AlwaysBuild( check_alias_target )
-
-def buildUnitTests( env, target_sources, target_name ):
-    jsontests_node = buildJSONExample( env, target_sources, target_name )
-    check_alias_target = env.Alias( 'check', jsontests_node, 
-                                    RunUnitTests( jsontests_node, jsontests_node ) )
-    env.AlwaysBuild( check_alias_target )
-
-def buildLibrary( env, target_sources, target_name ):
-    static_lib = env.StaticLibrary( target=target_name + '_${LIB_NAME_SUFFIX}',
-                                    source=target_sources )
-    global lib_dir
-    env.Install( lib_dir, static_lib )
-    if env['SHARED_LIB_ENABLED']:
-        shared_lib = env.SharedLibrary( target=target_name + '_${LIB_NAME_SUFFIX}',
-                                        source=target_sources )
-        env.Install( lib_dir, shared_lib )
-    env['SRCDIST_ADD']( source=[target_sources] )
-
-Export( 'env env_testing buildJSONExample buildLibrary buildJSONTests buildUnitTests' )
-
-def buildProjectInDirectory( target_directory ):
-    global build_dir
-    target_build_dir = os.path.join( build_dir, target_directory )
-    target = os.path.join( target_directory, 'sconscript' )
-    SConscript( target, build_dir=target_build_dir, duplicate=0 )
-    env['SRCDIST_ADD']( source=[target] )
-
-
-def runJSONTests_action( target, source = None, env = None ):
-    # Add test scripts to python path
-    jsontest_path = Dir( '#test' ).abspath
-    sys.path.insert( 0, jsontest_path )
-    data_path = os.path.join( jsontest_path, 'data' )
-    import runjsontests
-    return runjsontests.runAllTests( os.path.abspath(source[0].path), data_path )
-
-def runJSONTests_string( target, source = None, env = None ):
-    return 'RunJSONTests("%s")' % source[0]
-
-import SCons.Action
-ActionFactory = SCons.Action.ActionFactory
-RunJSONTests = ActionFactory(runJSONTests_action, runJSONTests_string )
-
-def runUnitTests_action( target, source = None, env = None ):
-    # Add test scripts to python path
-    jsontest_path = Dir( '#test' ).abspath
-    sys.path.insert( 0, jsontest_path )
-    import rununittests
-    return rununittests.runAllTests( os.path.abspath(source[0].path) )
-
-def runUnitTests_string( target, source = None, env = None ):
-    return 'RunUnitTests("%s")' % source[0]
-
-RunUnitTests = ActionFactory(runUnitTests_action, runUnitTests_string )
-
-env.Alias( 'check' )
-
-srcdist_cmd = env['SRCDIST_ADD']( source = """
-    AUTHORS README.txt SConstruct
-    """.split() )
-env.Alias( 'src-dist', srcdist_cmd )
-
-buildProjectInDirectory( 'src/jsontestrunner' )
-buildProjectInDirectory( 'src/lib_json' )
-buildProjectInDirectory( 'src/test_lib_json' )
-#print env.Dump()
-
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/amalgamate.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/amalgamate.py
index eab724f..c12215a 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/amalgamate.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/amalgamate.py
@@ -1,148 +1,155 @@
-"""Amalgate json-cpp library sources into a single source and header file.
+"""Amalgamate json-cpp library sources into a single source and header file.
 
-Requires Python 2.6
+Works with python2.6+ and python3.4+.
 
 Example of invocation (must be invoked from json-cpp top directory):
-python amalgate.py
+python amalgamate.py
 """
 import os
 import os.path
 import sys
 
 class AmalgamationFile:
-    def __init__( self, top_dir ):
+    def __init__(self, top_dir):
         self.top_dir = top_dir
         self.blocks = []
 
-    def add_text( self, text ):
-        if not text.endswith( '\n' ):
-            text += '\n'
-        self.blocks.append( text )
+    def add_text(self, text):
+        if not text.endswith("\n"):
+            text += "\n"
+        self.blocks.append(text)
 
-    def add_file( self, relative_input_path, wrap_in_comment=False ):
-        def add_marker( prefix ):
-            self.add_text( '' )
-            self.add_text( '// ' + '/'*70 )
-            self.add_text( '// %s of content of file: %s' % (prefix, relative_input_path.replace('\\','/')) )
-            self.add_text( '// ' + '/'*70 )
-            self.add_text( '' )
-        add_marker( 'Beginning' )
-        f = open( os.path.join( self.top_dir, relative_input_path ), 'rt' )
+    def add_file(self, relative_input_path, wrap_in_comment=False):
+        def add_marker(prefix):
+            self.add_text("")
+            self.add_text("// " + "/"*70)
+            self.add_text("// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")))
+            self.add_text("// " + "/"*70)
+            self.add_text("")
+        add_marker("Beginning")
+        f = open(os.path.join(self.top_dir, relative_input_path), "rt")
         content = f.read()
         if wrap_in_comment:
-            content = '/*\n' + content + '\n*/'
-        self.add_text( content )
+            content = "/*\n" + content + "\n*/"
+        self.add_text(content)
         f.close()
-        add_marker( 'End' )
-        self.add_text( '\n\n\n\n' )
+        add_marker("End")
+        self.add_text("\n\n\n\n")
 
-    def get_value( self ):
-        return ''.join( self.blocks ).replace('\r\n','\n')
+    def get_value(self):
+        return "".join(self.blocks).replace("\r\n","\n")
 
-    def write_to( self, output_path ):
-        output_dir = os.path.dirname( output_path )
-        if output_dir and not os.path.isdir( output_dir ):
-            os.makedirs( output_dir )
-        f = open( output_path, 'wb' )
-        f.write( self.get_value() )
+    def write_to(self, output_path):
+        output_dir = os.path.dirname(output_path)
+        if output_dir and not os.path.isdir(output_dir):
+            os.makedirs(output_dir)
+        f = open(output_path, "wb")
+        f.write(str.encode(self.get_value(), 'UTF-8'))
         f.close()
 
-def amalgamate_source( source_top_dir=None,
+def amalgamate_source(source_top_dir=None,
                        target_source_path=None,
-                       header_include_path=None ):
-    """Produces amalgated source.
+                       header_include_path=None):
+    """Produces amalgamated source.
        Parameters:
            source_top_dir: top-directory
            target_source_path: output .cpp path
            header_include_path: generated header path relative to target_source_path.
     """
-    print 'Amalgating header...'
-    header = AmalgamationFile( source_top_dir )
-    header.add_text( '/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).' )
-    header.add_text( '/// It is intented to be used with #include <%s>' % header_include_path )
-    header.add_file( 'LICENSE', wrap_in_comment=True )
-    header.add_text( '#ifndef JSON_AMALGATED_H_INCLUDED' )
-    header.add_text( '# define JSON_AMALGATED_H_INCLUDED' )
-    header.add_text( '/// If defined, indicates that the source file is amalgated' )
-    header.add_text( '/// to prevent private header inclusion.' )
-    header.add_text( '#define JSON_IS_AMALGAMATION' )
-    header.add_file( 'include/json/config.h' )
-    header.add_file( 'include/json/forwards.h' )
-    header.add_file( 'include/json/features.h' )
-    header.add_file( 'include/json/value.h' )
-    header.add_file( 'include/json/reader.h' )
-    header.add_file( 'include/json/writer.h' )
-    header.add_text( '#endif //ifndef JSON_AMALGATED_H_INCLUDED' )
+    print("Amalgamating header...")
+    header = AmalgamationFile(source_top_dir)
+    header.add_text("/// Json-cpp amalgamated header (http://jsoncpp.sourceforge.net/).")
+    header.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
+    header.add_file("LICENSE", wrap_in_comment=True)
+    header.add_text("#ifndef JSON_AMALGAMATED_H_INCLUDED")
+    header.add_text("# define JSON_AMALGAMATED_H_INCLUDED")
+    header.add_text("/// If defined, indicates that the source file is amalgamated")
+    header.add_text("/// to prevent private header inclusion.")
+    header.add_text("#define JSON_IS_AMALGAMATION")
+    header.add_file("include/json/version.h")
+    header.add_file("include/json/allocator.h")
+    header.add_file("include/json/config.h")
+    header.add_file("include/json/forwards.h")
+    header.add_file("include/json/features.h")
+    header.add_file("include/json/value.h")
+    header.add_file("include/json/reader.h")
+    header.add_file("include/json/writer.h")
+    header.add_file("include/json/assertions.h")
+    header.add_text("#endif //ifndef JSON_AMALGAMATED_H_INCLUDED")
 
-    target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path )
-    print 'Writing amalgated header to %r' % target_header_path
-    header.write_to( target_header_path )
+    target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path)
+    print("Writing amalgamated header to %r" % target_header_path)
+    header.write_to(target_header_path)
 
-    base, ext = os.path.splitext( header_include_path )
-    forward_header_include_path = base + '-forwards' + ext
-    print 'Amalgating forward header...'
-    header = AmalgamationFile( source_top_dir )
-    header.add_text( '/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).' )
-    header.add_text( '/// It is intented to be used with #include <%s>' % forward_header_include_path )
-    header.add_text( '/// This header provides forward declaration for all JsonCpp types.' )
-    header.add_file( 'LICENSE', wrap_in_comment=True )
-    header.add_text( '#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
-    header.add_text( '# define JSON_FORWARD_AMALGATED_H_INCLUDED' )
-    header.add_text( '/// If defined, indicates that the source file is amalgated' )
-    header.add_text( '/// to prevent private header inclusion.' )
-    header.add_text( '#define JSON_IS_AMALGAMATION' )
-    header.add_file( 'include/json/config.h' )
-    header.add_file( 'include/json/forwards.h' )
-    header.add_text( '#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
+    base, ext = os.path.splitext(header_include_path)
+    forward_header_include_path = base + "-forwards" + ext
+    print("Amalgamating forward header...")
+    header = AmalgamationFile(source_top_dir)
+    header.add_text("/// Json-cpp amalgamated forward header (http://jsoncpp.sourceforge.net/).")
+    header.add_text('/// It is intended to be used with #include "%s"' % forward_header_include_path)
+    header.add_text("/// This header provides forward declaration for all JsonCpp types.")
+    header.add_file("LICENSE", wrap_in_comment=True)
+    header.add_text("#ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED")
+    header.add_text("# define JSON_FORWARD_AMALGAMATED_H_INCLUDED")
+    header.add_text("/// If defined, indicates that the source file is amalgamated")
+    header.add_text("/// to prevent private header inclusion.")
+    header.add_text("#define JSON_IS_AMALGAMATION")
+    header.add_file("include/json/config.h")
+    header.add_file("include/json/forwards.h")
+    header.add_text("#endif //ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED")
 
-    target_forward_header_path = os.path.join( os.path.dirname(target_source_path),
-                                               forward_header_include_path )
-    print 'Writing amalgated forward header to %r' % target_forward_header_path
-    header.write_to( target_forward_header_path )
+    target_forward_header_path = os.path.join(os.path.dirname(target_source_path),
+                                               forward_header_include_path)
+    print("Writing amalgamated forward header to %r" % target_forward_header_path)
+    header.write_to(target_forward_header_path)
 
-    print 'Amalgating source...'
-    source = AmalgamationFile( source_top_dir )
-    source.add_text( '/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).' )
-    source.add_text( '/// It is intented to be used with #include <%s>' % header_include_path )
-    source.add_file( 'LICENSE', wrap_in_comment=True )
-    source.add_text( '' )
-    source.add_text( '#include <%s>' % header_include_path )
-    source.add_text( '' )
-    lib_json = 'src/lib_json'
-    source.add_file( os.path.join(lib_json, 'json_tool.h') )
-    source.add_file( os.path.join(lib_json, 'json_reader.cpp') )
-    source.add_file( os.path.join(lib_json, 'json_batchallocator.h') )
-    source.add_file( os.path.join(lib_json, 'json_valueiterator.inl') )
-    source.add_file( os.path.join(lib_json, 'json_value.cpp') )
-    source.add_file( os.path.join(lib_json, 'json_writer.cpp') )
+    print("Amalgamating source...")
+    source = AmalgamationFile(source_top_dir)
+    source.add_text("/// Json-cpp amalgamated source (http://jsoncpp.sourceforge.net/).")
+    source.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
+    source.add_file("LICENSE", wrap_in_comment=True)
+    source.add_text("")
+    source.add_text('#include "%s"' % header_include_path)
+    source.add_text("""
+#ifndef JSON_IS_AMALGAMATION
+#error "Compile with -I PATH_TO_JSON_DIRECTORY"
+#endif
+""")
+    source.add_text("")
+    lib_json = "src/lib_json"
+    source.add_file(os.path.join(lib_json, "json_tool.h"))
+    source.add_file(os.path.join(lib_json, "json_reader.cpp"))
+    source.add_file(os.path.join(lib_json, "json_valueiterator.inl"))
+    source.add_file(os.path.join(lib_json, "json_value.cpp"))
+    source.add_file(os.path.join(lib_json, "json_writer.cpp"))
 
-    print 'Writing amalgated source to %r' % target_source_path
-    source.write_to( target_source_path )
+    print("Writing amalgamated source to %r" % target_source_path)
+    source.write_to(target_source_path)
 
 def main():
     usage = """%prog [options]
-Generate a single amalgated source and header file from the sources.
+Generate a single amalgamated source and header file from the sources.
 """
     from optparse import OptionParser
     parser = OptionParser(usage=usage)
     parser.allow_interspersed_args = False
-    parser.add_option('-s', '--source', dest="target_source_path", action='store', default='dist/jsoncpp.cpp',
+    parser.add_option("-s", "--source", dest="target_source_path", action="store", default="dist/jsoncpp.cpp",
         help="""Output .cpp source path. [Default: %default]""")
-    parser.add_option('-i', '--include', dest="header_include_path", action='store', default='json/json.h',
-        help="""Header include path. Used to include the header from the amalgated source file. [Default: %default]""")
-    parser.add_option('-t', '--top-dir', dest="top_dir", action='store', default=os.getcwd(),
+    parser.add_option("-i", "--include", dest="header_include_path", action="store", default="json/json.h",
+        help="""Header include path. Used to include the header from the amalgamated source file. [Default: %default]""")
+    parser.add_option("-t", "--top-dir", dest="top_dir", action="store", default=os.getcwd(),
         help="""Source top-directory. [Default: %default]""")
     parser.enable_interspersed_args()
     options, args = parser.parse_args()
 
-    msg = amalgamate_source( source_top_dir=options.top_dir,
+    msg = amalgamate_source(source_top_dir=options.top_dir,
                              target_source_path=options.target_source_path,
-                             header_include_path=options.header_include_path )
+                             header_include_path=options.header_include_path)
     if msg:
-        sys.stderr.write( msg + '\n' )
-        sys.exit( 1 )
+        sys.stderr.write(msg + "\n")
+        sys.exit(1)
     else:
-        print 'Source succesfully amalagated'
- 
-if __name__ == '__main__':
+        print("Source successfully amalgamated")
+
+if __name__ == "__main__":
     main()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/appveyor.yml b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/appveyor.yml
new file mode 100644
index 0000000..0b9c8fe
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/appveyor.yml
@@ -0,0 +1,32 @@
+clone_folder: c:\projects\jsoncpp
+
+environment:
+  matrix:
+    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+      CMAKE_GENERATOR: Visual Studio 14 2015
+    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
+      CMAKE_GENERATOR: Visual Studio 14 2015 Win64
+    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+      CMAKE_GENERATOR: Visual Studio 15 2017
+    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+      CMAKE_GENERATOR: Visual Studio 15 2017 Win64
+
+build_script:
+  - cmake --version
+  - cd c:\projects\jsoncpp
+  - cmake -G "%CMAKE_GENERATOR%" -DCMAKE_INSTALL_PREFIX:PATH=%CD:\=/%/install -DBUILD_SHARED_LIBS:BOOL=ON .
+  # Use ctest to make a dashboard build:
+  # - ctest -D Experimental(Start|Update|Configure|Build|Test|Coverage|MemCheck|Submit)
+  # NOTE: Testing on Windows is not yet finished:
+  # - ctest -C Release -D ExperimentalStart -D ExperimentalConfigure -D ExperimentalBuild -D ExperimentalTest -D ExperimentalSubmit
+  - ctest -C Release -D ExperimentalStart -D ExperimentalConfigure -D ExperimentalBuild -D ExperimentalSubmit
+  # Final step is to verify that installation succeeds
+  - cmake --build . --config Release --target install
+
+deploy:
+    provider: GitHub
+    auth_token:
+        secure: K2Tp1q8pIZ7rs0Ot24ZMWuwr12Ev6Tc6QkhMjGQxoQG3ng1pXtgPasiJ45IDXGdg
+    on:
+        branch: master
+        appveyor_repo_tag: true
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/dev.makefile b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/dev.makefile
new file mode 100644
index 0000000..1a4be6a
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/dev.makefile
@@ -0,0 +1,35 @@
+# This is only for jsoncpp developers/contributors.
+# We use this to sign releases, generate documentation, etc.
+VER?=$(shell cat version.txt)
+
+default:
+	@echo "VER=${VER}"
+sign: jsoncpp-${VER}.tar.gz
+	gpg --armor --detach-sign $<
+	gpg --verify $<.asc
+	# Then upload .asc to the release.
+jsoncpp-%.tar.gz:
+	curl https://github.com/open-source-parsers/jsoncpp/archive/$*.tar.gz -o $@
+dox:
+	python doxybuild.py --doxygen=$$(which doxygen) --in doc/web_doxyfile.in
+	rsync -va -c --delete dist/doxygen/jsoncpp-api-html-${VER}/ ../jsoncpp-docs/doxygen/
+	# Then 'git add -A' and 'git push' in jsoncpp-docs.
+build:
+	mkdir -p build/debug
+	cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_SHARED_LIBS=ON -G "Unix Makefiles" ../..
+	make -C build/debug
+
+# Currently, this depends on include/json/version.h generated
+# by cmake.
+test-amalgamate:
+	python2.7 amalgamate.py
+	python3.4 amalgamate.py
+	cd dist; gcc -I. -c jsoncpp.cpp
+
+valgrind:
+	valgrind --error-exitcode=42 --leak-check=full ./build/debug/src/test_lib_json/jsoncpp_test
+
+clean:
+	\rm -rf *.gz *.asc dist/
+
+.PHONY: build
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/__init__.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/__init__.py
index c944e7c..4a51e65 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/__init__.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/__init__.py
@@ -1 +1,6 @@
-# module
\ No newline at end of file
+# Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+# module
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmw7.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmw7.json
new file mode 100644
index 0000000..cd7b777
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmw7.json
@@ -0,0 +1,33 @@
+{
+    "cmake_variants" : [
+        {"name": "generator",
+         "generators": [
+            {"generator": [
+                "Visual Studio 7 .NET 2003",
+                "Visual Studio 9 2008",
+                "Visual Studio 9 2008 Win64",
+                "Visual Studio 10",
+                "Visual Studio 10 Win64",
+                "Visual Studio 11",
+                "Visual Studio 11 Win64"
+                ]
+            },
+            {"generator": ["MinGW Makefiles"],
+             "env_prepend": [{"path": "c:/wut/prg/MinGW/bin"}]
+            }
+         ]
+        },
+        {"name": "shared_dll",
+         "variables": [
+            ["BUILD_SHARED_LIBS=true"],
+            ["BUILD_SHARED_LIBS=false"]
+          ]
+        },
+        {"name": "build_type",
+         "build_types": [
+            "debug",
+            "release"
+            ]
+        }
+    ]
+}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmxp.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmxp.json
new file mode 100644
index 0000000..f82a077
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/agent_vmxp.json
@@ -0,0 +1,26 @@
+{
+    "cmake_variants" : [
+        {"name": "generator",
+         "generators": [
+            {"generator": [
+                "Visual Studio 6",
+                "Visual Studio 7",
+                "Visual Studio 8 2005"
+                ]
+            }
+         ]
+        },
+        {"name": "shared_dll",
+         "variables": [
+            ["BUILD_SHARED_LIBS=true"],
+            ["BUILD_SHARED_LIBS=false"]
+          ]
+        },
+        {"name": "build_type",
+         "build_types": [
+            "debug",
+            "release"
+            ]
+        }
+    ]
+}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/antglob.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/antglob.py
index 30837b5..9843765 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/antglob.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/antglob.py
@@ -1,7 +1,11 @@
 #!/usr/bin/env python
 # encoding: utf-8
-# Baptiste Lepilleur, 2009
+# Copyright 2009 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
+from __future__ import print_function
 from dircache import listdir
 import re
 import fnmatch
@@ -53,9 +57,9 @@
 ALL_NO_LINK = DIR | FILE
 ALL = DIR | FILE | LINKS
 
-_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' )
+_ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)')
 
-def ant_pattern_to_re( ant_pattern ):
+def ant_pattern_to_re(ant_pattern):
     """Generates a regular expression from the ant pattern.
     Matching convention:
     **/a: match 'a', 'dir/a', 'dir1/dir2/a'
@@ -64,30 +68,30 @@
     """
     rex = ['^']
     next_pos = 0
-    sep_rex = r'(?:/|%s)' % re.escape( os.path.sep )
+    sep_rex = r'(?:/|%s)' % re.escape(os.path.sep)
 ##    print 'Converting', ant_pattern
-    for match in _ANT_RE.finditer( ant_pattern ):
+    for match in _ANT_RE.finditer(ant_pattern):
 ##        print 'Matched', match.group()
 ##        print match.start(0), next_pos
         if match.start(0) != next_pos:
-            raise ValueError( "Invalid ant pattern" )
+            raise ValueError("Invalid ant pattern")
         if match.group(1): # /**/
-            rex.append( sep_rex + '(?:.*%s)?' % sep_rex )
+            rex.append(sep_rex + '(?:.*%s)?' % sep_rex)
         elif match.group(2): # **/
-            rex.append( '(?:.*%s)?' % sep_rex )
+            rex.append('(?:.*%s)?' % sep_rex)
         elif match.group(3): # /**
-            rex.append( sep_rex + '.*' )
+            rex.append(sep_rex + '.*')
         elif match.group(4): # *
-            rex.append( '[^/%s]*' % re.escape(os.path.sep) )
+            rex.append('[^/%s]*' % re.escape(os.path.sep))
         elif match.group(5): # /
-            rex.append( sep_rex )
+            rex.append(sep_rex)
         else: # somepath
-            rex.append( re.escape(match.group(6)) )
+            rex.append(re.escape(match.group(6)))
         next_pos = match.end()
     rex.append('$')
-    return re.compile( ''.join( rex ) )
+    return re.compile(''.join(rex))
 
-def _as_list( l ):
+def _as_list(l):
     if isinstance(l, basestring):
         return l.split()
     return l
@@ -104,37 +108,37 @@
     dir_path = dir_path.replace('/',os.path.sep)
     entry_type_filter = entry_type
 
-    def is_pruned_dir( dir_name ):
+    def is_pruned_dir(dir_name):
         for pattern in prune_dirs:
-            if fnmatch.fnmatch( dir_name, pattern ):
+            if fnmatch.fnmatch(dir_name, pattern):
                 return True
         return False
 
-    def apply_filter( full_path, filter_rexs ):
+    def apply_filter(full_path, filter_rexs):
         """Return True if at least one of the filter regular expression match full_path."""
         for rex in filter_rexs:
-            if rex.match( full_path ):
+            if rex.match(full_path):
                 return True
         return False
 
-    def glob_impl( root_dir_path ):
+    def glob_impl(root_dir_path):
         child_dirs = [root_dir_path]
         while child_dirs:
             dir_path = child_dirs.pop()
-            for entry in listdir( dir_path ):
-                full_path = os.path.join( dir_path, entry )
+            for entry in listdir(dir_path):
+                full_path = os.path.join(dir_path, entry)
 ##                print 'Testing:', full_path,
-                is_dir = os.path.isdir( full_path )
-                if is_dir and not is_pruned_dir( entry ): # explore child directory ?
+                is_dir = os.path.isdir(full_path)
+                if is_dir and not is_pruned_dir(entry): # explore child directory ?
 ##                    print '===> marked for recursion',
-                    child_dirs.append( full_path )
-                included = apply_filter( full_path, include_filter )
-                rejected = apply_filter( full_path, exclude_filter )
+                    child_dirs.append(full_path)
+                included = apply_filter(full_path, include_filter)
+                rejected = apply_filter(full_path, exclude_filter)
                 if not included or rejected: # do not include entry ?
 ##                    print '=> not included or rejected'
                     continue
-                link = os.path.islink( full_path )
-                is_file = os.path.isfile( full_path )
+                link = os.path.islink(full_path)
+                is_file = os.path.isfile(full_path)
                 if not is_file and not is_dir:
 ##                    print '=> unknown entry type'
                     continue
@@ -145,57 +149,57 @@
 ##                print '=> type: %d' % entry_type, 
                 if (entry_type & entry_type_filter) != 0:
 ##                    print ' => KEEP'
-                    yield os.path.join( dir_path, entry )
+                    yield os.path.join(dir_path, entry)
 ##                else:
 ##                    print ' => TYPE REJECTED'
-    return list( glob_impl( dir_path ) )
+    return list(glob_impl(dir_path))
 
 
 if __name__ == "__main__":
     import unittest
 
     class AntPatternToRETest(unittest.TestCase):
-##        def test_conversion( self ):
-##            self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern )
+##        def test_conversion(self):
+##            self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern)
 
-        def test_matching( self ):
-            test_cases = [ ( 'path',
+        def test_matching(self):
+            test_cases = [ ('path',
                              ['path'],
-                             ['somepath', 'pathsuffix', '/path', '/path'] ),
-                           ( '*.py',
+                             ['somepath', 'pathsuffix', '/path', '/path']),
+                           ('*.py',
                              ['source.py', 'source.ext.py', '.py'],
-                             ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ),
-                           ( '**/path',
+                             ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']),
+                           ('**/path',
                              ['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
-                             ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ),
-                           ( 'path/**',
+                             ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']),
+                           ('path/**',
                              ['path/a', 'path/path/a', 'path//'],
-                             ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ),
-                           ( '/**/path',
+                             ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']),
+                           ('/**/path',
                              ['/path', '/a/path', '/a/b/path/path', '/path/path'],
-                             ['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ),
-                           ( 'a/b',
+                             ['path', 'path/', 'a/path', '/pathsuffix', '/somepath']),
+                           ('a/b',
                              ['a/b'],
-                             ['somea/b', 'a/bsuffix', 'a/b/c'] ),
-                           ( '**/*.py',
+                             ['somea/b', 'a/bsuffix', 'a/b/c']),
+                           ('**/*.py',
                              ['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
-                             ['script.pyc', 'script.pyo', 'a.py/b'] ),
-                           ( 'src/**/*.py',
+                             ['script.pyc', 'script.pyo', 'a.py/b']),
+                           ('src/**/*.py',
                              ['src/a.py', 'src/dir/a.py'],
-                             ['a/src/a.py', '/src/a.py'] ),
+                             ['a/src/a.py', '/src/a.py']),
                            ]
             for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
-                def local_path( paths ):
+                def local_path(paths):
                     return [ p.replace('/',os.path.sep) for p in paths ]
-                test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) )
+                test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches)))
             for ant_pattern, accepted_matches, rejected_matches in test_cases:
-                rex = ant_pattern_to_re( ant_pattern )
-                print 'ant_pattern:', ant_pattern, ' => ', rex.pattern
+                rex = ant_pattern_to_re(ant_pattern)
+                print('ant_pattern:', ant_pattern, ' => ', rex.pattern)
                 for accepted_match in accepted_matches:
-                    print 'Accepted?:', accepted_match
-                    self.assert_( rex.match( accepted_match ) is not None )
+                    print('Accepted?:', accepted_match)
+                    self.assertTrue(rex.match(accepted_match) is not None)
                 for rejected_match in rejected_matches:
-                    print 'Rejected?:', rejected_match
-                    self.assert_( rex.match( rejected_match ) is None )
+                    print('Rejected?:', rejected_match)
+                    self.assertTrue(rex.match(rejected_match) is None)
 
     unittest.main()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/batchbuild.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/batchbuild.py
new file mode 100644
index 0000000..0eb0690
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/batchbuild.py
@@ -0,0 +1,278 @@
+from __future__ import print_function
+import collections
+import itertools
+import json
+import os
+import os.path
+import re
+import shutil
+import string
+import subprocess
+import sys
+import cgi
+
+class BuildDesc:
+    def __init__(self, prepend_envs=None, variables=None, build_type=None, generator=None):
+        self.prepend_envs = prepend_envs or [] # [ { "var": "value" } ]
+        self.variables = variables or []
+        self.build_type = build_type
+        self.generator = generator
+
+    def merged_with(self, build_desc):
+        """Returns a new BuildDesc by merging field content.
+           Prefer build_desc fields to self fields for single valued field.
+        """
+        return BuildDesc(self.prepend_envs + build_desc.prepend_envs,
+                          self.variables + build_desc.variables,
+                          build_desc.build_type or self.build_type,
+                          build_desc.generator or self.generator)
+
+    def env(self):
+        environ = os.environ.copy()
+        for values_by_name in self.prepend_envs:
+            for var, value in list(values_by_name.items()):
+                var = var.upper()
+                if type(value) is unicode:
+                    value = value.encode(sys.getdefaultencoding())
+                if var in environ:
+                    environ[var] = value + os.pathsep + environ[var]
+                else:
+                    environ[var] = value
+        return environ
+
+    def cmake_args(self):
+        args = ["-D%s" % var for var in self.variables]
+        # skip build type for Visual Studio solution as it cause warning
+        if self.build_type and 'Visual' not in self.generator:
+            args.append("-DCMAKE_BUILD_TYPE=%s" % self.build_type)
+        if self.generator:
+            args.extend(['-G', self.generator])
+        return args
+
+    def __repr__(self):
+        return "BuildDesc(%s, build_type=%s)" %  (" ".join(self.cmake_args()), self.build_type)
+
+class BuildData:
+    def __init__(self, desc, work_dir, source_dir):
+        self.desc = desc
+        self.work_dir = work_dir
+        self.source_dir = source_dir
+        self.cmake_log_path = os.path.join(work_dir, 'batchbuild_cmake.log')
+        self.build_log_path = os.path.join(work_dir, 'batchbuild_build.log')
+        self.cmake_succeeded = False
+        self.build_succeeded = False
+
+    def execute_build(self):
+        print('Build %s' % self.desc)
+        self._make_new_work_dir()
+        self.cmake_succeeded = self._generate_makefiles()
+        if self.cmake_succeeded:
+            self.build_succeeded = self._build_using_makefiles()
+        return self.build_succeeded
+
+    def _generate_makefiles(self):
+        print('  Generating makefiles: ', end=' ')
+        cmd = ['cmake'] + self.desc.cmake_args() + [os.path.abspath(self.source_dir)]
+        succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.cmake_log_path)
+        print('done' if succeeded else 'FAILED')
+        return succeeded
+
+    def _build_using_makefiles(self):
+        print('  Building:', end=' ')
+        cmd = ['cmake', '--build', self.work_dir]
+        if self.desc.build_type:
+            cmd += ['--config', self.desc.build_type]
+        succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.build_log_path)
+        print('done' if succeeded else 'FAILED')
+        return succeeded
+
+    def _execute_build_subprocess(self, cmd, env, log_path):
+        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
+                                    env=env)
+        stdout, _ = process.communicate()
+        succeeded = (process.returncode == 0)
+        with open(log_path, 'wb') as flog:
+            log = ' '.join(cmd) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
+            flog.write(fix_eol(log))
+        return succeeded
+
+    def _make_new_work_dir(self):
+        if os.path.isdir(self.work_dir):
+            print('  Removing work directory', self.work_dir)
+            shutil.rmtree(self.work_dir, ignore_errors=True)
+        if not os.path.isdir(self.work_dir):
+            os.makedirs(self.work_dir)
+
+def fix_eol(stdout):
+    """Fixes wrong EOL produced by cmake --build on Windows (\r\r\n instead of \r\n).
+    """
+    return re.sub('\r*\n', os.linesep, stdout)
+
+def load_build_variants_from_config(config_path):
+    with open(config_path, 'rb') as fconfig:
+        data = json.load(fconfig)
+    variants = data[ 'cmake_variants' ]
+    build_descs_by_axis = collections.defaultdict(list)
+    for axis in variants:
+        axis_name = axis["name"]
+        build_descs = []
+        if "generators" in axis:
+            for generator_data in axis["generators"]:
+                for generator in generator_data["generator"]:
+                    build_desc = BuildDesc(generator=generator,
+                                            prepend_envs=generator_data.get("env_prepend"))
+                    build_descs.append(build_desc)
+        elif "variables" in axis:
+            for variables in axis["variables"]:
+                build_desc = BuildDesc(variables=variables)
+                build_descs.append(build_desc)
+        elif "build_types" in axis:
+            for build_type in axis["build_types"]:
+                build_desc = BuildDesc(build_type=build_type)
+                build_descs.append(build_desc)
+        build_descs_by_axis[axis_name].extend(build_descs)
+    return build_descs_by_axis
+
+def generate_build_variants(build_descs_by_axis):
+    """Returns a list of BuildDesc generated for the partial BuildDesc for each axis."""
+    axis_names = list(build_descs_by_axis.keys())
+    build_descs = []
+    for axis_name, axis_build_descs in list(build_descs_by_axis.items()):
+        if len(build_descs):
+            # for each existing build_desc and each axis build desc, create a new build_desc
+            new_build_descs = []
+            for prototype_build_desc, axis_build_desc in itertools.product(build_descs, axis_build_descs):
+                new_build_descs.append(prototype_build_desc.merged_with(axis_build_desc))
+            build_descs = new_build_descs
+        else:
+            build_descs = axis_build_descs
+    return build_descs
+
+HTML_TEMPLATE = string.Template('''<html>
+<head>
+    <title>$title</title>
+    <style type="text/css">
+    td.failed {background-color:#f08080;}
+    td.ok {background-color:#c0eec0;}
+    </style>
+</head>
+<body>
+<table border="1">
+<thead>
+    <tr>
+        <th>Variables</th>
+        $th_vars
+    </tr>
+    <tr>
+        <th>Build type</th>
+        $th_build_types
+    </tr>
+</thead>
+<tbody>
+$tr_builds
+</tbody>
+</table>
+</body></html>''')
+
+def generate_html_report(html_report_path, builds):
+    report_dir = os.path.dirname(html_report_path)
+    # Vertical axis: generator
+    # Horizontal: variables, then build_type
+    builds_by_generator = collections.defaultdict(list)
+    variables = set()
+    build_types_by_variable = collections.defaultdict(set)
+    build_by_pos_key = {} # { (generator, var_key, build_type): build }
+    for build in builds:
+        builds_by_generator[build.desc.generator].append(build)
+        var_key = tuple(sorted(build.desc.variables))
+        variables.add(var_key)
+        build_types_by_variable[var_key].add(build.desc.build_type)
+        pos_key = (build.desc.generator, var_key, build.desc.build_type)
+        build_by_pos_key[pos_key] = build
+    variables = sorted(variables)
+    th_vars = []
+    th_build_types = []
+    for variable in variables:
+        build_types = sorted(build_types_by_variable[variable])
+        nb_build_type = len(build_types_by_variable[variable])
+        th_vars.append('<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape(' '.join(variable))))
+        for build_type in build_types:
+            th_build_types.append('<th>%s</th>' % cgi.escape(build_type))
+    tr_builds = []
+    for generator in sorted(builds_by_generator):
+        tds = [ '<td>%s</td>\n' % cgi.escape(generator) ]
+        for variable in variables:
+            build_types = sorted(build_types_by_variable[variable])
+            for build_type in build_types:
+                pos_key = (generator, variable, build_type)
+                build = build_by_pos_key.get(pos_key)
+                if build:
+                    cmake_status = 'ok' if build.cmake_succeeded else 'FAILED'
+                    build_status = 'ok' if build.build_succeeded else 'FAILED'
+                    cmake_log_url = os.path.relpath(build.cmake_log_path, report_dir)
+                    build_log_url = os.path.relpath(build.build_log_path, report_dir)
+                    td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % (
+                        build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
+                    if build.cmake_succeeded:
+                        td += '<br><a href="%s" class="%s">Build: %s</a>' % (
+                            build_log_url, build_status.lower(), build_status)
+                    td += '</td>'
+                else:
+                    td = '<td></td>'
+                tds.append(td)
+        tr_builds.append('<tr>%s</tr>' % '\n'.join(tds))
+    html = HTML_TEMPLATE.substitute(
+        title='Batch build report',
+        th_vars=' '.join(th_vars),
+        th_build_types=' '.join(th_build_types),
+        tr_builds='\n'.join(tr_builds))
+    with open(html_report_path, 'wt') as fhtml:
+        fhtml.write(html)
+    print('HTML report generated in:', html_report_path)
+
+def main():
+    usage = r"""%prog WORK_DIR SOURCE_DIR CONFIG_JSON_PATH [CONFIG2_JSON_PATH...]
+Build a given CMake based project located in SOURCE_DIR with multiple generators/options
+as described in CONFIG_JSON_PATH, building in WORK_DIR.
+
+Example of call:
+python devtools\batchbuild.py e:\buildbots\jsoncpp\build . devtools\agent_vmw7.json
+"""
+    from optparse import OptionParser
+    parser = OptionParser(usage=usage)
+    parser.allow_interspersed_args = True
+#    parser.add_option('-v', '--verbose', dest="verbose", action='store_true',
+#        help="""Be verbose.""")
+    parser.enable_interspersed_args()
+    options, args = parser.parse_args()
+    if len(args) < 3:
+        parser.error("Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH.")
+    work_dir = args[0]
+    source_dir = args[1].rstrip('/\\')
+    config_paths = args[2:]
+    for config_path in config_paths:
+        if not os.path.isfile(config_path):
+            parser.error("Can not read: %r" % config_path)
+
+    # generate build variants
+    build_descs = []
+    for config_path in config_paths:
+        build_descs_by_axis = load_build_variants_from_config(config_path)
+        build_descs.extend(generate_build_variants(build_descs_by_axis))
+    print('Build variants (%d):' % len(build_descs))
+    # assign build directory for each variant
+    if not os.path.isdir(work_dir):
+        os.makedirs(work_dir)
+    builds = []
+    with open(os.path.join(work_dir, 'matrix-dir-map.txt'), 'wt') as fmatrixmap:
+        for index, build_desc in enumerate(build_descs):
+            build_desc_work_dir = os.path.join(work_dir, '%03d' % (index+1))
+            builds.append(BuildData(build_desc, build_desc_work_dir, source_dir))
+            fmatrixmap.write('%s: %s\n' % (build_desc_work_dir, build_desc))
+    for build in builds:
+        build.execute_build()
+    html_report_path = os.path.join(work_dir, 'batchbuild-report.html')
+    generate_html_report(html_report_path, builds)
+    print('Done')
+
+
+if __name__ == '__main__':
+    main()
+
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/fixeol.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/fixeol.py
index 4fed6ce..45252a0 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/fixeol.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/fixeol.py
@@ -1,13 +1,20 @@
-import os.path
+# Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
-def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
+from __future__ import print_function
+import os.path
+import sys
+
+def fix_source_eol(path, is_dry_run = True, verbose = True, eol = '\n'):
     """Makes sure that all sources have the specified eol sequence (default: unix)."""
-    if not os.path.isfile( path ):
-        raise ValueError( 'Path "%s" is not a file' % path )
+    if not os.path.isfile(path):
+        raise ValueError('Path "%s" is not a file' % path)
     try:
         f = open(path, 'rb')
-    except IOError, msg:
-        print >> sys.stderr, "%s: I/O Error: %s" % (file, str(msg))
+    except IOError as msg:
+        print("%s: I/O Error: %s" % (file, str(msg)), file=sys.stderr)
         return False
     try:
         raw_lines = f.readlines()
@@ -15,7 +22,7 @@
         f.close()
     fixed_lines = [line.rstrip('\r\n') + eol for line in raw_lines]
     if raw_lines != fixed_lines:
-        print '%s =>' % path,
+        print('%s =>' % path, end=' ')
         if not is_dry_run:
             f = open(path, "wb")
             try:
@@ -23,32 +30,32 @@
             finally:
                 f.close()
         if verbose:
-            print is_dry_run and ' NEED FIX' or ' FIXED'
+            print(is_dry_run and ' NEED FIX' or ' FIXED')
     return True
 ##    
 ##    
 ##
-##def _do_fix( is_dry_run = True ):
+##def _do_fix(is_dry_run = True):
 ##    from waftools import antglob
-##    python_sources = antglob.glob( '.',
+##    python_sources = antglob.glob('.',
 ##        includes = '**/*.py **/wscript **/wscript_build',
 ##        excludes = antglob.default_excludes + './waf.py',
-##        prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+##        prune_dirs = antglob.prune_dirs + 'waf-* ./build')
 ##    for path in python_sources:
-##        _fix_python_source( path, is_dry_run )
+##        _fix_python_source(path, is_dry_run)
 ##
-##    cpp_sources = antglob.glob( '.',
+##    cpp_sources = antglob.glob('.',
 ##        includes = '**/*.cpp **/*.h **/*.inl',
-##        prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+##        prune_dirs = antglob.prune_dirs + 'waf-* ./build')
 ##    for path in cpp_sources:
-##        _fix_source_eol( path, is_dry_run )
+##        _fix_source_eol(path, is_dry_run)
 ##
 ##
 ##def dry_fix(context):
-##    _do_fix( is_dry_run = True )
+##    _do_fix(is_dry_run = True)
 ##
 ##def fix(context):
-##    _do_fix( is_dry_run = False )
+##    _do_fix(is_dry_run = False)
 ##
 ##def shutdown():
 ##    pass
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/licenseupdater.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/licenseupdater.py
index 866eada..36bdb5c 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/licenseupdater.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/licenseupdater.py
@@ -1,18 +1,19 @@
 """Updates the license text in source file.
 """
+from __future__ import print_function
 
 # An existing license is found if the file starts with the string below,
 # and ends with the first blank line.
 LICENSE_BEGIN = "// Copyright "
 
-BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 Baptiste Lepilleur
+BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 """.replace('\r\n','\n')
 
-def update_license( path, dry_run, show_diff ):
+def update_license(path, dry_run, show_diff):
     """Update the license statement in the specified file.
     Parameters:
       path: path of the C++ source file to update.
@@ -21,28 +22,28 @@
       show_diff: if True, print the path of the file that would be modified,
                  as well as the change made to the file. 
     """
-    with open( path, 'rt' ) as fin:
+    with open(path, 'rt') as fin:
         original_text = fin.read().replace('\r\n','\n')
         newline = fin.newlines and fin.newlines[0] or '\n'
-    if not original_text.startswith( LICENSE_BEGIN ):
+    if not original_text.startswith(LICENSE_BEGIN):
         # No existing license found => prepend it
         new_text = BRIEF_LICENSE + original_text
     else:
-        license_end_index = original_text.index( '\n\n' ) # search first blank line
+        license_end_index = original_text.index('\n\n') # search first blank line
         new_text = BRIEF_LICENSE + original_text[license_end_index+2:]
     if original_text != new_text:
         if not dry_run:
-            with open( path, 'wb' ) as fout:
-                fout.write( new_text.replace('\n', newline ) )
-        print 'Updated', path
+            with open(path, 'wb') as fout:
+                fout.write(new_text.replace('\n', newline))
+        print('Updated', path)
         if show_diff:
             import difflib
-            print '\n'.join( difflib.unified_diff( original_text.split('\n'),
-                                                   new_text.split('\n') ) )
+            print('\n'.join(difflib.unified_diff(original_text.split('\n'),
+                                                   new_text.split('\n'))))
         return True
     return False
 
-def update_license_in_source_directories( source_dirs, dry_run, show_diff ):
+def update_license_in_source_directories(source_dirs, dry_run, show_diff):
     """Updates license text in C++ source files found in directory source_dirs.
     Parameters:
       source_dirs: list of directory to scan for C++ sources. Directories are
@@ -55,11 +56,11 @@
     from devtools import antglob
     prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
     for source_dir in source_dirs:
-        cpp_sources = antglob.glob( source_dir,
+        cpp_sources = antglob.glob(source_dir,
             includes = '''**/*.h **/*.cpp **/*.inl''',
-            prune_dirs = prune_dirs )
+            prune_dirs = prune_dirs)
         for source in cpp_sources:
-            update_license( source, dry_run, show_diff )
+            update_license(source, dry_run, show_diff)
 
 def main():
     usage = """%prog DIR [DIR2...]
@@ -82,8 +83,8 @@
         help="""On update, show change made to the file.""")
     parser.enable_interspersed_args()
     options, args = parser.parse_args()
-    update_license_in_source_directories( args, options.dry_run, options.show_diff )
-    print 'Done'
+    update_license_in_source_directories(args, options.dry_run, options.show_diff)
+    print('Done')
 
 if __name__ == '__main__':
     import sys
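
licenseupdater.py likewise keeps its original behaviour -- detect a header that starts with LICENSE_BEGIN, replace everything up to the first blank line, otherwise prepend the header -- and only gains Python 3 print calls plus the "and The JsonCpp Authors" attribution. A minimal, hedged sketch of that replacement idiom, with an illustrative function name and license text that are not part of the patch:

    # Illustrative sketch only; names and license text are assumptions.
    LICENSE_BEGIN = "// Copyright "
    BRIEF_LICENSE = LICENSE_BEGIN + "2007-2010 Example Authors\n// MIT license\n\n"

    def replace_header(text):
        """Return text with its leading license block swapped for BRIEF_LICENSE."""
        if not text.startswith(LICENSE_BEGIN):
            # No recognizable header: prepend one.
            return BRIEF_LICENSE + text
        # An existing header runs up to the first blank line (two consecutive newlines).
        end = text.index('\n\n')
        return BRIEF_LICENSE + text[end + 2:]

    assert replace_header("// Copyright old\n\nint x;\n").endswith("int x;\n")
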
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/tarball.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/tarball.py
index ccbda39..3c0ba65 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/tarball.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/devtools/tarball.py
@@ -1,5 +1,10 @@
-import os.path
-import gzip
+# Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+from contextlib import closing
+import os
 import tarfile
 
 TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
@@ -13,41 +18,35 @@
     prefix_dir: all files stored in the tarball be sub-directory of prefix_dir. Set to ''
         to make them child of root.
     """
-    base_dir = os.path.normpath( os.path.abspath( base_dir ) )
-    def archive_name( path ):
+    base_dir = os.path.normpath(os.path.abspath(base_dir))
+    def archive_name(path):
         """Makes path relative to base_dir."""
-        path = os.path.normpath( os.path.abspath( path ) )
-        common_path = os.path.commonprefix( (base_dir, path) )
+        path = os.path.normpath(os.path.abspath(path))
+        common_path = os.path.commonprefix((base_dir, path))
         archive_name = path[len(common_path):]
-        if os.path.isabs( archive_name ):
+        if os.path.isabs(archive_name):
             archive_name = archive_name[1:]
-        return os.path.join( prefix_dir, archive_name )
+        return os.path.join(prefix_dir, archive_name)
     def visit(tar, dirname, names):
         for name in names:
             path = os.path.join(dirname, name)
             if os.path.isfile(path):
                 path_in_tar = archive_name(path)
-                tar.add(path, path_in_tar )
+                tar.add(path, path_in_tar)
     compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
-    tar = tarfile.TarFile.gzopen( tarball_path, 'w', compresslevel=compression )
-    try:
+    with closing(tarfile.TarFile.open(tarball_path, 'w:gz',
+            compresslevel=compression)) as tar:
         for source in sources:
             source_path = source
-            if os.path.isdir( source ):
-                os.path.walk(source_path, visit, tar)
+            if os.path.isdir(source):
+                for dirpath, dirnames, filenames in os.walk(source_path):
+                    visit(tar, dirpath, filenames)
             else:
                 path_in_tar = archive_name(source_path)
-                tar.add(source_path, path_in_tar )      # filename, arcname
-    finally:
-        tar.close()
+                tar.add(source_path, path_in_tar)      # filename, arcname
 
-def decompress( tarball_path, base_dir ):
+def decompress(tarball_path, base_dir):
     """Decompress the gzipped tarball into directory base_dir.
     """
-    # !!! This class method is not documented in the online doc
-    # nor is bz2open!
-    tar = tarfile.TarFile.gzopen(tarball_path, mode='r')
-    try:
-        tar.extractall( base_dir )
-    finally:
-        tar.close()
+    with closing(tarfile.TarFile.open(tarball_path)) as tar:
+        tar.extractall(base_dir)
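
The tarball.py changes replace the undocumented TarFile.gzopen classmethod and the Python-2-only os.path.walk with tarfile.open in 'w:gz' mode and os.walk. On Python 2.7+ and 3.2+ TarFile objects also support the with statement directly, so a standalone sketch of the same packing idiom can skip contextlib.closing entirely (make_targz and its parameters below are illustrative, not part of the patch):

    # Illustrative sketch only; not taken from the jsoncpp patch.
    import os
    import tarfile

    def make_targz(tarball_path, source_dir, prefix_dir=''):
        """Pack every file under source_dir into a gzip tarball, stored under prefix_dir."""
        base = os.path.normpath(os.path.abspath(source_dir))
        with tarfile.open(tarball_path, 'w:gz', compresslevel=9) as tar:
            for dirpath, _dirnames, filenames in os.walk(base):
                for name in filenames:
                    path = os.path.join(dirpath, name)
                    arcname = os.path.join(prefix_dir, os.path.relpath(path, base))
                    tar.add(path, arcname)

Unpacking is symmetric: "with tarfile.open(tarball_path) as tar: tar.extractall(base_dir)", which is essentially what the patched decompress() does apart from the closing() wrapper.
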
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/doxyfile.in b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/doxyfile.in
index 48861d2..dcf514e 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/doxyfile.in
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/doxyfile.in
@@ -1,90 +1,112 @@
-# Doxyfile 1.5.9
+# Doxyfile 1.8.5
 
 # This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
+# doxygen (www.doxygen.org) for a project.
 #
-# All text after a hash (#) is considered a comment and will be ignored
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
 # The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
 
 #---------------------------------------------------------------------------
 # Project related configuration options
 #---------------------------------------------------------------------------
 
-# This tag specifies the encoding used for all characters in the config file 
-# that follow. The default is UTF-8 which is also the encoding used for all 
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the 
-# iconv built into libc) for the transcoding. See 
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
 
 DOXYFILE_ENCODING      = UTF-8
 
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
-# by quotes) that should identify the project.
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
 
 PROJECT_NAME           = "JsonCpp"
 
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
-# This could be handy for archiving the generated documentation or 
-# if some version control system is used.
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
 
 PROJECT_NUMBER         = %JSONCPP_VERSION%
 
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
-# base path where the generated documentation will be put. 
-# If a relative path is entered, it will be relative to the location 
-# where doxygen was started. If left blank the current directory will be used.
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
 
 OUTPUT_DIRECTORY       = %DOC_TOPDIR%
 
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
-# 4096 sub-directories (in 2 levels) under the output directory of each output 
-# format and will distribute the generated files over these directories. 
-# Enabling this option can be useful when feeding doxygen a huge amount of 
-# source files, where putting all generated files in the same directory would 
-# otherwise cause performance problems for the file system.
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
 
 CREATE_SUBDIRS         = NO
 
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
-# documentation generated by doxygen is written. Doxygen will use this 
-# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: 
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, 
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, 
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English 
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, 
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, 
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-
+# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en,
+# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
+# Turkish, Ukrainian and Vietnamese.
+# The default value is: English.
 
 OUTPUT_LANGUAGE        = English
 
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
-# include brief member descriptions after the members that are listed in 
-# the file and class documentation (similar to JavaDoc). 
-# Set to NO to disable this.
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
 
 BRIEF_MEMBER_DESC      = YES
 
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
-# the brief description of a member or function before the detailed description. 
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
 # brief descriptions will be completely suppressed.
+# The default value is: YES.
 
 REPEAT_BRIEF           = YES
 
-# This tag implements a quasi-intelligent brief description abbreviator 
-# that is used to form the text in various listings. Each string 
-# in this list, if found as the leading text of the brief description, will be 
-# stripped from the text and the result after processing the whole list, is 
-# used as the annotated text. Otherwise, the brief description is used as-is. 
-# If left blank, the following values are used ("$name" is automatically 
-# replaced with the name of the entity): "The $name class" "The $name widget" 
-# "The $name file" "is" "provides" "specifies" "contains" 
-# "represents" "a" "an" "the"
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
 
 ABBREVIATE_BRIEF       = "The $name class" \
                          "The $name widget" \
@@ -98,1437 +120,2183 @@
                          an \
                          the
 
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
-# Doxygen will generate a detailed section even if there is only a brief 
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
 # description.
+# The default value is: NO.
 
 ALWAYS_DETAILED_SEC    = NO
 
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all 
-# inherited members of a class in the documentation of that class as if those 
-# members were ordinary class members. Constructors, destructors and assignment 
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
 # operators of the base classes will not be shown.
+# The default value is: NO.
 
 INLINE_INHERITED_MEMB  = NO
 
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
-# path before files name in the file list and in the header files. If set 
-# to NO the shortest path that makes the file name unique will be used.
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
 
 FULL_PATH_NAMES        = YES
 
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
-# can be used to strip a user-defined part of the path. Stripping is 
-# only done if one of the specified strings matches the left-hand part of 
-# the path. The tag can be used to show relative paths in the file list. 
-# If left blank the directory from which doxygen is run is used as the 
-# path to strip.
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
 
 STRIP_FROM_PATH        = %TOPDIR%
 
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
-# the path mentioned in the documentation of a class, which tells 
-# the reader which header file to include in order to use a class. 
-# If left blank only the name of the header file containing the class 
-# definition is used. Otherwise one should specify the include paths that 
-# are normally passed to the compiler using the -I flag.
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
 
 STRIP_FROM_INC_PATH    = %TOPDIR%/include
 
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
-# (but less readable) file names. This can be useful is your file systems 
-# doesn't support long names like on DOS, Mac, or CD-ROM.
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
 
 SHORT_NAMES            = NO
 
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
-# will interpret the first line (until the first dot) of a JavaDoc-style 
-# comment as the brief description. If set to NO, the JavaDoc 
-# comments will behave just like regular Qt-style comments 
-# (thus requiring an explicit @brief command for a brief description.)
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
 
 JAVADOC_AUTOBRIEF      = YES
 
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will 
-# interpret the first line (until the first dot) of a Qt-style 
-# comment as the brief description. If set to NO, the comments 
-# will behave just like regular Qt-style comments (thus requiring 
-# an explicit \brief command for a brief description.)
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
 
 QT_AUTOBRIEF           = NO
 
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
-# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
-# comments) as a brief description. This used to be the default behaviour. 
-# The new default is to treat a multi-line C++ comment block as a detailed 
-# description. Set this tag to YES if you prefer the old behaviour instead.
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
 
 MULTILINE_CPP_IS_BRIEF = NO
 
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
-# member inherits the documentation from any documented member that it 
-# re-implements.
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
 
 INHERIT_DOCS           = YES
 
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce 
-# a new page for each member. If set to NO, the documentation of a member will 
-# be part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
 
 SEPARATE_MEMBER_PAGES  = NO
 
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
-# Doxygen uses this value to replace tabs by spaces in code fragments.
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
 
 TAB_SIZE               = 3
 
-# This tag can be used to specify a number of aliases that acts 
-# as commands in the documentation. An alias has the form "name=value". 
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
-# put the command \sideeffect (or @sideeffect) in the documentation, which 
-# will result in a user-defined paragraph with heading "Side Effects:". 
-# You can put \n's in the value part of an alias to insert newlines.
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
 
 ALIASES                = "testCaseSetup=\link CppUT::TestCase::setUp() setUp()\endlink" \
                          "testCaseRun=\link CppUT::TestCase::run() run()\endlink" \
                          "testCaseTearDown=\link CppUT::TestCase::tearDown() tearDown()\endlink" \
                          "json_ref=<a HREF='http://www.json.org/'>JSON (JavaScript Object Notation)</a>"
 
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
-# sources only. Doxygen will then generate output that is more tailored for C. 
-# For instance, some of the names that are used will be different. The list 
-# of all members will be omitted, etc.
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_FOR_C  = NO
 
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java 
-# sources only. Doxygen will then generate output that is more tailored for 
-# Java. For instance, namespaces will be presented as packages, qualified 
-# scopes will look different, etc.
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_JAVA   = NO
 
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran 
-# sources only. Doxygen will then generate output that is more tailored for 
-# Fortran.
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
 
 OPTIMIZE_FOR_FORTRAN   = NO
 
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL 
-# sources. Doxygen will then generate output that is tailored for 
-# VHDL.
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_VHDL   = NO
 
-# Doxygen selects the parser to use depending on the extension of the files it parses. 
-# With this tag you can assign which parser to use for a given extension. 
-# Doxygen has a built-in mapping, but you can override or extend it using this tag. 
-# The format is ext=language, where ext is a file extension, and language is one of 
-# the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, 
-# Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat 
-# .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), 
-# use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
 
-EXTENSION_MAPPING      = 
+EXTENSION_MAPPING      =
 
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want 
-# to include (a tag file for) the STL sources as input, then you should 
-# set this tag to YES in order to let doxygen match functions declarations and 
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. 
-# func(std::string) {}). This also make the inheritance and collaboration 
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
 # diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
 
 BUILTIN_STL_SUPPORT    = YES
 
-# If you use Microsoft's C++/CLI language, you should set this option to YES to 
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
 # enable parsing support.
+# The default value is: NO.
 
 CPP_CLI_SUPPORT        = NO
 
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. 
-# Doxygen will parse them like normal C++ but will assume all classes use public 
-# instead of private inheritance when no explicit protection keyword is present.
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
 
 SIP_SUPPORT            = NO
 
-# For Microsoft's IDL there are propget and propput attributes to indicate getter 
-# and setter methods for a property. Setting this option to YES (the default) 
-# will make doxygen to replace the get and set methods by a property in the 
-# documentation. This will only work if the methods are indeed getting or 
-# setting a simple type. If this is not the case, or you want to show the 
-# methods anyway, you should set this option to NO.
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
 
 IDL_PROPERTY_SUPPORT   = YES
 
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
-# tag is set to YES, then doxygen will reuse the documentation of the first 
-# member in the group (if any) for the other members of the group. By default 
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
 # all members of a group must be documented explicitly.
+# The default value is: NO.
 
 DISTRIBUTE_GROUP_DOC   = NO
 
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
-# the same type (for instance a group of public functions) to be put as a 
-# subgroup of that type (e.g. under the Public Functions section). Set it to 
-# NO to prevent subgrouping. Alternatively, this can be done per class using 
-# the \nosubgrouping command.
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
 
 SUBGROUPING            = YES
 
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum 
-# is documented as struct, union, or enum with the name of the typedef. So 
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct 
-# with name TypeT. When disabled the typedef will appear as a member of a file, 
-# namespace, or class. And the struct will be named TypeS. This can typically 
-# be useful for C code in case the coding convention dictates that all compound 
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
 # types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
 
 TYPEDEF_HIDES_STRUCT   = NO
 
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to 
-# determine which symbols to keep in memory and which to flush to disk. 
-# When the cache is full, less often used symbols will be written to disk. 
-# For small to medium size projects (<1000 input files) the default value is 
-# probably good enough. For larger projects a too small cache size can cause 
-# doxygen to be busy swapping symbols to and from disk most of the time 
-# causing a significant performance penality. 
-# If the system has enough physical memory increasing the cache will improve the 
-# performance by keeping more symbols in memory. Note that the value works on 
-# a logarithmic scale so increasing the size by one will rougly double the 
-# memory usage. The cache size is given by this formula: 
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, 
-# corresponding to a cache size of 2^16 = 65536 symbols
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
 
-SYMBOL_CACHE_SIZE      = 0
+LOOKUP_CACHE_SIZE      = 0
 
 #---------------------------------------------------------------------------
 # Build related configuration options
 #---------------------------------------------------------------------------
 
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
-# documentation are documented, even if no documentation was available. 
-# Private class members and static file members will be hidden unless 
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
 
 EXTRACT_ALL            = YES
 
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
-# will be included in the documentation.
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
 
 EXTRACT_PRIVATE        = NO
 
-# If the EXTRACT_STATIC tag is set to YES all static members of a file 
-# will be included in the documentation.
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
 
 EXTRACT_STATIC         = YES
 
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
-# defined locally in source files will be included in the documentation. 
-# If set to NO only classes defined in header files are included.
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
 
 EXTRACT_LOCAL_CLASSES  = NO
 
-# This flag is only useful for Objective-C code. When set to YES local 
-# methods, which are defined in the implementation section but not in 
-# the interface are included in the documentation. 
-# If set to NO (the default) only methods in the interface are included.
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
 
 EXTRACT_LOCAL_METHODS  = NO
 
-# If this flag is set to YES, the members of anonymous namespaces will be 
-# extracted and appear in the documentation as a namespace called 
-# 'anonymous_namespace{file}', where file will be replaced with the base 
-# name of the file that contains the anonymous namespace. By default 
-# anonymous namespace are hidden.
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
 
 EXTRACT_ANON_NSPACES   = NO
 
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
-# undocumented members of documented classes, files or namespaces. 
-# If set to NO (the default) these members will be included in the 
-# various overviews, but no documentation section is generated. 
-# This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_MEMBERS     = NO
 
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
-# undocumented classes that are normally visible in the class hierarchy. 
-# If set to NO (the default) these classes will be included in the various 
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_CLASSES     = NO
 
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
-# friend (class|struct|union) declarations. 
-# If set to NO (the default) these declarations will be included in the 
-# documentation.
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
 
 HIDE_FRIEND_COMPOUNDS  = NO
 
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
-# documentation blocks found inside the body of a function. 
-# If set to NO (the default) these blocks will be appended to the 
-# function's detailed documentation block.
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
 
 HIDE_IN_BODY_DOCS      = NO
 
-# The INTERNAL_DOCS tag determines if documentation 
-# that is typed after a \internal command is included. If the tag is set 
-# to NO (the default) then the documentation will be excluded. 
-# Set it to YES to include the internal documentation.
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
 
 INTERNAL_DOCS          = YES
 
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
-# file names in lower-case letters. If set to YES upper-case letters are also 
-# allowed. This is useful if you have classes or files whose names only differ 
-# in case and if your file system supports case sensitive file names. Windows 
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
 # and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
 
 CASE_SENSE_NAMES       = NO
 
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
-# will show members with their full class and namespace scopes in the 
-# documentation. If set to YES the scope will be hidden.
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
 
 HIDE_SCOPE_NAMES       = NO
 
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
-# will put a list of the files that are included by a file in the documentation 
-# of that file.
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
 
 SHOW_INCLUDE_FILES     = YES
 
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
-# is inserted in the documentation for inline members.
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
 
 INLINE_INFO            = YES
 
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
-# will sort the (detailed) documentation of file and class members 
-# alphabetically by member name. If set to NO the members will appear in 
-# declaration order.
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
 
 SORT_MEMBER_DOCS       = YES
 
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
-# brief documentation of file, namespace and class members alphabetically 
-# by member name. If set to NO (the default) the members will appear in 
-# declaration order.
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: NO.
 
 SORT_BRIEF_DOCS        = NO
 
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the 
-# hierarchy of group names into alphabetical order. If set to NO (the default) 
-# the group names will appear in their defined order.
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
 
 SORT_GROUP_NAMES       = NO
 
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
-# sorted by fully-qualified names, including namespaces. If set to 
-# NO (the default), the class list will be sorted only by class name, 
-# not including the namespace part. 
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
-# Note: This option applies only to the class list, not to the 
-# alphabetical list.
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
 
 SORT_BY_SCOPE_NAME     = YES
 
-# The GENERATE_TODOLIST tag can be used to enable (YES) or 
-# disable (NO) the todo list. This list is created by putting \todo 
-# commands in the documentation.
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TODOLIST      = YES
 
-# The GENERATE_TESTLIST tag can be used to enable (YES) or 
-# disable (NO) the test list. This list is created by putting \test 
-# commands in the documentation.
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TESTLIST      = NO
 
-# The GENERATE_BUGLIST tag can be used to enable (YES) or 
-# disable (NO) the bug list. This list is created by putting \bug 
-# commands in the documentation.
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
 
 GENERATE_BUGLIST       = NO
 
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
-# disable (NO) the deprecated list. This list is created by putting 
-# \deprecated commands in the documentation.
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
 
 GENERATE_DEPRECATEDLIST= YES
 
-# The ENABLED_SECTIONS tag can be used to enable conditional 
-# documentation sections, marked by \if sectionname ... \endif.
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
 
-ENABLED_SECTIONS       = 
+ENABLED_SECTIONS       =
 
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
-# the initial value of a variable or define consists of for it to appear in 
-# the documentation. If the initializer consists of more lines than specified 
-# here it will be hidden. Use a value of 0 to hide initializers completely. 
-# The appearance of the initializer of individual variables and defines in the 
-# documentation can be controlled using \showinitializer or \hideinitializer 
-# command in the documentation regardless of this setting.
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
 
 MAX_INITIALIZER_LINES  = 30
 
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
-# at the bottom of the documentation of classes and structs. If set to YES the 
-# list will mention the files that were used to generate the documentation.
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
 
 SHOW_USED_FILES        = YES
 
-# If the sources in your project are distributed over multiple directories 
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy 
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES       = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
-# This will remove the Files entry from the Quick Index and from the 
-# Folder Tree View (if specified). The default is YES.
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
 
 SHOW_FILES             = YES
 
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the 
-# Namespaces page. 
-# This will remove the Namespaces entry from the Quick Index 
-# and from the Folder Tree View (if specified). The default is YES.
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
 
 SHOW_NAMESPACES        = YES
 
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that 
-# doxygen should invoke to get the current version for each file (typically from 
-# the version control system). Doxygen will invoke the program by executing (via 
-# popen()) the command <command> <input-file>, where <command> is the value of 
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file 
-# provided by doxygen. Whatever the program writes to standard output 
-# is used as the file version. See the manual for examples.
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
 
-FILE_VERSION_FILTER    = 
+FILE_VERSION_FILTER    =
 
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by 
-# doxygen. The layout file controls the global structure of the generated output files 
-# in an output format independent way. The create the layout file that represents 
-# doxygen's defaults, run doxygen with the -l option. You can optionally specify a 
-# file name after the option, if omitted DoxygenLayout.xml will be used as the name 
-# of the layout file.
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
 
-LAYOUT_FILE            = 
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES         =
 
 #---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
+# Configuration options related to warning and progress messages
 #---------------------------------------------------------------------------
 
-# The QUIET tag can be used to turn on/off the messages that are generated 
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
 
 QUIET                  = NO
 
-# The WARNINGS tag can be used to turn on/off the warning messages that are 
-# generated by doxygen. Possible values are YES and NO. If left blank 
-# NO is used.
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
 
 WARNINGS               = YES
 
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
-# automatically be disabled.
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
 
 WARN_IF_UNDOCUMENTED   = YES
 
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
-# potential errors in the documentation, such as not documenting some 
-# parameters in a documented function, or documenting parameters that 
-# don't exist or using markup commands wrongly.
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
 
 WARN_IF_DOC_ERROR      = YES
 
-# This WARN_NO_PARAMDOC option can be abled to get warnings for 
-# functions that are documented, but have no documentation for their parameters 
-# or return value. If set to NO (the default) doxygen will only warn about 
-# wrong or incomplete parameter documentation, but not about the absence of 
-# documentation.
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
 
 WARN_NO_PARAMDOC       = NO
 
-# The WARN_FORMAT tag determines the format of the warning messages that 
-# doxygen can produce. The string should contain the $file, $line, and $text 
-# tags, which will be replaced by the file and line number from which the 
-# warning originated and the warning text. Optionally the format may contain 
-# $version, which will be replaced by the version of the file (if it could 
-# be obtained via FILE_VERSION_FILTER)
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
 
 WARN_FORMAT            = "$file:$line: $text"
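+#
+# Illustrative sketch only (not part of the upstream template): when a
+# FILE_VERSION_FILTER is configured, the file version can be folded into the
+# warnings as well, e.g.
+#   WARN_FORMAT = "$file:$line ($version): $text"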
 
-# The WARN_LOGFILE tag can be used to specify a file to which warning 
-# and error messages should be written. If left blank the output is written 
-# to stderr.
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
 
 WARN_LOGFILE           = %WARNING_LOG_PATH%
 
 #---------------------------------------------------------------------------
-# configuration options related to the input files
+# Configuration options related to the input files
 #---------------------------------------------------------------------------
 
-# The INPUT tag can be used to specify the files and/or directories that contain 
-# documented source files. You may enter file names like "myfile.cpp" or 
-# directories like "/usr/src/myproject". Separate the files or directories 
-# with spaces.
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
 
-INPUT                  = ../include ../src/lib_json .
+INPUT                  = ../include \
+                         ../src/lib_json \
+                         .
 
-# This tag can be used to specify the character encoding of the source files 
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is 
-# also the default input encoding. Doxygen uses libiconv (or the iconv built 
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for 
-# the list of possible encodings.
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
 
 INPUT_ENCODING         = UTF-8
 
-# If the value of the INPUT tag contains directories, you can use the 
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
-# and *.h) to filter out the source-files in the directories. If left 
-# blank the following patterns are tested: 
-# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx 
-# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
 
 FILE_PATTERNS          = *.h \
                          *.cpp \
                          *.inl \
                          *.dox
 
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories 
-# should be searched for input files as well. Possible values are YES and NO. 
-# If left blank NO is used.
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
 
 RECURSIVE              = YES
 
-# The EXCLUDE tag can be used to specify files and/or directories that should 
-# excluded from the INPUT source files. This way you can easily exclude a 
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
 # subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
 
-EXCLUDE                = 
+EXCLUDE                =
 
-# The EXCLUDE_SYMLINKS tag can be used select whether or not files or 
-# directories that are symbolic links (a Unix filesystem feature) are excluded 
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
 # from the input.
+# The default value is: NO.
 
 EXCLUDE_SYMLINKS       = NO
 
-# If the value of the INPUT tag contains directories, you can use the 
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
-# certain files from those directories. Note that the wildcards are matched 
-# against the file with absolute path, so to exclude all test directories 
-# for example use the pattern */test/*
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
 
-EXCLUDE_PATTERNS       = 
+EXCLUDE_PATTERNS       =
 
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names 
-# (namespaces, classes, functions, etc.) that should be excluded from the 
-# output. The symbol name can be a fully qualified name, a word, or if the 
-# wildcard * is used, a substring. Examples: ANamespace, AClass, 
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
 # AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
 
-EXCLUDE_SYMBOLS        = 
+EXCLUDE_SYMBOLS        =
 
-# The EXAMPLE_PATH tag can be used to specify one or more files or 
-# directories that contain example code fragments that are included (see 
-# the \include command).
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
 
-EXAMPLE_PATH           = 
+EXAMPLE_PATH           = ..
 
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
-# and *.h) to filter out the source-files in the directories. If left 
-# blank all files are included.
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
 
 EXAMPLE_PATTERNS       = *
 
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
-# searched for input files to be used with the \include or \dontinclude 
-# commands irrespective of the value of the RECURSIVE tag. 
-# Possible values are YES and NO. If left blank NO is used.
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
 
 EXAMPLE_RECURSIVE      = NO
 
-# The IMAGE_PATH tag can be used to specify one or more files or 
-# directories that contain image that are included in the documentation (see 
-# the \image command).
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
 
-IMAGE_PATH             = 
+IMAGE_PATH             =
 
-# The INPUT_FILTER tag can be used to specify a program that doxygen should 
-# invoke to filter for each input file. Doxygen will invoke the filter program 
-# by executing (via popen()) the command <filter> <input-file>, where <filter> 
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
-# input file. Doxygen will then use the output that the filter program writes 
-# to standard output. 
-# If FILTER_PATTERNS is specified, this tag will be 
-# ignored.
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
 
-INPUT_FILTER           = 
+INPUT_FILTER           =
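+#
+# Illustrative sketch only (not part of the upstream template): a filter is any
+# command that reads the file passed as its last argument and prints the
+# transformed source on stdout, for example
+#   INPUT_FILTER = "sed -e 's/MY_EXPORT_MACRO//g'"
+# where MY_EXPORT_MACRO is a hypothetical macro name to be stripped before
+# parsing.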
 
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern 
-# basis. 
-# Doxygen will compare the file name with each pattern and apply the 
-# filter if there is a match. 
-# The filters are a list of the form: 
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further 
-# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER 
-# is applied to all files.
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
 
-FILTER_PATTERNS        = 
+FILTER_PATTERNS        =
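+#
+# Illustrative sketch only (not part of the upstream template): hypothetical
+# per-extension filters could be wired up as
+#   FILTER_PATTERNS = *.md=./markdown_filter *.dox=./dox_filter
+# Files not matching any pattern fall back to INPUT_FILTER (if one is set).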
 
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
-# INPUT_FILTER) will be used to filter the input files when producing source 
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
 
 FILTER_SOURCE_FILES    = NO
 
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
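+#
+# Illustrative sketch only (not part of the upstream template): if a markdown
+# README were added to the input, e.g.
+#   INPUT                 += ../README.md
+#   USE_MDFILE_AS_MAINPAGE = ../README.md
+# its contents would become the generated main page (index.html).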
+
 #---------------------------------------------------------------------------
-# configuration options related to source browsing
+# Configuration options related to source browsing
 #---------------------------------------------------------------------------
 
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
-# be generated. Documented entities will be cross-referenced with these sources. 
-# Note: To get rid of all source code in the generated output, make sure also 
-# VERBATIM_HEADERS is set to NO.
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
 
 SOURCE_BROWSER         = YES
 
-# Setting the INLINE_SOURCES tag to YES will include the body 
-# of functions and classes directly in the documentation.
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
 
 INLINE_SOURCES         = NO
 
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
-# doxygen to hide any special comment blocks from generated source code 
-# fragments. Normal C and C++ comments will always remain visible.
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
 
 STRIP_CODE_COMMENTS    = YES
 
-# If the REFERENCED_BY_RELATION tag is set to YES 
-# then for each documented function all documented 
-# functions referencing it will be listed.
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
 
 REFERENCED_BY_RELATION = YES
 
-# If the REFERENCES_RELATION tag is set to YES 
-# then for each documented function all documented entities 
-# called/used by that function will be listed.
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
 
 REFERENCES_RELATION    = YES
 
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) 
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from 
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will 
-# link to the source code. 
-# Otherwise they will link to the documentation.
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
 
 REFERENCES_LINK_SOURCE = YES
 
-# If the USE_HTAGS tag is set to YES then the references to source code 
-# will point to the HTML generated by the htags(1) tool instead of doxygen 
-# built-in source browser. The htags tool is part of GNU's global source 
-# tagging system (see http://www.gnu.org/software/global/global.html). You 
-# will need version 4.8.6 or higher.
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
 
 USE_HTAGS              = NO
 
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
-# will generate a verbatim copy of the header file for each class for 
-# which an include is specified. Set to NO to disable this.
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
 
 VERBATIM_HEADERS       = YES
 
 #---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
+# Configuration options related to the alphabetical class index
 #---------------------------------------------------------------------------
 
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
-# of all compounds will be generated. Enable this if the project 
-# contains a lot of classes, structs, unions or interfaces.
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
 
-ALPHABETICAL_INDEX     = NO
+ALPHABETICAL_INDEX     = YES
+TOC_INCLUDE_HEADINGS   = 2
 
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
-# in which this list will be split (can be a number in the range [1..20])
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
 COLS_IN_ALPHA_INDEX    = 5
 
-# In case all classes in a project start with a common prefix, all 
-# classes will be put under the same header in the alphabetical index. 
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
-# should be ignored while generating the index headers.
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
-IGNORE_PREFIX          = 
+IGNORE_PREFIX          =
 
 #---------------------------------------------------------------------------
-# configuration options related to the HTML output
+# Configuration options related to the HTML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
-# generate HTML output.
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
 
 GENERATE_HTML          = YES
 
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `html' will be used as the default path.
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_OUTPUT            = %HTML_OUTPUT%
 
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
-# doxygen will generate files with .html extension.
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FILE_EXTENSION    = .html
 
-# The HTML_HEADER tag can be used to specify a personal HTML header for 
-# each generated HTML page. If it is left blank doxygen will generate a 
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
 # standard header.
+#
+# To get valid HTML you need a header file that includes any scripts and style
+# sheets that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_HEADER            = header.html
 
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
-# each generated HTML page. If it is left blank doxygen will generate a 
-# standard footer.
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FOOTER            = footer.html
 
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
-# style sheet that is used by each HTML page. It can be used to 
-# fine-tune the look of the HTML output. If the tag is left blank doxygen 
-# will generate a default style sheet. Note that doxygen will try to copy 
-# the style sheet file to the HTML output directory, so don't put your own 
-# stylesheet in the HTML output directory as well, or it will be erased!
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
-HTML_STYLESHEET        = 
+HTML_STYLESHEET        =
 
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
-# files or namespaces will be aligned in HTML using tables. If set to 
-# NO a bullet list will be used.
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
-HTML_ALIGN_MEMBERS     = YES
+HTML_EXTRA_STYLESHEET  =
 
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML 
-# documentation will contain sections that can be hidden and shown after the 
-# page has loaded. For this to work a browser that supports 
-# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox 
-# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
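+#
+# Illustrative sketch only (not part of the upstream template): a hypothetical
+# logo shipped with the docs could be declared as
+#   HTML_EXTRA_FILES = logo.png
+# and referenced from the custom header.html via the $relpath^ marker, e.g.
+#   <img src="$relpath^logo.png" alt="logo"/>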
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# is purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_DYNAMIC_SECTIONS  = YES
 
-# If the GENERATE_DOCSET tag is set to YES, additional index files 
-# will be generated that can be used as input for Apple's Xcode 3 
-# integrated development environment, introduced with OSX 10.5 (Leopard). 
-# To create a documentation set, doxygen will generate a Makefile in the 
-# HTML output directory. Running make will produce the docset in that 
-# directory and running "make install" will install the docset in 
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find 
-# it at startup. 
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_DOCSET        = NO
 
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the 
-# feed. A documentation feed provides an umbrella under which multiple 
-# documentation sets from a single provider (such as a company or product suite) 
-# can be grouped.
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_FEEDNAME        = "Doxygen generated docs"
 
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that 
-# should uniquely identify the documentation set bundle. This should be a 
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen 
-# will append .docset to the name.
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_BUNDLE_ID       = org.doxygen.Project
 
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
-# will be generated that can be used as input for tools like the 
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) 
-# of the generated HTML documentation.
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_HTMLHELP      = %HTML_HELP%
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
-# be used to specify the file name of the resulting .chm file. You 
-# can add a path in front of the file if the result should not be 
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
 # written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_FILE               = jsoncpp-%JSONCPP_VERSION%.chm
 
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
-# be used to specify the location (absolute path including file name) of 
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
-# the HTML help compiler on the generated index.hhp.
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 HHC_LOCATION           = "c:\Program Files\HTML Help Workshop\hhc.exe"
 
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
-# controls if a separate .chi index file is generated (YES) or that 
-# it should be included in the master .chm file (NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 GENERATE_CHI           = YES
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING 
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file 
-# content.
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
-CHM_INDEX_ENCODING     = 
+CHM_INDEX_ENCODING     =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
-# controls whether a binary table of contents is generated (YES) or a 
-# normal table of contents (NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 BINARY_TOC             = YES
 
-# The TOC_EXPAND flag can be set to YES to add extra items for group members 
-# to the contents of the HTML help documentation and to the tree view.
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 TOC_EXPAND             = YES
 
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER 
-# are set, an additional index file will be generated that can be used as input for 
-# Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated 
-# HTML documentation.
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_QHP           = NO
 
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can 
-# be used to specify the file name of the resulting .qch file. 
-# The path specified is relative to the HTML output folder.
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QCH_FILE               = 
+QCH_FILE               =
 
-# The QHP_NAMESPACE tag specifies the namespace to use when generating 
-# Qt Help Project output. For more information please see 
-# http://doc.trolltech.com/qthelpproject.html#namespace
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QHP_NAMESPACE          = 
+QHP_NAMESPACE          =
 
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating 
-# Qt Help Project output. For more information please see 
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_VIRTUAL_FOLDER     = doc
 
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. 
-# For more information please see 
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QHP_CUST_FILTER_NAME   = 
+QHP_CUST_FILTER_NAME   =
 
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see 
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QHP_CUST_FILTER_ATTRS  = 
+QHP_CUST_FILTER_ATTRS  =
 
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's 
-# filter section matches. 
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QHP_SECT_FILTER_ATTRS  = 
+QHP_SECT_FILTER_ATTRS  =
 
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can 
-# be used to specify the location of Qt's qhelpgenerator. 
-# If non-empty doxygen will try to run qhelpgenerator on the generated 
-# .qhp file.
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
-QHG_LOCATION           = 
+QHG_LOCATION           =
 
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
-# top of each HTML page. The value NO (the default) enables the index and 
-# the value YES disables it.
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 DISABLE_INDEX          = NO
 
-# This tag can be used to set the number of enum values (range [1..20]) 
-# that doxygen will group on one line in the generated HTML documentation.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index 
-# structure should be generated to display hierarchical information. 
-# If the tag value is set to FRAME, a side panel will be generated 
-# containing a tree-like index structure (just like the one that 
-# is generated for HTML Help). For this to work a browser that supports 
-# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
-# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are 
-# probably better off using the HTML help feature. Other possible values 
-# for this tag are: HIERARCHIES, which will generate the Groups, Directories, 
-# and Class Hierarchy pages using a tree view instead of an ordered list; 
-# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which 
-# disables this behavior completely. For backwards compatibility with previous 
-# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE 
-# respectively.
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_TREEVIEW      = NO
 
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
-# used to set the initial width (in pixels) of the frame in which the tree 
-# is shown.
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 TREEVIEW_WIDTH         = 250
 
-# Use this tag to change the font size of Latex formulas included 
-# as images in the HTML documentation. The default is 10. Note that 
-# when you change the font size after a successful doxygen run you need 
-# to manually remove any form_*.png images from the HTML output directory 
-# to force them to be regenerated.
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 FORMULA_FONTSIZE       = 10
 
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# of a project to a relative location where the documentation can be found.
+# The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
 #---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
+# Configuration options related to the LaTeX output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
-# generate Latex output.
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
 
 GENERATE_LATEX         = NO
 
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `latex' will be used as the default path.
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_OUTPUT           = latex
 
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
-# invoked. If left blank `latex' will be used as the default command name.
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_CMD_NAME         = latex
 
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
-# generate index for LaTeX. If left blank `makeindex' will be used as the 
-# default command name.
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 MAKEINDEX_CMD_NAME     = makeindex
 
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
-# LaTeX documents. This may be useful for small projects and may help to 
-# save some trees in general.
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 COMPACT_LATEX          = NO
 
-# The PAPER_TYPE tag can be used to set the paper type that is used 
-# by the printer. Possible values are: a4, a4wide, letter, legal and 
-# executive. If left blank a4wide will be used.
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PAPER_TYPE             = a4wide
 
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX 
-# packages that should be included in the LaTeX output.
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
-EXTRA_PACKAGES         = 
+EXTRA_PACKAGES         =
 
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
-# the generated latex document. The header should contain everything until 
-# the first chapter. If it is left blank doxygen will generate a 
-# standard header. Notice: only use this tag if you know what you are doing!
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
-LATEX_HEADER           = 
+LATEX_HEADER           =
 
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
-# contain links (just like the HTML output) instead of page references 
-# This makes the output suitable for online browsing using a pdf viewer.
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PDF_HYPERLINKS         = NO
 
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
-# plain latex in the generated Makefile. Set this option to YES to get a 
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
 # higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 USE_PDFLATEX           = NO
 
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. 
-# command to the generated LaTeX files. This will instruct LaTeX to keep 
-# running if errors occur, instead of asking the user for help. 
-# This option is also used when generating formulas in HTML.
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BATCHMODE        = NO
 
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
-# include the index chapters (such as File Index, Compound Index, etc.) 
-# in the output.
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HIDE_INDICES     = NO
 
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER.
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_SOURCE_CODE      = NO
 
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
 #---------------------------------------------------------------------------
-# configuration options related to the RTF output
+# Configuration options related to the RTF output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
-# The RTF output is optimized for Word 97 and may not look very pretty with 
-# other RTF readers or editors.
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
 
 GENERATE_RTF           = NO
 
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `rtf' will be used as the default path.
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_OUTPUT             = rtf
 
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
-# RTF documents. This may be useful for small projects and may help to 
-# save some trees in general.
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 COMPACT_RTF            = NO
 
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
-# will contain hyperlink fields. The RTF file will 
-# contain links (just like the HTML output) instead of page references. 
-# This makes the output suitable for online browsing using WORD or other 
-# programs which support those fields. 
-# Note: wordpad (write) and others do not support links.
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_HYPERLINKS         = NO
 
-# Load stylesheet definitions from file. Syntax is similar to doxygen's 
-# config file, i.e. a series of assignments. You only have to provide 
-# replacements, missing definitions are set to their default value.
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements;
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
-RTF_STYLESHEET_FILE    = 
+RTF_STYLESHEET_FILE    =
 
-# Set optional variables used in the generation of an rtf document. 
-# Syntax is similar to doxygen's config file.
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
-RTF_EXTENSIONS_FILE    = 
+RTF_EXTENSIONS_FILE    =
 
 #---------------------------------------------------------------------------
-# configuration options related to the man page output
+# Configuration options related to the man page output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
-# generate man pages
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
 
 GENERATE_MAN           = NO
 
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `man' will be used as the default path.
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_OUTPUT             = man
 
-# The MAN_EXTENSION tag determines the extension that is added to 
-# the generated man pages (default is the subroutine's section .3)
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_EXTENSION          = .3
 
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
-# then it will generate one additional man file for each entity 
-# documented in the real man page(s). These additional files 
-# only source the real man page, but without them the man command 
-# would be unable to find the correct page. The default is NO.
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_LINKS              = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the XML output
+# Configuration options related to the XML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_XML tag is set to YES Doxygen will 
-# generate an XML file that captures the structure of 
-# the code including all documentation.
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
 
 GENERATE_XML           = NO
 
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
-# put in front of it. If left blank `xml' will be used as the default path.
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_OUTPUT             = xml
 
-# The XML_SCHEMA tag can be used to specify an XML schema, 
-# which can be used by a validating XML parser to check the 
-# syntax of the XML files.
+# The XML_SCHEMA tag can be used to specify an XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
-XML_SCHEMA             = 
+XML_SCHEMA             =
 
-# The XML_DTD tag can be used to specify an XML DTD, 
-# which can be used by a validating XML parser to check the 
-# syntax of the XML files.
+# The XML_DTD tag can be used to specify an XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
-XML_DTD                = 
+XML_DTD                =
 
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
-# dump the program listings (including syntax highlighting 
-# and cross-referencing information) to the XML output. Note that 
-# enabling this will significantly increase the size of the XML output.
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_PROGRAMLISTING     = YES
 
 #---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
+# Configuration options related to the DOCBOOK output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
-# generate an AutoGen Definitions (see autogen.sf.net) file 
-# that captures the structure of the code including all 
-# documentation. Note that this feature is still experimental 
-# and incomplete at the moment.
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_AUTOGEN_DEF   = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the Perl module output
+# Configuration options related to the Perl module output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
-# generate a Perl module file that captures the structure of 
-# the code including all documentation. Note that this 
-# feature is still experimental and incomplete at the 
-# moment.
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_PERLMOD       = NO
 
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
-# to generate PDF and DVI output from the Perl module output.
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_LATEX          = NO
 
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
-# nicely formatted so it can be parsed by a human reader. 
-# This is useful 
-# if you want to understand what is going on. 
-# On the other hand, if this 
-# tag is set to NO the size of the Perl module output will be much smaller 
-# and Perl will parse it just the same.
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_PRETTY         = YES
 
-# The names of the make variables in the generated doxyrules.make file 
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
-# This is useful so different doxyrules.make files included by the same 
-# Makefile don't overwrite each other's variables.
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
-PERLMOD_MAKEVAR_PREFIX = 
+PERLMOD_MAKEVAR_PREFIX =
 
 #---------------------------------------------------------------------------
-# Configuration options related to the preprocessor   
+# Configuration options related to the preprocessor
 #---------------------------------------------------------------------------
 
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
-# evaluate all C-preprocessor directives found in the sources and include 
-# files.
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
 
 ENABLE_PREPROCESSING   = YES
 
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
-# names in the source code. If set to NO (the default) only conditional 
-# compilation will be performed. Macro expansion can be done in a controlled 
-# way by setting EXPAND_ONLY_PREDEF to YES.
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 MACRO_EXPANSION        = YES
 
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
-# then the macro expansion is limited to the macros specified with the 
-# PREDEFINED and EXPAND_AS_DEFINED tags.
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 EXPAND_ONLY_PREDEF     = NO
 
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files 
-# in the INCLUDE_PATH (see below) will be search if a #include is found.
+# If the SEARCH_INCLUDES tag is set to YES the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SEARCH_INCLUDES        = YES
 
-# The INCLUDE_PATH tag can be used to specify one or more directories that 
-# contain include files that are not input files but should be processed by 
-# the preprocessor.
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
 
 INCLUDE_PATH           = ../include
 
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
-# patterns (like *.h and *.hpp) to filter out the header-files in the 
-# directories. If left blank, the patterns specified with FILE_PATTERNS will 
-# be used.
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 INCLUDE_FILE_PATTERNS  = *.h
 
-# The PREDEFINED tag can be used to specify one or more macro names that 
-# are defined before the preprocessor is started (similar to the -D option of 
-# gcc). The argument of the tag is a list of macros of the form: name 
-# or name=definition (no spaces). If the definition and the = are 
-# omitted =1 is assumed. To prevent a macro definition from being 
-# undefined via #undef or recursively expanded use the := operator 
-# instead of the = operator.
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-PREDEFINED             = "_MSC_VER=1400" \
+PREDEFINED             = "_MSC_VER=1800" \
                          _CPPRTTI \
                          _WIN32 \
-                         JSONCPP_DOC_EXCLUDE_IMPLEMENTATION \
-                         JSON_VALUE_USE_INTERNAL_MAP
+                         JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
 
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
-# this tag can be used to specify a list of macro names that should be expanded. 
-# The macro definition that is found in the sources will be used. 
-# Use the PREDEFINED tag if you want to use a different macro definition.
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-EXPAND_AS_DEFINED      = 
+EXPAND_AS_DEFINED      =
 
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
-# doxygen's preprocessor will remove all function-like macros that are alone 
-# on a line, have an all uppercase name, and do not end with a semicolon. Such 
-# function macros are typically used for boiler-plate code, and will confuse 
-# the parser if not removed.
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SKIP_FUNCTION_MACROS   = YES
 
 #---------------------------------------------------------------------------
-# Configuration::additions related to external references   
+# Configuration options related to external references
 #---------------------------------------------------------------------------
 
-# The TAGFILES option can be used to specify one or more tagfiles. 
-# Optionally an initial location of the external documentation 
-# can be added for each tagfile. The format of a tag file without 
-# this location is as follows: 
-#  
-# TAGFILES = file1 file2 ... 
-# Adding location for the tag files is done as follows: 
-#  
-# TAGFILES = file1=loc1 "file2 = loc2" ... 
-# where "loc1" and "loc2" can be relative or absolute paths or 
-# URLs. If a location is present for each tag, the installdox tool 
-# does not have to be run to correct the links. 
-# Note that each tag file must have a unique name 
-# (where the name does NOT include the path) 
-# If a tag file is not located in the directory in which doxygen 
-# is run, you must also specify the path to the tagfile here.
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
 
-TAGFILES               = 
+TAGFILES               =
 
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
-# a tag file that is based on the input files it reads.
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
 
-GENERATE_TAGFILE       = 
+GENERATE_TAGFILE       =
 
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
-# in the class index. If set to NO only the inherited external classes 
-# will be listed.
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
 
 ALLEXTERNALS           = NO
 
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
-# in the modules index. If set to NO, only the current project's groups will 
-# be listed.
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
 
 EXTERNAL_GROUPS        = YES
 
-# The PERL_PATH should be the absolute path and name of the perl script 
-# interpreter (i.e. the result of `which perl').
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
 
 PERL_PATH              = /usr/bin/perl
 
 #---------------------------------------------------------------------------
-# Configuration options related to the dot tool   
+# Configuration options related to the dot tool
 #---------------------------------------------------------------------------
 
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base 
-# or super classes. Setting the tag to NO turns the diagrams off. Note that 
-# this option is superseded by the HAVE_DOT option below. This is only a 
-# fallback. It is recommended to install and use dot, since it yields more 
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
 # powerful graphs.
+# The default value is: YES.
 
 CLASS_DIAGRAMS         = NO
 
-# You can define message sequence charts within doxygen comments using the \msc 
-# command. Doxygen will then run the mscgen tool (see 
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the 
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where 
-# the mscgen tool resides. If left empty the tool is assumed to be found in the 
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
 # default search path.
 
-MSCGEN_PATH            = 
+MSCGEN_PATH            =
 
-# If set to YES, the inheritance and collaboration graphs will hide 
-# inheritance and usage relations if the target is undocumented 
-# or is not a class.
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
 
 HIDE_UNDOC_RELATIONS   = NO
 
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
-# available from the path. This tool is part of Graphviz, a graph visualization 
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
-# have no effect if this option is set to NO (the default)
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
 
 HAVE_DOT               = %HAVE_DOT%
 
-# By default doxygen will write a font called FreeSans.ttf to the output 
-# directory and reference it in all dot files that doxygen generates. This 
-# font does not include all possible unicode characters however, so when you need 
-# these (or just want a differently looking font) you can specify the font name 
-# using DOT_FONTNAME. You need need to make sure dot is able to find the font, 
-# which can be done by putting it in a standard location or by setting the 
-# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory 
-# containing the font.
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTNAME           = FreeSans
 
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. 
-# The default size is 10pt.
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTSIZE           = 10
 
-# By default doxygen will tell dot to use the output directory to look for the 
-# FreeSans.ttf font (which doxygen will put there itself). If you specify a 
-# different font using DOT_FONTNAME you can set the path where dot 
-# can find it using this tag.
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
-DOT_FONTPATH           = 
+DOT_FONTPATH           =
 
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for each documented class showing the direct and 
-# indirect inheritance relations. Setting this tag to YES will force the 
-# the CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CLASS_GRAPH            = YES
 
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for each documented class showing the direct and 
-# indirect implementation dependencies (inheritance, containment, and 
-# class references variables) of the class with other documented classes.
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 COLLABORATION_GRAPH    = YES
 
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen 
-# will generate a graph for groups, showing the direct groups dependencies
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GROUP_GRAPHS           = YES
 
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
-# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
 # Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 UML_LOOK               = %UML_LOOK%
 
-# If set to YES, the inheritance and collaboration graphs will show the 
-# relations between templates and their instances.
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 TEMPLATE_RELATIONS     = YES
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
-# tags are set to YES then doxygen will generate a graph for each documented 
-# file showing the direct and indirect include dependencies of the file with 
-# other documented files.
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDE_GRAPH          = YES
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
-# documented header file showing the documented files that directly or 
-# indirectly include this file.
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the documented files that directly or indirectly include this file.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDED_BY_GRAPH      = YES
 
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then 
-# doxygen will generate a call dependency graph for every global function 
-# or class method. Note that enabling this option will significantly increase 
-# the time of a run. So in most cases it will be better to enable call graphs 
-# for selected functions only using the \callgraph command.
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALL_GRAPH             = NO
 
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then 
-# doxygen will generate a caller dependency graph for every global function 
-# or class method. Note that enabling this option will significantly increase 
-# the time of a run. So in most cases it will be better to enable caller 
-# graphs for selected functions only using the \callergraph command.
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALLER_GRAPH           = YES
 
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
-# will graphical hierarchy of all classes instead of a textual one.
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show the
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GRAPHICAL_HIERARCHY    = YES
 
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES 
-# then doxygen will show the dependencies a directory has on other directories 
-# in a graphical way. The dependency relations are determined by the #include 
-# relations between the files in the directories.
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DIRECTORY_GRAPH        = YES
 
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
-# generated by dot. Possible values are png, jpg, or gif 
-# If left blank png will be used.
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_IMAGE_FORMAT       = png
 
-# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
 # found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_PATH               = %DOT_PATH%
 
-# The DOTFILE_DIRS tag can be used to specify one or more directories that 
-# contain dot files that are included in the documentation (see the 
-# \dotfile command).
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
 
-DOTFILE_DIRS           = 
+DOTFILE_DIRS           =
 
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of 
-# nodes that will be shown in the graph. If the number of nodes in a graph 
-# becomes larger than this value, doxygen will truncate the graph, which is 
-# visualized by representing a node as a red box. Note that doxygen if the 
-# number of direct children of the root node in a graph is already larger than 
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note 
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_GRAPH_MAX_NODES    = 50
 
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
-# graphs generated by dot. A depth value of 3 means that only nodes reachable 
-# from the root by following a path via at most 3 edges will be shown. Nodes 
-# that lay further from the root node will be omitted. Note that setting this 
-# option to 1 or 2 may greatly reduce the computation time needed for large 
-# code bases. Also note that the size of a graph can be further restricted by 
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
 # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 MAX_DOT_GRAPH_DEPTH    = 1000
 
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
-# background. This is disabled by default, because dot on Windows does not 
-# seem to support this out of the box. Warning: Depending on the platform used, 
-# enabling this option may lead to badly anti-aliased labels on the edges of 
-# a graph (i.e. they become hard to read).
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_TRANSPARENT        = NO
 
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output 
-# files in one run (i.e. multiple -o and -T options on the command line). This 
-# makes dot run faster, but since only newer versions of dot (>1.8.10) 
-# support this, this feature is disabled by default.
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_MULTI_TARGETS      = YES
 
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
-# generate a legend page explaining the meaning of the various boxes and 
-# arrows in the dot generated graphs.
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GENERATE_LEGEND        = YES
 
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
-# remove the intermediate dot files that are used to generate 
-# the various graphs.
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_CLEANUP            = YES
-
-#---------------------------------------------------------------------------
-# Options related to the search engine
-#---------------------------------------------------------------------------
-
-# The SEARCHENGINE tag specifies whether or not a search engine should be 
-# used. If set to NO the values of all tags below this one will be ignored.
-
-SEARCHENGINE           = NO
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/footer.html b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/footer.html
index a61d952..a24bf2b 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/footer.html
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/footer.html
@@ -1,23 +1,21 @@
-<hr>
-<table width="100%">
-  <tr>
-    <td width="10%" align="left" valign="center">
-      <a href="http://sourceforge.net"> 
-      <img
-      src="http://sourceforge.net/sflogo.php?group_id=144446"
-      width="88" height="31" border="0" alt="SourceForge Logo"></a>
-    </td>
-    <td width="20%" align="left" valign="center">
-      hosts this site.
-    </td>
-    <td>
-    </td>
-    <td align="right" valign="center">
-      Send comments to:<br>
-      <a href="mailto:jsoncpp-devel@lists.sourceforge.net">Json-cpp Developers</a>
-    </td>
-  </tr>
-</table>
-
-</body> 
+<!-- HTML footer for doxygen 1.8.13-->
+<!-- start footer part -->
+<!--BEGIN GENERATE_TREEVIEW-->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    $navpath
+    <li class="footer">$generatedby
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
+  </ul>
+</div>
+<!--END GENERATE_TREEVIEW-->
+<!--BEGIN !GENERATE_TREEVIEW-->
+<hr class="footer"/><address class="footer"><small>
+$generatedby &#160;<a href="http://www.doxygen.org/index.html">
+<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
+</a> $doxygenversion
+</small></address>
+<!--END !GENERATE_TREEVIEW-->
+</body>
 </html>
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/header.html b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/header.html
index 1a6ad61..f0b93e0 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/header.html
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/header.html
@@ -1,24 +1,64 @@
-<html>
+<!-- HTML header for doxygen 1.8.13-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-<title>
-JsonCpp - JSON data format manipulation library
-</title>
-<link href="doxygen.css" rel="stylesheet" type="text/css">
-<link href="tabs.css" rel="stylesheet" type="text/css">
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen $doxygenversion"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
+$treeview
+$search
+$mathjax
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+$extrastylesheet
 </head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
 
-<body bgcolor="#ffffff"> 
+<!--BEGIN TITLEAREA-->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <!--BEGIN PROJECT_LOGO-->
+  <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
+  <!--END PROJECT_LOGO-->
+  <!--BEGIN DISABLE_INDEX-->
+   <!--BEGIN SEARCHENGINE-->
+   <td>$searchbox</td>
+   <!--END SEARCHENGINE-->
+  <!--END DISABLE_INDEX-->
+ </tr>
+ </tbody>
+</table>
+</div>
+<!--END TITLEAREA-->
+<body bgcolor="#ffffff">
 <table width="100%">
   <tr>
-    <td width="40%" align="left" valign="center">
-      <a href="http://sourceforge.net/projects/jsoncpp/">
+    <td width="30%" align="left" valign="center">
+      <a href="https://github.com/open-source-parsers/jsoncpp">
       JsonCpp project page
       </a>
     </td>
-    <td width="40%" align="right" valign="center">
-      <a href="http://jsoncpp.sourceforge.net">JsonCpp home page</a>
+    <td width="20%" align="center" valign="center">
+      <a href="hierarchy.html">
+        Classes
+      </a>
+    </td>
+    <td width="20%" align="center" valign="center">
+      <a href="namespace_json.html">
+        Namespace
+      </a>
+    </td>
+    <td width="30%" align="right" valign="center">
+      <a href="http://open-source-parsers.github.io/jsoncpp-docs/doxygen/">JsonCpp home page</a>
     </td>
   </tr>
 </table>
 
 <hr>
+<!-- end header part -->
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/jsoncpp.dox b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/jsoncpp.dox
index 97cc108..47efc8a 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/jsoncpp.dox
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/jsoncpp.dox
@@ -4,11 +4,21 @@
 
 <a HREF="http://www.json.org/">JSON (JavaScript Object Notation)</a>
  is a lightweight data-interchange format. 
-It can represent integer, real number, string, an ordered sequence of value, and
-a collection of name/value pairs.
 
 Here is an example of JSON data:
 \verbatim
+{
+    "encoding" : "UTF-8",
+    "plug-ins" : [
+        "python",
+        "c++",
+        "ruby"
+        ],
+    "indent" : { "length" : 3, "use_space": true }
+}
+\endverbatim
+<b>JsonCpp</b> supports comments as <i>meta-data</i>:
+\code
 // Configuration options
 {
     // Default encoding for text
@@ -17,21 +27,22 @@
     // Plug-ins loaded at start-up
     "plug-ins" : [
         "python",
-        "c++",
+        "c++",  // trailing comment
         "ruby"
         ],
         
     // Tab indent size
-    "indent" : { "length" : 3, "use_space": true }
+    // (multi-line comment)
+    "indent" : { /*embedded comment*/ "length" : 3, "use_space": true }
 }
-\endverbatim
+\endcode
 
 \section _features Features
 - read and write JSON document
-- attach C and C++ style comments to element during parsing
+- attach C++ style comments to element during parsing
 - rewrite JSON document preserving original comments
 
-Notes: Comments used to be supported in JSON but where removed for 
+Notes: Comments used to be supported in JSON but were removed for
 portability (C like comments are not supported in Python). Since
 comments are useful in configuration/input file, this feature was
 preserved.
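A minimal sketch of that comment-preserving round trip, assuming the `collectComments` reader setting and the `commentStyle` writer setting of the builder API documented further down this page:
\code
// Sketch only: parse a commented document and write it back with its comments.
#include <json/json.h>
#include <iostream>
#include <sstream>

int main() {
    std::istringstream in(
        "// Default encoding for text\n"
        "{ \"encoding\" : \"UTF-8\" }\n");

    Json::CharReaderBuilder rbuilder;
    rbuilder["collectComments"] = true;        // keep comments attached to values
    Json::Value root;
    std::string errs;
    if (!Json::parseFromStream(rbuilder, in, &root, &errs)) {
        std::cerr << errs << std::endl;
        return 1;
    }

    Json::StreamWriterBuilder wbuilder;
    wbuilder["commentStyle"] = "All";          // emit the collected comments again
    std::cout << Json::writeString(wbuilder, root) << std::endl;
    return 0;
}
\endcode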
@@ -39,88 +50,115 @@
 \section _example Code example
 
 \code
-Json::Value root;   // will contains the root value after parsing.
-Json::Reader reader;
-bool parsingSuccessful = reader.parse( config_doc, root );
-if ( !parsingSuccessful )
-{
-    // report to the user the failure and their locations in the document.
-    std::cout  << "Failed to parse configuration\n"
-               << reader.getFormattedErrorMessages();
-    return;
-}
+Json::Value root;   // 'root' will contain the root value after parsing.
+std::cin >> root;
 
-// Get the value of the member of root named 'encoding', return 'UTF-8' if there is no
-// such member.
-std::string encoding = root.get("encoding", "UTF-8" ).asString();
-// Get the value of the member of root named 'encoding', return a 'null' value if
-// there is no such member.
-const Json::Value plugins = root["plug-ins"];
-for ( int index = 0; index < plugins.size(); ++index )  // Iterates over the sequence elements.
-   loadPlugIn( plugins[index].asString() );
-   
-setIndentLength( root["indent"].get("length", 3).asInt() );
-setIndentUseSpace( root["indent"].get("use_space", true).asBool() );
-
-// ...
-// At application shutdown to make the new configuration document:
-// Since Json::Value has implicit constructor for all value types, it is not
-// necessary to explicitly construct the Json::Value object:
-root["encoding"] = getCurrentEncoding();
-root["indent"]["length"] = getCurrentIndentLength();
-root["indent"]["use_space"] = getCurrentIndentUseSpace();
-
-Json::StyledWriter writer;
-// Make a new JSON document for the configuration. Preserve original comments.
-std::string outputConfig = writer.write( root );
-
-// You can also use streams.  This will put the contents of any JSON
-// stream at a particular sub-value, if you'd like.
+// You can also read into a particular sub-value.
 std::cin >> root["subtree"];
 
-// And you can write to a stream, using the StyledWriter automatically.
+// Get the value of the member of root named 'encoding',
+// and return 'UTF-8' if there is no such member.
+std::string encoding = root.get("encoding", "UTF-8" ).asString();
+
+// Get the value of the member of root named 'plug-ins'; return a 'null' value if
+// there is no such member.
+const Json::Value plugins = root["plug-ins"];
+
+// Iterate over the sequence elements.
+for ( int index = 0; index < plugins.size(); ++index )
+   loadPlugIn( plugins[index].asString() );
+   
+// Try other datatypes. Some are auto-convertible to others.
+foo::setIndentLength( root["indent"].get("length", 3).asInt() );
+foo::setIndentUseSpace( root["indent"].get("use_space", true).asBool() );
+
+// Since Json::Value has an implicit constructor for all value types, it is not
+// necessary to explicitly construct the Json::Value object.
+root["encoding"] = foo::getCurrentEncoding();
+root["indent"]["length"] = foo::getCurrentIndentLength();
+root["indent"]["use_space"] = foo::getCurrentIndentUseSpace();
+
+// If you like the defaults, you can insert directly into a stream.
 std::cout << root;
+// Of course, you can write to `std::ostringstream` if you prefer.
+
+// If desired, remember to add a linefeed and flush.
+std::cout << std::endl;
+\endcode
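One caveat the basic example above leaves implicit: with exceptions enabled (the default `JSON_USE_EXCEPTION` build), `operator>>` throws on malformed input, so untrusted documents need a try/catch or the `parseFromStream()` API shown in the next section. A minimal sketch:
\code
#include <json/json.h>
#include <iostream>

int main() {
    Json::Value root;
    try {
        std::cin >> root;   // throws Json::RuntimeError on a parse failure
    } catch (const Json::Exception& e) {
        std::cerr << "Failed to parse configuration: " << e.what() << std::endl;
        return 1;
    }
    std::cout << root.get("encoding", "UTF-8").asString() << std::endl;
    return 0;
}
\endcode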
+
+\section _advanced Advanced usage
+
+Configure *builders* to create *readers* and *writers*. For
+configuration, we use our own `Json::Value` (rather than
+standard setters/getters) so that we can add
+features without losing binary-compatibility.
+
+\code
+// For convenience, use `writeString()` with a specialized builder.
+Json::StreamWriterBuilder wbuilder;
+wbuilder["indentation"] = "\t";
+std::string document = Json::writeString(wbuilder, root);
+
+// Here, using a specialized Builder, we discard comments and
+// record errors as we parse.
+Json::CharReaderBuilder rbuilder;
+rbuilder["collectComments"] = false;
+std::string errs;
+bool ok = Json::parseFromStream(rbuilder, std::cin, &root, &errs);
+\endcode
+
+Yes, compile-time configuration-checking would be helpful,
+but `Json::Value` lets you
+write and read the builder configuration, which is better! In other words,
+you can configure your JSON parser using JSON.
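Because the configuration is plain JSON, it can also be checked at run time. The sketch below assumes the `validate()` member that recent jsoncpp releases expose on both builders; it reports any settings keys the builder does not recognize:
\code
#include <json/json.h>
#include <iostream>

int main() {
    Json::CharReaderBuilder rbuilder;
    rbuilder["collectComments"] = false;
    rbuilder["colectComments"] = false;   // deliberate typo, flagged by validate()

    Json::Value invalid;
    if (!rbuilder.validate(&invalid))
        std::cerr << "unrecognized settings: " << invalid << std::endl;
    return 0;
}
\endcode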
+
+CharReaders and StreamWriters are not thread-safe, but they are re-usable.
+\code
+Json::CharReaderBuilder rbuilder;
+cfg >> rbuilder.settings_;
+std::unique_ptr<Json::CharReader> const reader(rbuilder.newCharReader());
+reader->parse(start, stop, &value1, &errs);
+// ...
+reader->parse(start, stop, &value2, &errs);
+// etc.
 \endcode
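A StreamWriter obtained from its builder can be re-used in the same way; a short illustrative sketch:
\code
#include <json/json.h>
#include <iostream>
#include <memory>

int main() {
    Json::Value value1, value2;
    value1["a"] = 1;
    value2["b"] = 2;

    Json::StreamWriterBuilder wbuilder;
    wbuilder["indentation"] = "  ";
    std::unique_ptr<Json::StreamWriter> const writer(wbuilder.newStreamWriter());
    writer->write(value1, &std::cout);   // returns 0 on success
    std::cout << std::endl;
    writer->write(value2, &std::cout);   // same writer, second document
    std::cout << std::endl;
    return 0;
}
\endcode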
 
 \section _pbuild Build instructions
 The build instructions are located in the file 
-<a HREF="README.txt">README.txt</a> in the top-directory of the project.
+<a HREF="https://github.com/open-source-parsers/jsoncpp/blob/master/README.md">README.md</a> in the top-directory of the project.
 
-Permanent link to the latest revision of the file in subversion: 
-<a HREF="http://jsoncpp.svn.sourceforge.net/viewvc/jsoncpp/trunk/jsoncpp/README.txt?view=markup">latest README.txt</a>
-
-\section _pdownload Download
-The sources can be downloaded from 
-<a HREF="http://sourceforge.net/projects/jsoncpp/files/">SourceForge download page</a>.
-
-The latest version of the source is available in the project's subversion repository: 
-<a HREF="http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/">
-http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/</a>
-
-To checkout the source, see the following 
-<a HREF="http://sourceforge.net/scm/?type=svn&group_id=144446">instructions</a>.
+The latest version of the source is available in the project's GitHub repository:
+<a HREF="https://github.com/open-source-parsers/jsoncpp/">
+jsoncpp</a>
 
 \section _news What's New?
 The description of latest changes can be found in 
-<a HREF="NEWS.txt">NEWS.txt</a> in the top-directory of the project.
-
-Permanent link to the latest revision of the file in subversion: 
-<a HREF="http://svn.sourceforge.net/viewcvs.cgi/jsoncpp/README.txt?view=markup">latest NEWS.txt</a>
-
-\section _plinks Project links
-- <a HREF="http://jsoncpp.sourceforge.net">json-cpp home</a>
-- <a HREF="http://www.sourceforge.net/projects/jsoncpp/">json-cpp sourceforge project</a>
+<a HREF="https://github.com/open-source-parsers/jsoncpp/wiki/NEWS">
+  the NEWS wiki
+</a>.
 
 \section _rlinks Related links
 - <a HREF="http://www.json.org/">JSON</a> Specification and alternate language implementations.
 - <a HREF="http://www.yaml.org/">YAML</a> A data format designed for human readability.
 - <a HREF="http://www.cl.cam.ac.uk/~mgk25/unicode.html">UTF-8 and Unicode FAQ</a>.
 
+\section _plinks Old project links
+- <a href="https://sourceforge.net/projects/jsoncpp/">https://sourceforge.net/projects/jsoncpp/</a>
+- <a href="http://jsoncpp.sourceforge.net">http://jsoncpp.sourceforge.net</a>
+- <a href="http://sourceforge.net/projects/jsoncpp/files/">http://sourceforge.net/projects/jsoncpp/files/</a>
+- <a href="http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/">http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/</a>
+- <a href="http://jsoncpp.sourceforge.net/old.html">http://jsoncpp.sourceforge.net/old.html</a>
+
 \section _license License
-See file <a HREF="LICENSE">LICENSE</a> in the top-directory of the project.
+See file <a href="https://github.com/open-source-parsers/jsoncpp/blob/master/LICENSE"><code>LICENSE</code></a> in the top-directory of the project.
 
 Basically JsonCpp is licensed under MIT license, or public domain if desired 
 and recognized in your jurisdiction.
 
-\author Baptiste Lepilleur <blep@users.sourceforge.net>
+\author Baptiste Lepilleur <blep@users.sourceforge.net> (originator)
+\author Christopher Dunn <cdunn2001@gmail.com> (primary maintainer)
+\version \include version
+We make strong guarantees about binary-compatibility, consistent with
+<a href="http://apr.apache.org/versioning.html">the Apache versioning scheme</a>.
+\sa version.h
 */
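As a usage note on the versioning statement above, the generated version.h exposes the library version as macros, so clients can check compatibility at compile time. A small sketch, assuming the header is reachable as `<json/version.h>` as in a standard jsoncpp install:
\code
#include <json/version.h>
#include <iostream>

// JSONCPP_VERSION_HEXA is (major << 24) | (minor << 16) | (patch << 8).
#if JSONCPP_VERSION_HEXA < 0x01080000
#error "this code expects jsoncpp >= 1.8.0"
#endif

int main() {
    std::cout << "built against jsoncpp " << JSONCPP_VERSION_STRING << std::endl;
    return 0;
}
\endcode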
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/roadmap.dox b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/roadmap.dox
index e6fc17a..12294bb 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/roadmap.dox
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/roadmap.dox
@@ -1,37 +1,3 @@
 /*! \page roadmap JsonCpp roadmap
-  \section ms_release Makes JsonCpp ready for release
-  - Build system clean-up:
-	- Fix build on Windows (shared-library build is broken)
-	- Add enable/disable flag for static and shared library build
-	- Enhance help
-  - Platform portability check: (Notes: was ok on last check)
-	- linux/gcc, 
-	- solaris/cc, 
-	- windows/msvc678, 
-	- aix/vacpp
-  - Add JsonCpp version to header as numeric for use in preprocessor test
-  - Remove buggy experimental hash stuff
-  \section ms_strict Adds a strict mode to reader/parser
-	Strict JSON support as specific in RFC 4627 (http://www.ietf.org/rfc/rfc4627.txt?number=4627).
-	- Enforce only object or array as root element
-	- Disable comment support
-    - Get jsonchecker failing tests to pass in strict mode
-  \section ms_writer Writter control
-    Provides more control to determine how specific items are serialized when JSON allow choice:
-	- Optionally allow escaping of non-ASCII characters using unicode escape sequence "\\u".
-	- Optionally allow escaping of "/" using "\/".
-  \section ms_separation Expose json reader/writer API that do not impose using Json::Value.
-	Some typical use-case involve an application specific structure to/from a JSON document.
-    - Event base parser to allow unserializing a Json document directly in datastructure instead of
-      using the intermediate Json::Value.
-    - Stream based parser to serialized a Json document without using Json::Value as input.
-	- Performance oriented parser/writer:
-		- Provides an event based parser. Should allow pulling & skipping events for ease of use.
-		- Provides a JSON document builder: fast only.
-  \section ms_perfo Performance tuning
-    - Provides support for static property name definition avoiding allocation 
-    - Static property dictionnary can be provided to JSON reader
-    - Performance scenario & benchmarking
-  \section testing Testing
-    - Adds more tests for unicode parsing (e.g. including surrogate and error detection).
+  Moved to: https://github.com/open-source-parsers/jsoncpp/wiki/Roadmap
 */
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/web_doxyfile.in b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/web_doxyfile.in
new file mode 100644
index 0000000..df09dcc
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doc/web_doxyfile.in
@@ -0,0 +1,2290 @@
+# Doxyfile 1.8.5
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "JsonCpp"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         = %JSONCPP_VERSION%
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF           = "JSON data format manipulation library"
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = %DOC_TOPDIR%
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-
+# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en,
+# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
+# Turkish, Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        = %TOPDIR%
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    = %TOPDIR%/include
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 3
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                = "testCaseSetup=\link CppUT::TestCase::setUp() setUp()\endlink" \
+                         "testCaseRun=\link CppUT::TestCase::run() run()\endlink" \
+                         "testCaseTearDown=\link CppUT::TestCase::tearDown() tearDown()\endlink" \
+                         "json_ref=<a HREF='http://www.json.org/'>JSON (JavaScript Object Notation)</a>"
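To illustrate how such an alias is consumed, a hypothetical header fragment (the function name below is invented for illustration) can use \json_ref in a documentation comment and have it expanded into the JSON specification link in the generated pages:
\code
#include <json/value.h>
#include <string>

/// Parses a \json_ref document held in \a text into \a root.
bool parseDocument(const std::string& text, Json::Value* root);  // hypothetical
\endcode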
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without an extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = NO
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespaces
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = YES
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = NO
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = NO
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           = %WARNING_LOG_PATH%
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = ../include \
+                         ../src/lib_json \
+                         .
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          = *.h \
+                         *.cpp \
+                         *.inl \
+                         *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           = ..
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+TOC_INCLUDE_HEADINGS   = 2
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = %HTML_OUTPUT%
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g. the
+# setting GENERATE_TREEVIEW). It is highly recommended to start with a default
+# header generated using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            = header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            = footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# is purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = YES
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = %HTML_HELP%
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               = jsoncpp-%JSONCPP_VERSION%.chm
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           = "c:\Program Files\HTML Help Workshop\hhc.exe"
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is
+# generated (YES) or included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = YES
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = YES
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = YES
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
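+
+# Illustrative sketch only (not this file's settings): a tree-only navigation
+# layout would pair the two options as follows, whereas this configuration
+# keeps both the tabs and the tree:
+#   GENERATE_TREEVIEW = YES
+#   DISABLE_INDEX     = YES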
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is
+# possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using JavaScript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# each project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements;
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           = ../include
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  = *.h
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             = "_MSC_VER=1800" \
+                         _CPPRTTI \
+                         _WIN32 \
+                         JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+
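+# Illustrative sketch (hypothetical addition, not part of this configuration):
+# to expand an export macro such as JSON_API to nothing without risking
+# recursive expansion or an #undef, the := form could be used:
+#   PREDEFINED += "JSON_API:="
+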
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = NO
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT               = %HAVE_DOT%
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a different font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = %UML_LOOK%
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = YES
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = YES
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               = %DOT_PATH%
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lie
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 1000
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doxybuild.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doxybuild.py
index 03ad68d..862c1f4 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doxybuild.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/doxybuild.py
@@ -1,12 +1,27 @@
 """Script to generate doxygen documentation.
 """
-
+from __future__ import print_function
+from __future__ import unicode_literals
+from devtools import tarball
+from contextlib import contextmanager
+import subprocess
+import traceback
 import re
 import os
-import os.path
 import sys
 import shutil
-from devtools import tarball
+
+@contextmanager
+def cd(newdir):
+    """
+    http://stackoverflow.com/questions/431684/how-do-i-cd-in-python
+    """
+    prevdir = os.getcwd()
+    os.chdir(newdir)
+    try:
+        yield
+    finally:
+        os.chdir(prevdir)
 
 def find_program(*filenames):
     """find a program in folders path_lst, and sets env[var]
@@ -14,9 +29,9 @@
     @return: the full path of the filename if found, or '' if filename could not be found
 """
     paths = os.environ.get('PATH', '').split(os.pathsep)
-    suffixes = ('win32' in sys.platform ) and '.exe .com .bat .cmd' or ''
+    suffixes = ('win32' in sys.platform) and '.exe .com .bat .cmd' or ''
     for filename in filenames:
-        for name in [filename+ext for ext in suffixes.split()]:
+        for name in [filename+ext for ext in suffixes.split(' ')]:
             for directory in paths:
                 full_path = os.path.join(directory, name)
                 if os.path.isfile(full_path):
@@ -28,53 +43,56 @@
     For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
     then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
     """
-    try:
-        f = open(sourcefile, 'rb')
+    with open(sourcefile, 'r') as f:
         contents = f.read()
-        f.close()
-    except:
-        print "Can't read source file %s"%sourcefile
-        raise
-    for (k,v) in dict.items():
+    for (k,v) in list(dict.items()):
         v = v.replace('\\','\\\\') 
         contents = re.sub(k, v, contents)
-    try:
-        f = open(targetfile, 'wb')
+    with open(targetfile, 'w') as f:
         f.write(contents)
-        f.close()
+
+def getstatusoutput(cmd):
+    """cmd is a list.
+    """
+    try:
+        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        output, _ = process.communicate()
+        status = process.returncode
     except:
-        print "Can't write target file %s"%targetfile
-        raise
+        status = -1
+        output = traceback.format_exc()
+    return status, output
+
+def run_cmd(cmd, silent=False):
+    """Raise exception on failure.
+    """
+    info = 'Running: %r in %r' %(' '.join(cmd), os.getcwd())
+    print(info)
+    sys.stdout.flush()
+    if silent:
+        status, output = getstatusoutput(cmd)
+    else:
+        status, output = subprocess.call(cmd), ''
+    if status:
+        msg = 'Error while %s ...\n\terror=%d, output="""%s"""' %(info, status, output)
+        raise Exception(msg)
+
+def assert_is_exe(path):
+    if not path:
+        raise Exception('path is empty.')
+    if not os.path.isfile(path):
+        raise Exception('%r is not a file.' %path)
+    if not os.access(path, os.X_OK):
+        raise Exception('%r is not executable by this user.' %path)
 
 def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
-    config_file = os.path.abspath( config_file )
-    doxygen_path = doxygen_path
-    old_cwd = os.getcwd()
-    try:
-        os.chdir( working_dir )
+    assert_is_exe(doxygen_path)
+    config_file = os.path.abspath(config_file)
+    with cd(working_dir):
         cmd = [doxygen_path, config_file]
-        print 'Running:', ' '.join( cmd )
-        try:
-            import subprocess
-        except:
-            if os.system( ' '.join( cmd ) ) != 0:
-                print 'Documentation generation failed'
-                return False
-        else:
-            if is_silent:
-                process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
-            else:
-                process = subprocess.Popen( cmd )
-            stdout, _ = process.communicate()
-            if process.returncode:
-                print 'Documentation generation failed:'
-                print stdout
-                return False
-        return True
-    finally:
-        os.chdir( old_cwd )
+        run_cmd(cmd, is_silent)
 
-def build_doc( options,  make_release=False ):
+def build_doc(options,  make_release=False):
     if make_release:
         options.make_tarball = True
         options.with_dot = True
@@ -83,62 +101,62 @@
         options.open = False
         options.silent = True
 
-    version = open('version','rt').read().strip()
+    version = open('version', 'rt').read().strip()
     output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
-    if not os.path.isdir( output_dir ):
-        os.makedirs( output_dir )
-    top_dir = os.path.abspath( '.' )
+    if not os.path.isdir(output_dir):
+        os.makedirs(output_dir)
+    top_dir = os.path.abspath('.')
     html_output_dirname = 'jsoncpp-api-html-' + version
-    tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
-    warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
-    html_output_path = os.path.join( output_dir, html_output_dirname )
-    def yesno( bool ):
+    tarball_path = os.path.join('dist', html_output_dirname + '.tar.gz')
+    warning_log_path = os.path.join(output_dir, '../jsoncpp-doxygen-warning.log')
+    html_output_path = os.path.join(output_dir, html_output_dirname)
+    def yesno(bool):
         return bool and 'YES' or 'NO'
     subst_keys = {
         '%JSONCPP_VERSION%': version,
         '%DOC_TOPDIR%': '',
         '%TOPDIR%': top_dir,
-        '%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
+        '%HTML_OUTPUT%': os.path.join('..', output_dir, html_output_dirname),
         '%HAVE_DOT%': yesno(options.with_dot),
         '%DOT_PATH%': os.path.split(options.dot_path)[0],
         '%HTML_HELP%': yesno(options.with_html_help),
         '%UML_LOOK%': yesno(options.with_uml_look),
-        '%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
+        '%WARNING_LOG_PATH%': os.path.join('..', warning_log_path)
         }
 
-    if os.path.isdir( output_dir ):
-        print 'Deleting directory:', output_dir
-        shutil.rmtree( output_dir )
-    if not os.path.isdir( output_dir ):
-        os.makedirs( output_dir )
+    if os.path.isdir(output_dir):
+        print('Deleting directory:', output_dir)
+        shutil.rmtree(output_dir)
+    if not os.path.isdir(output_dir):
+        os.makedirs(output_dir)
 
-    do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
-    ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
+    do_subst_in_file('doc/doxyfile', options.doxyfile_input_path, subst_keys)
+    run_doxygen(options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent)
     if not options.silent:
-        print open(warning_log_path, 'rb').read()
-    index_path = os.path.abspath(os.path.join(subst_keys['%HTML_OUTPUT%'], 'index.html'))
-    print 'Generated documentation can be found in:'
-    print index_path
+        print(open(warning_log_path, 'r').read())
+    index_path = os.path.abspath(os.path.join('doc', subst_keys['%HTML_OUTPUT%'], 'index.html'))
+    print('Generated documentation can be found in:')
+    print(index_path)
     if options.open:
         import webbrowser
-        webbrowser.open( 'file://' + index_path )
+        webbrowser.open('file://' + index_path)
     if options.make_tarball:
-        print 'Generating doc tarball to', tarball_path
+        print('Generating doc tarball to', tarball_path)
         tarball_sources = [
             output_dir,
-            'README.txt',
+            'README.md',
             'LICENSE',
             'NEWS.txt',
             'version'
             ]
-        tarball_basedir = os.path.join( output_dir, html_output_dirname )
-        tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
+        tarball_basedir = os.path.join(output_dir, html_output_dirname)
+        tarball.make_tarball(tarball_path, tarball_sources, tarball_basedir, html_output_dirname)
     return tarball_path, html_output_dirname
 
 def main():
     usage = """%prog
     Generates doxygen documentation in build/doxygen.
-    Optionaly makes a tarball of the documentation to dist/.
+    Optionally makes a tarball of the documentation to dist/.
 
     Must be started in the project top directory.    
     """
@@ -151,6 +169,8 @@
         help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
     parser.add_option('--doxygen', dest="doxygen_path", action='store', default=find_program('doxygen'),
         help="""Path to Doxygen tool. [Default: %default]""")
+    parser.add_option('--in', dest="doxyfile_input_path", action='store', default='doc/doxyfile.in',
+        help="""Path to doxygen inputs. [Default: %default]""")
     parser.add_option('--with-html-help', dest="with_html_help", action='store_true', default=False,
         help="""Enable generation of Microsoft HTML HELP""")
     parser.add_option('--no-uml-look', dest="with_uml_look", action='store_false', default=True,
@@ -163,7 +183,7 @@
         help="""Hides doxygen output""")
     parser.enable_interspersed_args()
     options, args = parser.parse_args()
-    build_doc( options )
+    build_doc(options)
 
 if __name__ == '__main__':
     main()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/CMakeLists.txt
new file mode 100644
index 0000000..facfab1
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/CMakeLists.txt
@@ -0,0 +1,6 @@
+file(GLOB INCLUDE_FILES "json/*.h")
+install(FILES
+    ${INCLUDE_FILES}
+    ${PROJECT_BINARY_DIR}/include/json/version.h
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/json)
+
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/allocator.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/allocator.h
new file mode 100644
index 0000000..d5f987e
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/allocator.h
@@ -0,0 +1,89 @@
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef CPPTL_JSON_ALLOCATOR_H_INCLUDED
+#define CPPTL_JSON_ALLOCATOR_H_INCLUDED
+
+#include <cstring>
+#include <memory>
+
+#pragma pack(push, 8)
+
+namespace Json {
+template <typename T> class SecureAllocator {
+public:
+  // Type definitions
+  using value_type = T;
+  using pointer = T*;
+  using const_pointer = const T*;
+  using reference = T&;
+  using const_reference = const T&;
+  using size_type = std::size_t;
+  using difference_type = std::ptrdiff_t;
+
+  /**
+   * Allocate memory for N items using the standard allocator.
+   */
+  pointer allocate(size_type n) {
+    // allocate using "global operator new"
+    return static_cast<pointer>(::operator new(n * sizeof(T)));
+  }
+
+  /**
+   * Release memory which was allocated for N items at pointer P.
+   *
+   * The memory block is filled with zeroes before being released.
+   * The pointer argument is tagged as "volatile" to prevent the
+   * compiler optimizing out this critical step.
+   */
+  void deallocate(volatile pointer p, size_type n) {
+    std::memset(p, 0, n * sizeof(T));
+    // free using "global operator delete"
+    ::operator delete(p);
+  }
+
+  /**
+   * Construct an item in-place at pointer P.
+   */
+  template <typename... Args> void construct(pointer p, Args&&... args) {
+    // construct using "placement new" and "perfect forwarding"
+    ::new (static_cast<void*>(p)) T(std::forward<Args>(args)...);
+  }
+
+  size_type max_size() const { return size_t(-1) / sizeof(T); }
+
+  pointer address(reference x) const { return std::addressof(x); }
+
+  const_pointer address(const_reference x) const { return std::addressof(x); }
+
+  /**
+   * Destroy an item in-place at pointer P.
+   */
+  void destroy(pointer p) {
+    // destroy using "explicit destructor"
+    p->~T();
+  }
+
+  // Boilerplate
+  SecureAllocator() {}
+  template <typename U> SecureAllocator(const SecureAllocator<U>&) {}
+  template <typename U> struct rebind { using other = SecureAllocator<U>; };
+};
+
+template <typename T, typename U>
+bool operator==(const SecureAllocator<T>&, const SecureAllocator<U>&) {
+  return true;
+}
+
+template <typename T, typename U>
+bool operator!=(const SecureAllocator<T>&, const SecureAllocator<U>&) {
+  return false;
+}
+
+} // namespace Json
+
+#pragma pack(pop)
+
+#endif // CPPTL_JSON_ALLOCATOR_H_INCLUDED
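
The new allocator.h backs the JSONCPP_USING_SECURE_MEMORY configuration referenced later in config.h: SecureAllocator zeroes a block before returning it to the global heap. A minimal sketch of how it can back standard containers (not part of this patch; the secure_string alias is hypothetical and mirrors what Json::String becomes when secure memory is enabled, and the include path assumes the BUILD.gn setup above):

    #include <string>
    #include <vector>
    #include <json/allocator.h>

    // Hypothetical alias mirroring Json::String under JSONCPP_USING_SECURE_MEMORY.
    using secure_string =
        std::basic_string<char, std::char_traits<char>, Json::SecureAllocator<char>>;

    int main() {
      // Long enough to defeat the small-string optimization, so the heap buffer
      // really does come from SecureAllocator.
      secure_string token("a-long-secret-token-that-will-not-fit-in-SSO-storage");
      std::vector<int, Json::SecureAllocator<int>> ids{1, 2, 3};
      // On destruction, SecureAllocator::deallocate() memsets each buffer to zero
      // before handing it back to ::operator delete.
      return 0;
    }
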
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/assertions.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/assertions.h
index a480585..20716b0 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/assertions.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/assertions.h
@@ -1,31 +1,59 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef CPPTL_JSON_ASSERTIONS_H_INCLUDED
-# define CPPTL_JSON_ASSERTIONS_H_INCLUDED
+#define CPPTL_JSON_ASSERTIONS_H_INCLUDED
 
-#include <stdlib.h>
+#include <cstdlib>
+#include <sstream>
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include <json/config.h>
+#include "config.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
 
+/** It should not be possible for a maliciously designed file to
+ *  cause an abort() or seg-fault, so these macros are used only
+ *  for pre-condition violations and internal logic errors.
+ */
 #if JSON_USE_EXCEPTION
-#define JSON_ASSERT( condition ) assert( condition );  // @todo <= change this into an exception throw
-#define JSON_FAIL_MESSAGE( message ) throw std::runtime_error( message );
-#else  // JSON_USE_EXCEPTION
-#define JSON_ASSERT( condition ) assert( condition );
+
+// @todo <= add detail about condition in exception
+#define JSON_ASSERT(condition)                                                 \
+  {                                                                            \
+    if (!(condition)) {                                                        \
+      Json::throwLogicError("assert json failed");                             \
+    }                                                                          \
+  }
+
+#define JSON_FAIL_MESSAGE(message)                                             \
+  {                                                                            \
+    OStringStream oss;                                                         \
+    oss << message;                                                            \
+    Json::throwLogicError(oss.str());                                          \
+    abort();                                                                   \
+  }
+
+#else // JSON_USE_EXCEPTION
+
+#define JSON_ASSERT(condition) assert(condition)
 
 // The call to assert() will show the failure message in debug builds. In
-// release bugs we write to invalid memory in order to crash hard, so that a
-// debugger or crash reporter gets the chance to take over. We still call exit()
-// afterward in order to tell the compiler that this macro doesn't return.
-#define JSON_FAIL_MESSAGE( message ) { assert(false && message); strcpy(reinterpret_cast<char*>(666), message); exit(123); }
+// release builds we abort, for a core-dump or debugger.
+#define JSON_FAIL_MESSAGE(message)                                             \
+  {                                                                            \
+    OStringStream oss;                                                         \
+    oss << message;                                                            \
+    assert(false && oss.str().c_str());                                        \
+    abort();                                                                   \
+  }
 
 #endif
 
-#define JSON_ASSERT_MESSAGE( condition, message ) if (!( condition )) { JSON_FAIL_MESSAGE( message ) }
+#define JSON_ASSERT_MESSAGE(condition, message)                                \
+  if (!(condition)) {                                                          \
+    JSON_FAIL_MESSAGE(message);                                                \
+  }
 
 #endif // CPPTL_JSON_ASSERTIONS_H_INCLUDED
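
With JSON_USE_EXCEPTION (the default), the rewritten JSON_ASSERT/JSON_FAIL_MESSAGE macros route through Json::throwLogicError() instead of the old write-to-invalid-memory trick, so precondition violations become catchable exceptions. A rough sketch of what client code can now rely on (illustrative only; indexing an int value is just an arbitrary way to trip a precondition):

    #include <iostream>
    #include <json/json.h>

    int main() {
      Json::Value v = 42;                 // an intValue
      try {
        v["key"] = 1;                     // precondition violation: not an objectValue
      } catch (const Json::Exception& e) {
        // Under the new JSON_FAIL_MESSAGE this is a catchable exception rather
        // than a deliberate crash.
        std::cerr << "caught: " << e.what() << '\n';
      }
      return 0;
    }
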
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/autolink.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/autolink.h
index 02328d1..b2c0f00 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/autolink.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/autolink.h
@@ -1,24 +1,25 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSON_AUTOLINK_H_INCLUDED
-# define JSON_AUTOLINK_H_INCLUDED
+#define JSON_AUTOLINK_H_INCLUDED
 
-# include "config.h"
+#include "config.h"
 
-# ifdef JSON_IN_CPPTL
-#  include <cpptl/cpptl_autolink.h>
-# endif
+#ifdef JSON_IN_CPPTL
+#include <cpptl/cpptl_autolink.h>
+#endif
 
-# if !defined(JSON_NO_AUTOLINK)  &&  !defined(JSON_DLL_BUILD)  &&  !defined(JSON_IN_CPPTL)
-#  define CPPTL_AUTOLINK_NAME "json"
-#  undef CPPTL_AUTOLINK_DLL
-#  ifdef JSON_DLL
-#   define CPPTL_AUTOLINK_DLL
-#  endif
-#  include "autolink.h"
-# endif
+#if !defined(JSON_NO_AUTOLINK) && !defined(JSON_DLL_BUILD) &&                  \
+    !defined(JSON_IN_CPPTL)
+#define CPPTL_AUTOLINK_NAME "json"
+#undef CPPTL_AUTOLINK_DLL
+#ifdef JSON_DLL
+#define CPPTL_AUTOLINK_DLL
+#endif
+#include "autolink.h"
+#endif
 
 #endif // JSON_AUTOLINK_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/config.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/config.h
index 72437c4..8724ad9 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/config.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/config.h
@@ -1,98 +1,180 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSON_CONFIG_H_INCLUDED
-# define JSON_CONFIG_H_INCLUDED
+#define JSON_CONFIG_H_INCLUDED
+#include <cstddef>
+#include <cstdint>
+#include <istream>
+#include <memory>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <type_traits>
 
 /// If defined, indicates that json library is embedded in CppTL library.
 //# define JSON_IN_CPPTL 1
 
 /// If defined, indicates that json may leverage CppTL library
 //#  define JSON_USE_CPPTL 1
-/// If defined, indicates that cpptl vector based map should be used instead of std::map
+/// If defined, indicates that cpptl vector based map should be used instead of
+/// std::map
 /// as Value container.
 //#  define JSON_USE_CPPTL_SMALLMAP 1
-/// If defined, indicates that Json specific container should be used
-/// (hash table & simple deque container with customizable allocator).
-/// THIS FEATURE IS STILL EXPERIMENTAL! There is know bugs: See #3177332
-//#  define JSON_VALUE_USE_INTERNAL_MAP 1
-/// Force usage of standard new/malloc based allocator instead of memory pool based allocator.
-/// The memory pools allocator used optimization (initializing Value and ValueInternalLink
-/// as if it was a POD) that may cause some validation tool to report errors.
-/// Only has effects if JSON_VALUE_USE_INTERNAL_MAP is defined.
-//#  define JSON_USE_SIMPLE_INTERNAL_ALLOCATOR 1
 
 // If non-zero, the library uses exceptions to report bad input instead of C
 // assertion macros. The default is to use exceptions.
-# ifndef JSON_USE_EXCEPTION
-# define JSON_USE_EXCEPTION 1
-# endif
+#ifndef JSON_USE_EXCEPTION
+#define JSON_USE_EXCEPTION 1
+#endif
 
-/// If defined, indicates that the source file is amalgated
+// Temporary, tracked for removal with issue #982.
+#ifndef JSON_USE_NULLREF
+#define JSON_USE_NULLREF 1
+#endif
+
+/// If defined, indicates that the source file is amalgamated
 /// to prevent private header inclusion.
-/// Remarks: it is automatically defined in the generated amalgated header.
+/// Remarks: it is automatically defined in the generated amalgamated header.
 // #define JSON_IS_AMALGAMATION
 
+#ifdef JSON_IN_CPPTL
+#include <cpptl/config.h>
+#ifndef JSON_USE_CPPTL
+#define JSON_USE_CPPTL 1
+#endif
+#endif
 
-# ifdef JSON_IN_CPPTL
-#  include <cpptl/config.h>
-#  ifndef JSON_USE_CPPTL
-#   define JSON_USE_CPPTL 1
-#  endif
-# endif
+#ifdef JSON_IN_CPPTL
+#define JSON_API CPPTL_API
+#elif defined(JSON_DLL_BUILD)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllexport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#elif defined(__GNUC__) || defined(__clang__)
+#define JSON_API __attribute__((visibility("default")))
+#endif // if defined(_MSC_VER)
+#elif defined(JSON_DLL)
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#define JSON_API __declspec(dllimport)
+#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
+#endif // if defined(_MSC_VER)
+#endif // ifdef JSON_IN_CPPTL
+#if !defined(JSON_API)
+#define JSON_API
+#endif
 
-# ifdef JSON_IN_CPPTL
-#  define JSON_API CPPTL_API
-# elif defined(JSON_DLL_BUILD)
-#  define JSON_API __declspec(dllexport)
-# elif defined(JSON_DLL)
-#  define JSON_API __declspec(dllimport)
-# else
-#  define JSON_API
-# endif
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#error                                                                         \
+    "ERROR:  Visual Studio 12 (2013) with _MSC_VER=1800 is the oldest supported compiler with sufficient C++11 capabilities"
+#endif
 
-// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for integer
+#if defined(_MSC_VER) && _MSC_VER < 1900
+// As recommended at
+// https://stackoverflow.com/questions/2915672/snprintf-and-visual-studio-2010
+extern JSON_API int
+msvc_pre1900_c99_snprintf(char* outBuf, size_t size, const char* format, ...);
+#define jsoncpp_snprintf msvc_pre1900_c99_snprintf
+#else
+#define jsoncpp_snprintf std::snprintf
+#endif
+
+// If JSON_NO_INT64 is defined, then Json only supports the C++ "int" type for
+// integer
 // Storages, and 64 bits integer support is disabled.
 // #define JSON_NO_INT64 1
 
-#if defined(_MSC_VER)  &&  _MSC_VER <= 1200 // MSVC 6
-// Microsoft Visual Studio 6 only support conversion from __int64 to double
-// (no conversion from unsigned __int64).
-#define JSON_USE_INT64_DOUBLE_CONVERSION 1
-#endif // if defined(_MSC_VER)  &&  _MSC_VER < 1200 // MSVC 6
+// JSONCPP_OVERRIDE is maintained for backwards compatibility of external tools.
+// C++11 should be used directly in JSONCPP.
+#define JSONCPP_OVERRIDE override
 
-#if defined(_MSC_VER)  &&  _MSC_VER >= 1500 // MSVC 2008
-/// Indicates that the following function is deprecated.
-# define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
+#if __cplusplus >= 201103L
+#define JSONCPP_NOEXCEPT noexcept
+#define JSONCPP_OP_EXPLICIT explicit
+#elif defined(_MSC_VER) && _MSC_VER < 1900
+#define JSONCPP_NOEXCEPT throw()
+#define JSONCPP_OP_EXPLICIT explicit
+#elif defined(_MSC_VER) && _MSC_VER >= 1900
+#define JSONCPP_NOEXCEPT noexcept
+#define JSONCPP_OP_EXPLICIT explicit
+#else
+#define JSONCPP_NOEXCEPT throw()
+#define JSONCPP_OP_EXPLICIT
 #endif
 
+#ifdef __clang__
+#if __has_extension(attribute_deprecated_with_message)
+#define JSONCPP_DEPRECATED(message) __attribute__((deprecated(message)))
+#endif
+#elif defined __GNUC__ // not clang (gcc comes later since clang emulates gcc)
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+#define JSONCPP_DEPRECATED(message) __attribute__((deprecated(message)))
+#elif (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+#define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__))
+#endif                  // GNUC version
+#elif defined(_MSC_VER) // MSVC (after clang because clang on Windows emulates
+                        // MSVC)
+#define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
+#endif // __clang__ || __GNUC__ || _MSC_VER
+
 #if !defined(JSONCPP_DEPRECATED)
-# define JSONCPP_DEPRECATED(message)
+#define JSONCPP_DEPRECATED(message)
 #endif // if !defined(JSONCPP_DEPRECATED)
 
-namespace Json {
-   typedef int Int;
-   typedef unsigned int UInt;
-# if defined(JSON_NO_INT64)
-   typedef int LargestInt;
-   typedef unsigned int LargestUInt;
-#  undef JSON_HAS_INT64
-# else // if defined(JSON_NO_INT64)
-   // For Microsoft Visual use specific types as long long is not supported
-#  if defined(_MSC_VER) // Microsoft Visual Studio
-   typedef __int64 Int64;
-   typedef unsigned __int64 UInt64;
-#  else // if defined(_MSC_VER) // Other platforms, use long long
-   typedef long long int Int64;
-   typedef unsigned long long int UInt64;
-#  endif // if defined(_MSC_VER)
-   typedef Int64 LargestInt;
-   typedef UInt64 LargestUInt;
-#  define JSON_HAS_INT64
-# endif // if defined(JSON_NO_INT64)
-} // end namespace Json
+#if __GNUC__ >= 6
+#define JSON_USE_INT64_DOUBLE_CONVERSION 1
+#endif
 
+#if !defined(JSON_IS_AMALGAMATION)
+
+#include "allocator.h"
+#include "version.h"
+
+#endif // if !defined(JSON_IS_AMALGAMATION)
+
+namespace Json {
+typedef int Int;
+typedef unsigned int UInt;
+#if defined(JSON_NO_INT64)
+typedef int LargestInt;
+typedef unsigned int LargestUInt;
+#undef JSON_HAS_INT64
+#else                 // if defined(JSON_NO_INT64)
+// For Microsoft Visual use specific types as long long is not supported
+#if defined(_MSC_VER) // Microsoft Visual Studio
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#else                 // if defined(_MSC_VER) // Other platforms, use long long
+typedef int64_t Int64;
+typedef uint64_t UInt64;
+#endif                // if defined(_MSC_VER)
+typedef Int64 LargestInt;
+typedef UInt64 LargestUInt;
+#define JSON_HAS_INT64
+#endif // if defined(JSON_NO_INT64)
+
+template <typename T>
+using Allocator = typename std::conditional<JSONCPP_USING_SECURE_MEMORY,
+                                            SecureAllocator<T>,
+                                            std::allocator<T>>::type;
+using String = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
+using IStringStream = std::basic_istringstream<String::value_type,
+                                               String::traits_type,
+                                               String::allocator_type>;
+using OStringStream = std::basic_ostringstream<String::value_type,
+                                               String::traits_type,
+                                               String::allocator_type>;
+using IStream = std::istream;
+using OStream = std::ostream;
+} // namespace Json
+
+// Legacy names (formerly macros).
+using JSONCPP_STRING = Json::String;
+using JSONCPP_ISTRINGSTREAM = Json::IStringStream;
+using JSONCPP_OSTRINGSTREAM = Json::OStringStream;
+using JSONCPP_ISTREAM = Json::IStream;
+using JSONCPP_OSTREAM = Json::OStream;
 
 #endif // JSON_CONFIG_H_INCLUDED
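
config.h now owns the allocator-aware string machinery: Json::String is std::basic_string over Json::Allocator<char>, which collapses to std::allocator unless JSONCPP_USING_SECURE_MEMORY is set, and the old JSONCPP_STRING-style macros survive as plain type aliases. A small sketch of how they interoperate in the default (non-secure) configuration, assuming the include paths from the BUILD.gn above:

    #include <iostream>
    #include <json/config.h>

    int main() {
      // With JSONCPP_USING_SECURE_MEMORY left at 0, Json::String is the same type
      // as std::string, so existing call sites keep compiling unchanged.
      Json::String s = "hello";
      JSONCPP_STRING t = s;               // legacy name, now a using-declaration
      Json::OStringStream oss;
      oss << s << ", " << t;
      std::cout << oss.str() << '\n';     // prints "hello, hello"
      return 0;
    }
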
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/features.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/features.h
index 4353278..ba25e8d 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/features.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/features.h
@@ -1,49 +1,61 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef CPPTL_JSON_FEATURES_H_INCLUDED
-# define CPPTL_JSON_FEATURES_H_INCLUDED
+#define CPPTL_JSON_FEATURES_H_INCLUDED
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include "forwards.h"
+#include "forwards.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
 
+#pragma pack(push, 8)
+
 namespace Json {
 
-   /** \brief Configuration passed to reader and writer.
-    * This configuration object can be used to force the Reader or Writer
-    * to behave in a standard conforming way.
-    */
-   class JSON_API Features
-   {
-   public:
-      /** \brief A configuration that allows all features and assumes all strings are UTF-8.
-       * - C & C++ comments are allowed
-       * - Root object can be any JSON value
-       * - Assumes Value strings are encoded in UTF-8
-       */
-      static Features all();
+/** \brief Configuration passed to reader and writer.
+ * This configuration object can be used to force the Reader or Writer
+ * to behave in a standard conforming way.
+ */
+class JSON_API Features {
+public:
+  /** \brief A configuration that allows all features and assumes all strings
+   * are UTF-8.
+   * - C & C++ comments are allowed
+   * - Root object can be any JSON value
+   * - Assumes Value strings are encoded in UTF-8
+   */
+  static Features all();
 
-      /** \brief A configuration that is strictly compatible with the JSON specification.
-       * - Comments are forbidden.
-       * - Root object must be either an array or an object value.
-       * - Assumes Value strings are encoded in UTF-8
-       */
-      static Features strictMode();
+  /** \brief A configuration that is strictly compatible with the JSON
+   * specification.
+   * - Comments are forbidden.
+   * - Root object must be either an array or an object value.
+   * - Assumes Value strings are encoded in UTF-8
+   */
+  static Features strictMode();
 
-      /** \brief Initialize the configuration like JsonConfig::allFeatures;
-       */
-      Features();
+  /** \brief Initialize the configuration like JsonConfig::allFeatures;
+   */
+  Features();
 
-      /// \c true if comments are allowed. Default: \c true.
-      bool allowComments_;
+  /// \c true if comments are allowed. Default: \c true.
+  bool allowComments_{true};
 
-      /// \c true if root must be either an array or an object value. Default: \c false.
-      bool strictRoot_;
-   };
+  /// \c true if root must be either an array or an object value. Default: \c
+  /// false.
+  bool strictRoot_{false};
+
+  /// \c true if dropped null placeholders are allowed. Default: \c false.
+  bool allowDroppedNullPlaceholders_{false};
+
+  /// \c true if numeric object key are allowed. Default: \c false.
+  bool allowNumericKeys_{false};
+};
 
 } // namespace Json
 
+#pragma pack(pop)
+
 #endif // CPPTL_JSON_FEATURES_H_INCLUDED
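
Features gains in-class member initializers plus two new knobs (allowDroppedNullPlaceholders_, allowNumericKeys_), so a default-constructed object matches Features::all(). A short usage sketch with the old-style Reader, which this update deprecates but keeps working (hence the -Wno-deprecated-declarations flag in BUILD.gn):

    #include <iostream>
    #include <json/json.h>

    int main() {
      Json::Value root;

      // Default construction is equivalent to Features::all(): comments allowed,
      // any JSON value accepted as the document root.
      Json::Reader lax;
      std::cout << std::boolalpha << lax.parse("42", root) << '\n';   // true

      // strictMode() requires an array or object root, so the same input fails.
      Json::Reader strict(Json::Features::strictMode());
      std::cout << strict.parse("42", root) << '\n';                  // false
      std::cout << strict.getFormattedErrorMessages();
      return 0;
    }
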
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/forwards.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/forwards.h
index ab863da..958b5bc 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/forwards.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/forwards.h
@@ -1,44 +1,43 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSON_FORWARDS_H_INCLUDED
-# define JSON_FORWARDS_H_INCLUDED
+#define JSON_FORWARDS_H_INCLUDED
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include "config.h"
+#include "config.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
 
 namespace Json {
 
-   // writer.h
-   class FastWriter;
-   class StyledWriter;
+// writer.h
+class StreamWriter;
+class StreamWriterBuilder;
+class Writer;
+class FastWriter;
+class StyledWriter;
+class StyledStreamWriter;
 
-   // reader.h
-   class Reader;
+// reader.h
+class Reader;
+class CharReader;
+class CharReaderBuilder;
 
-   // features.h
-   class Features;
+// features.h
+class Features;
 
-   // value.h
-   typedef unsigned int ArrayIndex;
-   class StaticString;
-   class Path;
-   class PathArgument;
-   class Value;
-   class ValueIteratorBase;
-   class ValueIterator;
-   class ValueConstIterator;
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   class ValueMapAllocator;
-   class ValueInternalLink;
-   class ValueInternalArray;
-   class ValueInternalMap;
-#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP
+// value.h
+typedef unsigned int ArrayIndex;
+class StaticString;
+class Path;
+class PathArgument;
+class Value;
+class ValueIteratorBase;
+class ValueIterator;
+class ValueConstIterator;
 
 } // namespace Json
 
-
 #endif // JSON_FORWARDS_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/json.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/json.h
index da5fc96..19f14c2 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/json.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/json.h
@@ -1,15 +1,15 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSON_JSON_H_INCLUDED
-# define JSON_JSON_H_INCLUDED
+#define JSON_JSON_H_INCLUDED
 
-# include "autolink.h"
-# include "value.h"
-# include "reader.h"
-# include "writer.h"
-# include "features.h"
+#include "autolink.h"
+#include "features.h"
+#include "reader.h"
+#include "value.h"
+#include "writer.h"
 
 #endif // JSON_JSON_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/reader.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/reader.h
index a3023b3..0f489d6 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/reader.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/reader.h
@@ -1,213 +1,417 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef CPPTL_JSON_READER_H_INCLUDED
-# define CPPTL_JSON_READER_H_INCLUDED
+#define CPPTL_JSON_READER_H_INCLUDED
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include "features.h"
-# include "value.h"
+#include "features.h"
+#include "value.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
-# include <deque>
-# include <stack>
-# include <string>
+#include <deque>
+#include <iosfwd>
+#include <istream>
+#include <stack>
+#include <string>
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#pragma pack(push, 8)
 
 namespace Json {
 
-   /** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a Value.
-    *
-    */
-   class JSON_API Reader
-   {
-   public:
-      typedef char Char;
-      typedef const Char *Location;
+/** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a
+ *Value.
+ *
+ * \deprecated Use CharReader and CharReaderBuilder.
+ */
+class JSON_API Reader {
+public:
+  typedef char Char;
+  typedef const Char* Location;
 
-      /** \brief Constructs a Reader allowing all features
-       * for parsing.
-       */
-      Reader();
-
-      /** \brief Constructs a Reader allowing the specified feature set
-       * for parsing.
-       */
-      Reader( const Features &features );
-
-      /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a> document.
-       * \param document UTF-8 encoded string containing the document to read.
-       * \param root [out] Contains the root value of the document if it was
-       *             successfully parsed.
-       * \param collectComments \c true to collect comment and allow writing them back during
-       *                        serialization, \c false to discard comments.
-       *                        This parameter is ignored if Features::allowComments_
-       *                        is \c false.
-       * \return \c true if the document was successfully parsed, \c false if an error occurred.
-       */
-      bool parse( const std::string &document, 
-                  Value &root,
-                  bool collectComments = true );
-
-      /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a> document.
-       * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the document to read.
-       * \param endDoc Pointer on the end of the UTF-8 encoded string of the document to read. 
-       \               Must be >= beginDoc.
-       * \param root [out] Contains the root value of the document if it was
-       *             successfully parsed.
-       * \param collectComments \c true to collect comment and allow writing them back during
-       *                        serialization, \c false to discard comments.
-       *                        This parameter is ignored if Features::allowComments_
-       *                        is \c false.
-       * \return \c true if the document was successfully parsed, \c false if an error occurred.
-       */
-      bool parse( const char *beginDoc, const char *endDoc, 
-                  Value &root,
-                  bool collectComments = true );
-
-      /// \brief Parse from input stream.
-      /// \see Json::operator>>(std::istream&, Json::Value&).
-      bool parse( std::istream &is,
-                  Value &root,
-                  bool collectComments = true );
-
-      /** \brief Returns a user friendly string that list errors in the parsed document.
-       * \return Formatted error message with the list of errors with their location in 
-       *         the parsed document. An empty string is returned if no error occurred
-       *         during parsing.
-       * \deprecated Use getFormattedErrorMessages() instead (typo fix).
-       */
-      JSONCPP_DEPRECATED("Use getFormattedErrorMessages instead") 
-      std::string getFormatedErrorMessages() const;
-
-      /** \brief Returns a user friendly string that list errors in the parsed document.
-       * \return Formatted error message with the list of errors with their location in 
-       *         the parsed document. An empty string is returned if no error occurred
-       *         during parsing.
-       */
-      std::string getFormattedErrorMessages() const;
-
-   private:
-      enum TokenType
-      {
-         tokenEndOfStream = 0,
-         tokenObjectBegin,
-         tokenObjectEnd,
-         tokenArrayBegin,
-         tokenArrayEnd,
-         tokenString,
-         tokenNumber,
-         tokenTrue,
-         tokenFalse,
-         tokenNull,
-         tokenArraySeparator,
-         tokenMemberSeparator,
-         tokenComment,
-         tokenError
-      };
-
-      class Token
-      {
-      public:
-         TokenType type_;
-         Location start_;
-         Location end_;
-      };
-
-      class ErrorInfo
-      {
-      public:
-         Token token_;
-         std::string message_;
-         Location extra_;
-      };
-
-      typedef std::deque<ErrorInfo> Errors;
-
-      bool expectToken( TokenType type, Token &token, const char *message );
-      bool readToken( Token &token );
-      void skipSpaces();
-      bool match( Location pattern, 
-                  int patternLength );
-      bool readComment();
-      bool readCStyleComment();
-      bool readCppStyleComment();
-      bool readString();
-      void readNumber();
-      bool readValue();
-      bool readObject( Token &token );
-      bool readArray( Token &token );
-      bool decodeNumber( Token &token );
-      bool decodeString( Token &token );
-      bool decodeString( Token &token, std::string &decoded );
-      bool decodeDouble( Token &token );
-      bool decodeUnicodeCodePoint( Token &token, 
-                                   Location &current, 
-                                   Location end, 
-                                   unsigned int &unicode );
-      bool decodeUnicodeEscapeSequence( Token &token, 
-                                        Location &current, 
-                                        Location end, 
-                                        unsigned int &unicode );
-      bool addError( const std::string &message, 
-                     Token &token,
-                     Location extra = 0 );
-      bool recoverFromError( TokenType skipUntilToken );
-      bool addErrorAndRecover( const std::string &message, 
-                               Token &token,
-                               TokenType skipUntilToken );
-      void skipUntilSpace();
-      Value &currentValue();
-      Char getNextChar();
-      void getLocationLineAndColumn( Location location,
-                                     int &line,
-                                     int &column ) const;
-      std::string getLocationLineAndColumn( Location location ) const;
-      void addComment( Location begin, 
-                       Location end, 
-                       CommentPlacement placement );
-      void skipCommentTokens( Token &token );
-   
-      typedef std::stack<Value *> Nodes;
-      Nodes nodes_;
-      Errors errors_;
-      std::string document_;
-      Location begin_;
-      Location end_;
-      Location current_;
-      Location lastValueEnd_;
-      Value *lastValue_;
-      std::string commentsBefore_;
-      Features features_;
-      bool collectComments_;
-   };
-
-   /** \brief Read from 'sin' into 'root'.
-
-    Always keep comments from the input JSON.
-
-    This can be used to read a file into a particular sub-object.
-    For example:
-    \code
-    Json::Value root;
-    cin >> root["dir"]["file"];
-    cout << root;
-    \endcode
-    Result:
-    \verbatim
-    {
-    "dir": {
-        "file": {
-        // The input stream JSON would be nested here.
-        }
-    }
-    }
-    \endverbatim
-    \throw std::exception on parse error.
-    \see Json::operator<<()
+  /** \brief An error tagged with where in the JSON text it was encountered.
+   *
+   * The offsets give the [start, limit) range of bytes within the text. Note
+   * that this is bytes, not codepoints.
+   *
    */
-   std::istream& operator>>( std::istream&, Value& );
+  struct StructuredError {
+    ptrdiff_t offset_start;
+    ptrdiff_t offset_limit;
+    String message;
+  };
+
+  /** \brief Constructs a Reader allowing all features
+   * for parsing.
+   */
+  JSONCPP_DEPRECATED("Use CharReader and CharReaderBuilder instead")
+  Reader();
+
+  /** \brief Constructs a Reader allowing the specified feature set
+   * for parsing.
+   */
+  JSONCPP_DEPRECATED("Use CharReader and CharReaderBuilder instead")
+  Reader(const Features& features);
+
+  /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+   * document.
+   * \param document UTF-8 encoded string containing the document to read.
+   * \param root [out] Contains the root value of the document if it was
+   *             successfully parsed.
+   * \param collectComments \c true to collect comment and allow writing them
+   * back during
+   *                        serialization, \c false to discard comments.
+   *                        This parameter is ignored if
+   * Features::allowComments_
+   *                        is \c false.
+   * \return \c true if the document was successfully parsed, \c false if an
+   * error occurred.
+   */
+  bool
+  parse(const std::string& document, Value& root, bool collectComments = true);
+
+  /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+   document.
+   * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
+   document to read.
+   * \param endDoc Pointer on the end of the UTF-8 encoded string of the
+   document to read.
+   *               Must be >= beginDoc.
+   * \param root [out] Contains the root value of the document if it was
+   *             successfully parsed.
+   * \param collectComments \c true to collect comment and allow writing them
+   back during
+   *                        serialization, \c false to discard comments.
+   *                        This parameter is ignored if
+   Features::allowComments_
+   *                        is \c false.
+   * \return \c true if the document was successfully parsed, \c false if an
+   error occurred.
+   */
+  bool parse(const char* beginDoc,
+             const char* endDoc,
+             Value& root,
+             bool collectComments = true);
+
+  /// \brief Parse from input stream.
+  /// \see Json::operator>>(std::istream&, Json::Value&).
+  bool parse(IStream& is, Value& root, bool collectComments = true);
+
+  /** \brief Returns a user-friendly string that lists errors in the parsed
+   * document.
+   * \return Formatted error message with the list of errors with their location
+   * in
+   *         the parsed document. An empty string is returned if no error
+   * occurred
+   *         during parsing.
+   * \deprecated Use getFormattedErrorMessages() instead (typo fix).
+   */
+  JSONCPP_DEPRECATED("Use getFormattedErrorMessages() instead.")
+  String getFormatedErrorMessages() const;
+
+  /** \brief Returns a user-friendly string that lists errors in the parsed
+   * document.
+   * \return Formatted error message with the list of errors with their location
+   * in
+   *         the parsed document. An empty string is returned if no error
+   * occurred
+   *         during parsing.
+   */
+  String getFormattedErrorMessages() const;
+
+  /** \brief Returns a vector of structured errors encountered while parsing.
+   * \return A (possibly empty) vector of StructuredError objects. Currently
+   *         only one error can be returned, but the caller should tolerate
+   * multiple
+   *         errors.  This can occur if the parser recovers from a non-fatal
+   *         parse error and then encounters additional errors.
+   */
+  std::vector<StructuredError> getStructuredErrors() const;
+
+  /** \brief Add a semantic error message.
+   * \param value JSON Value location associated with the error
+   * \param message The error message.
+   * \return \c true if the error was successfully added, \c false if the
+   * Value offset exceeds the document size.
+   */
+  bool pushError(const Value& value, const String& message);
+
+  /** \brief Add a semantic error message with extra context.
+   * \param value JSON Value location associated with the error
+   * \param message The error message.
+   * \param extra Additional JSON Value location to contextualize the error
+   * \return \c true if the error was successfully added, \c false if either
+   * Value offset exceeds the document size.
+   */
+  bool pushError(const Value& value, const String& message, const Value& extra);
+
+  /** \brief Return whether there are any errors.
+   * \return \c true if there are no errors to report \c false if
+   * errors have occurred.
+   */
+  bool good() const;
+
+private:
+  enum TokenType {
+    tokenEndOfStream = 0,
+    tokenObjectBegin,
+    tokenObjectEnd,
+    tokenArrayBegin,
+    tokenArrayEnd,
+    tokenString,
+    tokenNumber,
+    tokenTrue,
+    tokenFalse,
+    tokenNull,
+    tokenArraySeparator,
+    tokenMemberSeparator,
+    tokenComment,
+    tokenError
+  };
+
+  class Token {
+  public:
+    TokenType type_;
+    Location start_;
+    Location end_;
+  };
+
+  class ErrorInfo {
+  public:
+    Token token_;
+    String message_;
+    Location extra_;
+  };
+
+  typedef std::deque<ErrorInfo> Errors;
+
+  bool readToken(Token& token);
+  void skipSpaces();
+  bool match(Location pattern, int patternLength);
+  bool readComment();
+  bool readCStyleComment();
+  bool readCppStyleComment();
+  bool readString();
+  void readNumber();
+  bool readValue();
+  bool readObject(Token& token);
+  bool readArray(Token& token);
+  bool decodeNumber(Token& token);
+  bool decodeNumber(Token& token, Value& decoded);
+  bool decodeString(Token& token);
+  bool decodeString(Token& token, String& decoded);
+  bool decodeDouble(Token& token);
+  bool decodeDouble(Token& token, Value& decoded);
+  bool decodeUnicodeCodePoint(Token& token,
+                              Location& current,
+                              Location end,
+                              unsigned int& unicode);
+  bool decodeUnicodeEscapeSequence(Token& token,
+                                   Location& current,
+                                   Location end,
+                                   unsigned int& unicode);
+  bool addError(const String& message, Token& token, Location extra = nullptr);
+  bool recoverFromError(TokenType skipUntilToken);
+  bool addErrorAndRecover(const String& message,
+                          Token& token,
+                          TokenType skipUntilToken);
+  void skipUntilSpace();
+  Value& currentValue();
+  Char getNextChar();
+  void
+  getLocationLineAndColumn(Location location, int& line, int& column) const;
+  String getLocationLineAndColumn(Location location) const;
+  void addComment(Location begin, Location end, CommentPlacement placement);
+  void skipCommentTokens(Token& token);
+
+  static bool containsNewLine(Location begin, Location end);
+  static String normalizeEOL(Location begin, Location end);
+
+  typedef std::stack<Value*> Nodes;
+  Nodes nodes_;
+  Errors errors_;
+  String document_;
+  Location begin_{};
+  Location end_{};
+  Location current_{};
+  Location lastValueEnd_{};
+  Value* lastValue_{};
+  String commentsBefore_;
+  Features features_;
+  bool collectComments_{};
+}; // Reader
+
+/** Interface for reading JSON from a char array.
+ */
+class JSON_API CharReader {
+public:
+  virtual ~CharReader() = default;
+  /** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
+   document.
+   * The document must be a UTF-8 encoded string containing the document to
+   read.
+   *
+   * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
+   document to read.
+   * \param endDoc Pointer on the end of the UTF-8 encoded string of the
+   document to read.
+   *        Must be >= beginDoc.
+   * \param root [out] Contains the root value of the document if it was
+   *             successfully parsed.
+   * \param errs [out] Formatted error messages (if not NULL)
+   *        a user friendly string that lists errors in the parsed
+   * document.
+   * \return \c true if the document was successfully parsed, \c false if an
+   error occurred.
+   */
+  virtual bool parse(char const* beginDoc,
+                     char const* endDoc,
+                     Value* root,
+                     String* errs) = 0;
+
+  class JSON_API Factory {
+  public:
+    virtual ~Factory() = default;
+    /** \brief Allocate a CharReader via operator new().
+     * \throw std::exception if something goes wrong (e.g. invalid settings)
+     */
+    virtual CharReader* newCharReader() const = 0;
+  }; // Factory
+};   // CharReader
+
+/** \brief Build a CharReader implementation.
+
+Usage:
+\code
+  using namespace Json;
+  CharReaderBuilder builder;
+  builder["collectComments"] = false;
+  Value value;
+  String errs;
+  bool ok = parseFromStream(builder, std::cin, &value, &errs);
+\endcode
+*/
+class JSON_API CharReaderBuilder : public CharReader::Factory {
+public:
+  // Note: We use a Json::Value so that we can add data-members to this class
+  // without a major version bump.
+  /** Configuration of this builder.
+    Available settings (case-sensitive):
+    - `"collectComments": false or true`
+      - true to collect comment and allow writing them
+        back during serialization, false to discard comments.
+        This parameter is ignored if allowComments is false.
+    - `"allowComments": false or true`
+      - true if comments are allowed.
+    - `"strictRoot": false or true`
+      - true if root must be either an array or an object value
+    - `"allowDroppedNullPlaceholders": false or true`
+      - true if dropped null placeholders are allowed. (See
+    StreamWriterBuilder.)
+    - `"allowNumericKeys": false or true`
+      - true if numeric object keys are allowed.
+    - `"allowSingleQuotes": false or true`
+      - true if single quotes ('') are allowed for strings (both keys and values)
+    - `"stackLimit": integer`
+      - Exceeding stackLimit (recursive depth of `readValue()`) will
+        cause an exception.
+      - This is a security issue (seg-faults caused by deeply nested JSON),
+        so the default is low.
+    - `"failIfExtra": false or true`
+      - If true, `parse()` returns false when extra non-whitespace trails
+        the JSON value in the input string.
+    - `"rejectDupKeys": false or true`
+      - If true, `parse()` returns false when a key is duplicated within an
+    object.
+    - `"allowSpecialFloats": false or true`
+      - If true, special float values (NaNs and infinities) are allowed
+        and their values are losslessly restorable.
+
+    You can examine `settings_` yourself
+    to see the defaults. You can also write and read them just like any
+    JSON Value.
+    \sa setDefaults()
+    */
+  Json::Value settings_;
+
+  CharReaderBuilder();
+  ~CharReaderBuilder() override;
+
+  CharReader* newCharReader() const override;
+
+  /** \return true if 'settings' are legal and consistent;
+   *   otherwise, indicate bad settings via 'invalid'.
+   */
+  bool validate(Json::Value* invalid) const;
+
+  /** A simple way to update a specific setting.
+   */
+  Value& operator[](const String& key);
+
+  /** Called by ctor, but you can use this to reset settings_.
+   * \pre 'settings' != NULL (but Json::null is fine)
+   * \remark Defaults:
+   * \snippet src/lib_json/json_reader.cpp CharReaderBuilderDefaults
+   */
+  static void setDefaults(Json::Value* settings);
+  /** Same as old Features::strictMode().
+   * \pre 'settings' != NULL (but Json::null is fine)
+   * \remark Defaults:
+   * \snippet src/lib_json/json_reader.cpp CharReaderBuilderStrictMode
+   */
+  static void strictMode(Json::Value* settings);
+};
+
+/** Consume entire stream and use its begin/end.
+ * Someday we might have a real StreamReader, but for now this
+ * is convenient.
+ */
+bool JSON_API parseFromStream(CharReader::Factory const&,
+                              IStream&,
+                              Value* root,
+                              String* errs);
+
+/** \brief Read from 'sin' into 'root'.
+
+ Always keep comments from the input JSON.
+
+ This can be used to read a file into a particular sub-object.
+ For example:
+ \code
+ Json::Value root;
+ cin >> root["dir"]["file"];
+ cout << root;
+ \endcode
+ Result:
+ \verbatim
+ {
+ "dir": {
+     "file": {
+     // The input stream JSON would be nested here.
+     }
+ }
+ }
+ \endverbatim
+ \throw std::exception on parse error.
+ \see Json::operator<<()
+*/
+JSON_API IStream& operator>>(IStream&, Value&);
 
 } // namespace Json
 
+#pragma pack(pop)
+
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
 #endif // CPPTL_JSON_READER_H_INCLUDED
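A second brief sketch, combining CharReaderBuilder::strictMode() with the parseFromStream() helper declared above; the stream contents in the usage note are invented for the example.
\code
#include <json/json.h>
#include <iostream>
#include <sstream>

Json::Value readStrict(std::istream& in) {
  Json::CharReaderBuilder builder;
  // Same as the old Features::strictMode() defaults.
  Json::CharReaderBuilder::strictMode(&builder.settings_);
  Json::Value root;
  Json::String errs;
  if (!Json::parseFromStream(builder, in, &root, &errs)) {
    std::cerr << errs << '\n';  // human-readable parse errors
  }
  return root;
}

// Usage: std::istringstream in("{\"a\": 1}"); Json::Value v = readStrict(in);
\endcode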
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/value.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/value.h
index b013c9b..957f3f6 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/value.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/value.h
@@ -1,1109 +1,907 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef CPPTL_JSON_H_INCLUDED
-# define CPPTL_JSON_H_INCLUDED
+#define CPPTL_JSON_H_INCLUDED
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include "forwards.h"
+#include "forwards.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
-# include <string>
-# include <vector>
+#include <array>
+#include <exception>
+#include <memory>
+#include <string>
+#include <vector>
 
-# ifndef JSON_USE_CPPTL_SMALLMAP
-#  include <map>
-# else
-#  include <cpptl/smallmap.h>
-# endif
-# ifdef JSON_USE_CPPTL
-#  include <cpptl/forwards.h>
-# endif
+#ifndef JSON_USE_CPPTL_SMALLMAP
+#include <map>
+#else
+#include <cpptl/smallmap.h>
+#endif
+#ifdef JSON_USE_CPPTL
+#include <cpptl/forwards.h>
+#endif
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#pragma pack(push, 8)
 
 /** \brief JSON (JavaScript Object Notation).
  */
 namespace Json {
 
-   /** \brief Type of the value held by a Value object.
-    */
-   enum ValueType
-   {
-      nullValue = 0, ///< 'null' value
-      intValue,      ///< signed integer value
-      uintValue,     ///< unsigned integer value
-      realValue,     ///< double value
-      stringValue,   ///< UTF-8 string value
-      booleanValue,  ///< bool value
-      arrayValue,    ///< array value (ordered list)
-      objectValue    ///< object value (collection of name/value pairs).
-   };
+#if JSON_USE_EXCEPTION
+/** Base class for all exceptions we throw.
+ *
+ * We use nothing but these internally. Of course, STL can throw others.
+ */
+class JSON_API Exception : public std::exception {
+public:
+  Exception(String msg);
+  ~Exception() JSONCPP_NOEXCEPT override;
+  char const* what() const JSONCPP_NOEXCEPT override;
 
-   enum CommentPlacement
-   {
-      commentBefore = 0,        ///< a comment placed on the line before a value
-      commentAfterOnSameLine,   ///< a comment just after a value on the same line
-      commentAfter,             ///< a comment on the line after a value (only make sense for root value)
-      numberOfCommentPlacement
-   };
+protected:
+  String msg_;
+};
+
+/** Exceptions which the user cannot easily avoid.
+ *
+ * E.g. out-of-memory (when we use malloc), stack-overflow, malicious input
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API RuntimeError : public Exception {
+public:
+  RuntimeError(String const& msg);
+};
+
+/** Exceptions thrown by JSON_ASSERT/JSON_FAIL macros.
+ *
+ * These are precondition-violations (user bugs) and internal errors (our bugs).
+ *
+ * \remark derived from Json::Exception
+ */
+class JSON_API LogicError : public Exception {
+public:
+  LogicError(String const& msg);
+};
+#endif // JSON_USE_EXCEPTION
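A minimal sketch of catching these exception types when JSON_USE_EXCEPTION is enabled; that a failed numeric conversion throws Json::LogicError is an assumption for illustration, not something this header states.
\code
#include <json/json.h>
#include <iostream>

void demo() {
  Json::Value v("not a number");
  try {
    // Assumption: a failed conversion trips a JSON_ASSERT and throws
    // Json::LogicError (derived from Json::Exception).
    int n = v.asInt();
    (void)n;
  } catch (const Json::LogicError& e) {
    std::cerr << "logic error: " << e.what() << '\n';
  } catch (const Json::Exception& e) {
    std::cerr << "jsoncpp error: " << e.what() << '\n';
  }
}
\endcode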
+
+/// used internally
+[[noreturn]] void throwRuntimeError(String const& msg);
+/// used internally
+[[noreturn]] void throwLogicError(String const& msg);
+
+/** \brief Type of the value held by a Value object.
+ */
+enum ValueType {
+  nullValue = 0, ///< 'null' value
+  intValue,      ///< signed integer value
+  uintValue,     ///< unsigned integer value
+  realValue,     ///< double value
+  stringValue,   ///< UTF-8 string value
+  booleanValue,  ///< bool value
+  arrayValue,    ///< array value (ordered list)
+  objectValue    ///< object value (collection of name/value pairs).
+};
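For orientation, each ValueType corresponds to a Value constructor declared later in this header; a short sketch:
\code
Json::Value n;                    // nullValue
Json::Value i(42);                // intValue
Json::Value u(42u);               // uintValue
Json::Value d(3.14);              // realValue
Json::Value s("text");            // stringValue
Json::Value b(true);              // booleanValue
Json::Value a(Json::arrayValue);  // arrayValue
Json::Value o(Json::objectValue); // objectValue
// type() reports the stored discriminant:
bool ok = (i.type() == Json::intValue) && a.isArray() && o.isObject();
\endcode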
+
+enum CommentPlacement {
+  commentBefore = 0,      ///< a comment placed on the line before a value
+  commentAfterOnSameLine, ///< a comment just after a value on the same line
+  commentAfter, ///< a comment on the line after a value (only makes sense
+  /// for root value)
+  numberOfCommentPlacement
+};
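A hedged sketch of how these placements pair with the setComment()/getComment() members declared further down; that toStyledString() re-emits the comment is assumed from the "allow writing them back during serialization" note above.
\code
Json::Value v(42);
v.setComment(Json::String("// answer"), Json::commentBefore);
if (v.hasComment(Json::commentBefore)) {
  Json::String c = v.getComment(Json::commentBefore);  // "// answer"
}
Json::String styled = v.toStyledString();  // assumed to include the comment
\endcode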
+
+/** \brief Type of precision for formatting of real values.
+ */
+enum PrecisionType {
+  significantDigits = 0, ///< we set max number of significant digits in string
+  decimalPlaces          ///< we set max number of digits after "." in string
+};
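This enum is consumed on the writer side, which is not part of this hunk; the sketch below assumes StreamWriterBuilder settings named "precision" and "precisionType" ("significant"/"decimal") map onto it, so treat those keys as assumptions.
\code
#include <json/writer.h>

Json::StreamWriterBuilder wbuilder;
wbuilder["precision"] = 5;             // assumed setting name
wbuilder["precisionType"] = "decimal"; // assumed mapping to Json::decimalPlaces
Json::String out = Json::writeString(wbuilder, Json::Value(3.14159265));
\endcode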
 
 //# ifdef JSON_USE_CPPTL
 //   typedef CppTL::AnyEnumerator<const char *> EnumMemberNames;
 //   typedef CppTL::AnyEnumerator<const Value &> EnumValues;
 //# endif
 
-   /** \brief Lightweight wrapper to tag static string.
-    *
-    * Value constructor and objectValue member assignement takes advantage of the
-    * StaticString and avoid the cost of string duplication when storing the
-    * string or the member name.
-    *
-    * Example of usage:
-    * \code
-    * Json::Value aValue( StaticString("some text") );
-    * Json::Value object;
-    * static const StaticString code("code");
-    * object[code] = 1234;
-    * \endcode
-    */
-   class JSON_API StaticString
-   {
-   public:
-      explicit StaticString( const char *czstring )
-         : str_( czstring )
-      {
-      }
+/** \brief Lightweight wrapper to tag static string.
+ *
+ * The Value constructor and objectValue member assignment take advantage of
+ * StaticString to avoid the cost of string duplication when storing the
+ * string or the member name.
+ *
+ * Example of usage:
+ * \code
+ * Json::Value aValue( StaticString("some text") );
+ * Json::Value object;
+ * static const StaticString code("code");
+ * object[code] = 1234;
+ * \endcode
+ */
+class JSON_API StaticString {
+public:
+  explicit StaticString(const char* czstring) : c_str_(czstring) {}
 
-      operator const char *() const
-      {
-         return str_;
-      }
+  operator const char*() const { return c_str_; }
 
-      const char *c_str() const
-      {
-         return str_;
-      }
+  const char* c_str() const { return c_str_; }
 
-   private:
-      const char *str_;
-   };
+private:
+  const char* c_str_;
+};
 
-   /** \brief Represents a <a HREF="http://www.json.org">JSON</a> value.
-    *
-    * This class is a discriminated union wrapper that can represents a:
-    * - signed integer [range: Value::minInt - Value::maxInt]
-    * - unsigned integer (range: 0 - Value::maxUInt)
-    * - double
-    * - UTF-8 string
-    * - boolean
-    * - 'null'
-    * - an ordered list of Value
-    * - collection of name/value pairs (javascript object)
-    *
-    * The type of the held value is represented by a #ValueType and 
-    * can be obtained using type().
-    *
-    * values of an #objectValue or #arrayValue can be accessed using operator[]() methods. 
-    * Non const methods will automatically create the a #nullValue element 
-    * if it does not exist. 
-    * The sequence of an #arrayValue will be automatically resize and initialized 
-    * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue.
-    *
-    * The get() methods can be used to obtanis default value in the case the required element
-    * does not exist.
-    *
-    * It is possible to iterate over the list of a #objectValue values using 
-    * the getMemberNames() method.
-    */
-   class JSON_API Value 
-   {
-      friend class ValueIteratorBase;
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      friend class ValueInternalLink;
-      friend class ValueInternalMap;
-# endif
-   public:
-      typedef std::vector<std::string> Members;
-      typedef ValueIterator iterator;
-      typedef ValueConstIterator const_iterator;
-      typedef Json::UInt UInt;
-      typedef Json::Int Int;
-# if defined(JSON_HAS_INT64)
-      typedef Json::UInt64 UInt64;
-      typedef Json::Int64 Int64;
+/** \brief Represents a <a HREF="http://www.json.org">JSON</a> value.
+ *
+ * This class is a discriminated union wrapper that can represent a:
+ * - signed integer [range: Value::minInt - Value::maxInt]
+ * - unsigned integer (range: 0 - Value::maxUInt)
+ * - double
+ * - UTF-8 string
+ * - boolean
+ * - 'null'
+ * - an ordered list of Value
+ * - collection of name/value pairs (javascript object)
+ *
+ * The type of the held value is represented by a #ValueType and
+ * can be obtained using type().
+ *
+ * Values of an #objectValue or #arrayValue can be accessed using operator[]()
+ * methods.
+ * Non-const methods will automatically create a #nullValue element
+ * if it does not exist.
+ * The sequence of an #arrayValue will be automatically resized and initialized
+ * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue.
+ *
+ * The get() methods can be used to obtain a default value in case the
+ * required element does not exist.
+ *
+ * It is possible to iterate over the list of member keys of an object using
+ * the getMemberNames() method.
+ *
+ * \note #Value string-lengths fit in size_t, but keys must be < 2^30.
+ * (The reason is an implementation detail.) A #CharReader will raise an
+ * exception if a bound is exceeded to avoid security holes in your app,
+ * but the Value API does *not* check bounds. That is the responsibility
+ * of the caller.
+ */
+class JSON_API Value {
+  friend class ValueIteratorBase;
+
+public:
+  typedef std::vector<String> Members;
+  typedef ValueIterator iterator;
+  typedef ValueConstIterator const_iterator;
+  typedef Json::UInt UInt;
+  typedef Json::Int Int;
+#if defined(JSON_HAS_INT64)
+  typedef Json::UInt64 UInt64;
+  typedef Json::Int64 Int64;
 #endif // defined(JSON_HAS_INT64)
-      typedef Json::LargestInt LargestInt;
-      typedef Json::LargestUInt LargestUInt;
-      typedef Json::ArrayIndex ArrayIndex;
+  typedef Json::LargestInt LargestInt;
+  typedef Json::LargestUInt LargestUInt;
+  typedef Json::ArrayIndex ArrayIndex;
 
-      static const Value null;
-      /// Minimum signed integer value that can be stored in a Json::Value.
-      static const LargestInt minLargestInt;
-      /// Maximum signed integer value that can be stored in a Json::Value.
-      static const LargestInt maxLargestInt;
-      /// Maximum unsigned integer value that can be stored in a Json::Value.
-      static const LargestUInt maxLargestUInt;
+  // Required for boost integration, e.g. BOOST_TEST
+  typedef std::string value_type;
 
-      /// Minimum signed int value that can be stored in a Json::Value.
-      static const Int minInt;
-      /// Maximum signed int value that can be stored in a Json::Value.
-      static const Int maxInt;
-      /// Maximum unsigned int value that can be stored in a Json::Value.
-      static const UInt maxUInt;
+#if JSON_USE_NULLREF
+  // Binary compatibility kludges, do not use.
+  static const Value& null;
+  static const Value& nullRef;
+#endif
 
-# if defined(JSON_HAS_INT64)
-      /// Minimum signed 64 bits int value that can be stored in a Json::Value.
-      static const Int64 minInt64;
-      /// Maximum signed 64 bits int value that can be stored in a Json::Value.
-      static const Int64 maxInt64;
-      /// Maximum unsigned 64 bits int value that can be stored in a Json::Value.
-      static const UInt64 maxUInt64;
+  // null and nullRef are deprecated, use this instead.
+  static Value const& nullSingleton();
+
+  /// Minimum signed integer value that can be stored in a Json::Value.
+  static const LargestInt minLargestInt;
+  /// Maximum signed integer value that can be stored in a Json::Value.
+  static const LargestInt maxLargestInt;
+  /// Maximum unsigned integer value that can be stored in a Json::Value.
+  static const LargestUInt maxLargestUInt;
+
+  /// Minimum signed int value that can be stored in a Json::Value.
+  static const Int minInt;
+  /// Maximum signed int value that can be stored in a Json::Value.
+  static const Int maxInt;
+  /// Maximum unsigned int value that can be stored in a Json::Value.
+  static const UInt maxUInt;
+
+#if defined(JSON_HAS_INT64)
+  /// Minimum signed 64 bits int value that can be stored in a Json::Value.
+  static const Int64 minInt64;
+  /// Maximum signed 64 bits int value that can be stored in a Json::Value.
+  static const Int64 maxInt64;
+  /// Maximum unsigned 64 bits int value that can be stored in a Json::Value.
+  static const UInt64 maxUInt64;
 #endif // defined(JSON_HAS_INT64)
 
-   private:
+  /// Default precision for real value for string representation.
+  static const UInt defaultRealPrecision;
+
+// Workaround for a bug in NVIDIA's CUDA 9.1 nvcc compiler
+// when using gcc and clang backend compilers. CZString
+// cannot be defined as private. See issue #486.
+#ifdef __NVCC__
+public:
+#else
+private:
+#endif
 #ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-# ifndef JSON_VALUE_USE_INTERNAL_MAP
-      class CZString 
-      {
-      public:
-         enum DuplicationPolicy 
-         {
-            noDuplication = 0,
-            duplicate,
-            duplicateOnCopy
-         };
-         CZString( ArrayIndex index );
-         CZString( const char *cstr, DuplicationPolicy allocate );
-         CZString( const CZString &other );
-         ~CZString();
-         CZString &operator =( const CZString &other );
-         bool operator<( const CZString &other ) const;
-         bool operator==( const CZString &other ) const;
-         ArrayIndex index() const;
-         const char *c_str() const;
-         bool isStaticString() const;
-      private:
-         void swap( CZString &other );
-         const char *cstr_;
-         ArrayIndex index_;
-      };
+  class CZString {
+  public:
+    enum DuplicationPolicy { noDuplication = 0, duplicate, duplicateOnCopy };
+    CZString(ArrayIndex index);
+    CZString(char const* str, unsigned length, DuplicationPolicy allocate);
+    CZString(CZString const& other);
+    CZString(CZString&& other);
+    ~CZString();
+    CZString& operator=(const CZString& other);
+    CZString& operator=(CZString&& other);
 
-   public:
-#  ifndef JSON_USE_CPPTL_SMALLMAP
-      typedef std::map<CZString, Value> ObjectValues;
-#  else
-      typedef CppTL::SmallMap<CZString, Value> ObjectValues;
-#  endif // ifndef JSON_USE_CPPTL_SMALLMAP
-# endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
+    bool operator<(CZString const& other) const;
+    bool operator==(CZString const& other) const;
+    ArrayIndex index() const;
+    // const char* c_str() const; ///< \deprecated
+    char const* data() const;
+    unsigned length() const;
+    bool isStaticString() const;
+
+  private:
+    void swap(CZString& other);
+
+    struct StringStorage {
+      unsigned policy_ : 2;
+      unsigned length_ : 30; // 1GB max
+    };
+
+    char const* cstr_; // actually, a prefixed string, unless policy is noDup
+    union {
+      ArrayIndex index_;
+      StringStorage storage_;
+    };
+  };
+
+public:
+#ifndef JSON_USE_CPPTL_SMALLMAP
+  typedef std::map<CZString, Value> ObjectValues;
+#else
+  typedef CppTL::SmallMap<CZString, Value> ObjectValues;
+#endif // ifndef JSON_USE_CPPTL_SMALLMAP
 #endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
 
-   public:
-      /** \brief Create a default Value of the given type.
+public:
+  /** \brief Create a default Value of the given type.
 
-        This is a very useful constructor.
-        To create an empty array, pass arrayValue.
-        To create an empty object, pass objectValue.
-        Another Value can then be set to this one by assignment.
-    This is useful since clear() and resize() will not alter types.
+    This is a very useful constructor.
+    To create an empty array, pass arrayValue.
+    To create an empty object, pass objectValue.
+    Another Value can then be set to this one by assignment.
+    This is useful since clear() and resize() will not alter types.
 
-        Examples:
-    \code
-    Json::Value null_value; // null
-    Json::Value arr_value(Json::arrayValue); // []
-    Json::Value obj_value(Json::objectValue); // {}
-    \endcode
-      */
-      Value( ValueType type = nullValue );
-      Value( Int value );
-      Value( UInt value );
+    Examples:
+\code
+Json::Value null_value; // null
+Json::Value arr_value(Json::arrayValue); // []
+Json::Value obj_value(Json::objectValue); // {}
+\endcode
+  */
+  Value(ValueType type = nullValue);
+  Value(Int value);
+  Value(UInt value);
 #if defined(JSON_HAS_INT64)
-      Value( Int64 value );
-      Value( UInt64 value );
+  Value(Int64 value);
+  Value(UInt64 value);
 #endif // if defined(JSON_HAS_INT64)
-      Value( double value );
-      Value( const char *value );
-      Value( const char *beginValue, const char *endValue );
-      /** \brief Constructs a value from a static string.
+  Value(double value);
+  Value(const char* value); ///< Copy until first 0. (NULL causes a seg-fault.)
+  Value(const char* begin, const char* end); ///< Copy all, including zeroes.
+  /** \brief Constructs a value from a static string.
 
-       * Like other value string constructor but do not duplicate the string for
-       * internal storage. The given string must remain alive after the call to this
-       * constructor.
-       * Example of usage:
-       * \code
-       * Json::Value aValue( StaticString("some text") );
-       * \endcode
-       */
-      Value( const StaticString &value );
-      Value( const std::string &value );
-# ifdef JSON_USE_CPPTL
-      Value( const CppTL::ConstString &value );
-# endif
-      Value( bool value );
-      Value( const Value &other );
-      ~Value();
-
-      Value &operator=( const Value &other );
-      /// Swap values.
-      /// \note Currently, comments are intentionally not swapped, for
-      /// both logic and efficiency.
-      void swap( Value &other );
-
-      ValueType type() const;
-
-      bool operator <( const Value &other ) const;
-      bool operator <=( const Value &other ) const;
-      bool operator >=( const Value &other ) const;
-      bool operator >( const Value &other ) const;
-
-      bool operator ==( const Value &other ) const;
-      bool operator !=( const Value &other ) const;
-
-      int compare( const Value &other ) const;
-
-      const char *asCString() const;
-      std::string asString() const;
-# ifdef JSON_USE_CPPTL
-      CppTL::ConstString asConstString() const;
-# endif
-      Int asInt() const;
-      UInt asUInt() const;
-#if defined(JSON_HAS_INT64)
-      Int64 asInt64() const;
-      UInt64 asUInt64() const;
-#endif // if defined(JSON_HAS_INT64)
-      LargestInt asLargestInt() const;
-      LargestUInt asLargestUInt() const;
-      float asFloat() const;
-      double asDouble() const;
-      bool asBool() const;
-
-      bool isNull() const;
-      bool isBool() const;
-      bool isInt() const;
-      bool isInt64() const;
-      bool isUInt() const;
-      bool isUInt64() const;
-      bool isIntegral() const;
-      bool isDouble() const;
-      bool isNumeric() const;
-      bool isString() const;
-      bool isArray() const;
-      bool isObject() const;
-
-      bool isConvertibleTo( ValueType other ) const;
-
-      /// Number of values in array or object
-      ArrayIndex size() const;
-
-      /// \brief Return true if empty array, empty object, or null;
-      /// otherwise, false.
-      bool empty() const;
-
-      /// Return isNull()
-      bool operator!() const;
-
-      /// Remove all object members and array elements.
-      /// \pre type() is arrayValue, objectValue, or nullValue
-      /// \post type() is unchanged
-      void clear();
-
-      /// Resize the array to size elements. 
-      /// New elements are initialized to null.
-      /// May only be called on nullValue or arrayValue.
-      /// \pre type() is arrayValue or nullValue
-      /// \post type() is arrayValue
-      void resize( ArrayIndex size );
-
-      /// Access an array element (zero based index ).
-      /// If the array contains less than index element, then null value are inserted
-      /// in the array so that its size is index+1.
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      Value &operator[]( ArrayIndex index );
-
-      /// Access an array element (zero based index ).
-      /// If the array contains less than index element, then null value are inserted
-      /// in the array so that its size is index+1.
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      Value &operator[]( int index );
-
-      /// Access an array element (zero based index )
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      const Value &operator[]( ArrayIndex index ) const;
-
-      /// Access an array element (zero based index )
-      /// (You may need to say 'value[0u]' to get your compiler to distinguish
-      ///  this from the operator[] which takes a string.)
-      const Value &operator[]( int index ) const;
-
-      /// If the array contains at least index+1 elements, returns the element value, 
-      /// otherwise returns defaultValue.
-      Value get( ArrayIndex index, 
-                 const Value &defaultValue ) const;
-      /// Return true if index < size().
-      bool isValidIndex( ArrayIndex index ) const;
-      /// \brief Append value to array at the end.
-      ///
-      /// Equivalent to jsonvalue[jsonvalue.size()] = value;
-      Value &append( const Value &value );
-
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const char *key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const char *key ) const;
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const std::string &key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const std::string &key ) const;
-      /** \brief Access an object value by name, create a null member if it does not exist.
-
-       * If the object as no entry for that name, then the member name used to store
-       * the new entry is not duplicated.
-       * Example of use:
-       * \code
-       * Json::Value object;
-       * static const StaticString code("code");
-       * object[code] = 1234;
-       * \endcode
-       */
-      Value &operator[]( const StaticString &key );
-# ifdef JSON_USE_CPPTL
-      /// Access an object value by name, create a null member if it does not exist.
-      Value &operator[]( const CppTL::ConstString &key );
-      /// Access an object value by name, returns null if there is no member with that name.
-      const Value &operator[]( const CppTL::ConstString &key ) const;
-# endif
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const char *key, 
-                 const Value &defaultValue ) const;
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const std::string &key,
-                 const Value &defaultValue ) const;
-# ifdef JSON_USE_CPPTL
-      /// Return the member named key if it exist, defaultValue otherwise.
-      Value get( const CppTL::ConstString &key,
-                 const Value &defaultValue ) const;
-# endif
-      /// \brief Remove and return the named member.  
-      ///
-      /// Do nothing if it did not exist.
-      /// \return the removed Value, or null.
-      /// \pre type() is objectValue or nullValue
-      /// \post type() is unchanged
-      Value removeMember( const char* key );
-      /// Same as removeMember(const char*)
-      Value removeMember( const std::string &key );
-
-      /// Return true if the object has a member named key.
-      bool isMember( const char *key ) const;
-      /// Return true if the object has a member named key.
-      bool isMember( const std::string &key ) const;
-# ifdef JSON_USE_CPPTL
-      /// Return true if the object has a member named key.
-      bool isMember( const CppTL::ConstString &key ) const;
-# endif
-
-      /// \brief Return a list of the member names.
-      ///
-      /// If null, return an empty list.
-      /// \pre type() is objectValue or nullValue
-      /// \post if type() was nullValue, it remains nullValue
-      Members getMemberNames() const;
-
-//# ifdef JSON_USE_CPPTL
-//      EnumMemberNames enumMemberNames() const;
-//      EnumValues enumValues() const;
-//# endif
-
-      /// Comments must be //... or /* ... */
-      void setComment( const char *comment,
-                       CommentPlacement placement );
-      /// Comments must be //... or /* ... */
-      void setComment( const std::string &comment,
-                       CommentPlacement placement );
-      bool hasComment( CommentPlacement placement ) const;
-      /// Include delimiters and embedded newlines.
-      std::string getComment( CommentPlacement placement ) const;
-
-      std::string toStyledString() const;
-
-      const_iterator begin() const;
-      const_iterator end() const;
-
-      iterator begin();
-      iterator end();
-
-   private:
-      Value &resolveReference( const char *key, 
-                               bool isStatic );
-
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      inline bool isItemAvailable() const
-      {
-         return itemIsUsed_ == 0;
-      }
-
-      inline void setItemUsed( bool isUsed = true )
-      {
-         itemIsUsed_ = isUsed ? 1 : 0;
-      }
-
-      inline bool isMemberNameStatic() const
-      {
-         return memberNameIsStatic_ == 0;
-      }
-
-      inline void setMemberNameIsStatic( bool isStatic )
-      {
-         memberNameIsStatic_ = isStatic ? 1 : 0;
-      }
-# endif // # ifdef JSON_VALUE_USE_INTERNAL_MAP
-
-   private:
-      struct CommentInfo
-      {
-         CommentInfo();
-         ~CommentInfo();
-
-         void setComment( const char *text );
-
-         char *comment_;
-      };
-
-      //struct MemberNamesTransform
-      //{
-      //   typedef const char *result_type;
-      //   const char *operator()( const CZString &name ) const
-      //   {
-      //      return name.c_str();
-      //   }
-      //};
-
-      union ValueHolder
-      {
-         LargestInt int_;
-         LargestUInt uint_;
-         double real_;
-         bool bool_;
-         char *string_;
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-         ValueInternalArray *array_;
-         ValueInternalMap *map_;
-#else
-         ObjectValues *map_;
-# endif
-      } value_;
-      ValueType type_ : 8;
-      int allocated_ : 1;     // Notes: if declared as bool, bitfield is useless.
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-      unsigned int itemIsUsed_ : 1;      // used by the ValueInternalMap container.
-      int memberNameIsStatic_ : 1;       // used by the ValueInternalMap container.
-# endif
-      CommentInfo *comments_;
-   };
-
-
-   /** \brief Experimental and untested: represents an element of the "path" to access a node.
-    */
-   class PathArgument
-   {
-   public:
-      friend class Path;
-
-      PathArgument();
-      PathArgument( ArrayIndex index );
-      PathArgument( const char *key );
-      PathArgument( const std::string &key );
-
-   private:
-      enum Kind
-      {
-         kindNone = 0,
-         kindIndex,
-         kindKey
-      };
-      std::string key_;
-      ArrayIndex index_;
-      Kind kind_;
-   };
-
-   /** \brief Experimental and untested: represents a "path" to access a node.
-    *
-    * Syntax:
-    * - "." => root node
-    * - ".[n]" => elements at index 'n' of root node (an array value)
-    * - ".name" => member named 'name' of root node (an object value)
-    * - ".name1.name2.name3"
-    * - ".[0][1][2].name1[3]"
-    * - ".%" => member name is provided as parameter
-    * - ".[%]" => index is provied as parameter
-    */
-   class Path
-   {
-   public:
-      Path( const std::string &path,
-            const PathArgument &a1 = PathArgument(),
-            const PathArgument &a2 = PathArgument(),
-            const PathArgument &a3 = PathArgument(),
-            const PathArgument &a4 = PathArgument(),
-            const PathArgument &a5 = PathArgument() );
-
-      const Value &resolve( const Value &root ) const;
-      Value resolve( const Value &root, 
-                     const Value &defaultValue ) const;
-      /// Creates the "path" to access the specified node and returns a reference on the node.
-      Value &make( Value &root ) const;
-
-   private:
-      typedef std::vector<const PathArgument *> InArgs;
-      typedef std::vector<PathArgument> Args;
-
-      void makePath( const std::string &path,
-                     const InArgs &in );
-      void addPathInArg( const std::string &path, 
-                         const InArgs &in, 
-                         InArgs::const_iterator &itInArg, 
-                         PathArgument::Kind kind );
-      void invalidPath( const std::string &path, 
-                        int location );
-
-      Args args_;
-   };
-
-
-
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   /** \brief Allocator to customize Value internal map.
-    * Below is an example of a simple implementation (default implementation actually
-    * use memory pool for speed).
-    * \code
-      class DefaultValueMapAllocator : public ValueMapAllocator
-      {
-      public: // overridden from ValueMapAllocator
-         virtual ValueInternalMap *newMap()
-         {
-            return new ValueInternalMap();
-         }
-
-         virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other )
-         {
-            return new ValueInternalMap( other );
-         }
-
-         virtual void destructMap( ValueInternalMap *map )
-         {
-            delete map;
-         }
-
-         virtual ValueInternalLink *allocateMapBuckets( unsigned int size )
-         {
-            return new ValueInternalLink[size];
-         }
-
-         virtual void releaseMapBuckets( ValueInternalLink *links )
-         {
-            delete [] links;
-         }
-
-         virtual ValueInternalLink *allocateMapLink()
-         {
-            return new ValueInternalLink();
-         }
-
-         virtual void releaseMapLink( ValueInternalLink *link )
-         {
-            delete link;
-         }
-      };
-    * \endcode
-    */ 
-   class JSON_API ValueMapAllocator
-   {
-   public:
-      virtual ~ValueMapAllocator();
-      virtual ValueInternalMap *newMap() = 0;
-      virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other ) = 0;
-      virtual void destructMap( ValueInternalMap *map ) = 0;
-      virtual ValueInternalLink *allocateMapBuckets( unsigned int size ) = 0;
-      virtual void releaseMapBuckets( ValueInternalLink *links ) = 0;
-      virtual ValueInternalLink *allocateMapLink() = 0;
-      virtual void releaseMapLink( ValueInternalLink *link ) = 0;
-   };
-
-   /** \brief ValueInternalMap hash-map bucket chain link (for internal use only).
-    * \internal previous_ & next_ allows for bidirectional traversal.
-    */
-   class JSON_API ValueInternalLink
-   {
-   public:
-      enum { itemPerLink = 6 };  // sizeof(ValueInternalLink) = 128 on 32 bits architecture.
-      enum InternalFlags { 
-         flagAvailable = 0,
-         flagUsed = 1
-      };
-
-      ValueInternalLink();
-
-      ~ValueInternalLink();
-
-      Value items_[itemPerLink];
-      char *keys_[itemPerLink];
-      ValueInternalLink *previous_;
-      ValueInternalLink *next_;
-   };
-
-
-   /** \brief A linked page based hash-table implementation used internally by Value.
-    * \internal ValueInternalMap is a tradional bucket based hash-table, with a linked
-    * list in each bucket to handle collision. There is an addional twist in that
-    * each node of the collision linked list is a page containing a fixed amount of
-    * value. This provides a better compromise between memory usage and speed.
-    * 
-    * Each bucket is made up of a chained list of ValueInternalLink. The last
-    * link of a given bucket can be found in the 'previous_' field of the following bucket.
-    * The last link of the last bucket is stored in tailLink_ as it has no following bucket.
-    * Only the last link of a bucket may contains 'available' item. The last link always
-    * contains at least one element unless is it the bucket one very first link.
-    */
-   class JSON_API ValueInternalMap
-   {
-      friend class ValueIteratorBase;
-      friend class Value;
-   public:
-      typedef unsigned int HashKey;
-      typedef unsigned int BucketIndex;
-
-# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-      struct IteratorState
-      {
-         IteratorState() 
-            : map_(0)
-            , link_(0)
-            , itemIndex_(0)
-            , bucketIndex_(0) 
-         {
-         }
-         ValueInternalMap *map_;
-         ValueInternalLink *link_;
-         BucketIndex itemIndex_;
-         BucketIndex bucketIndex_;
-      };
-# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-      ValueInternalMap();
-      ValueInternalMap( const ValueInternalMap &other );
-      ValueInternalMap &operator =( const ValueInternalMap &other );
-      ~ValueInternalMap();
-
-      void swap( ValueInternalMap &other );
-
-      BucketIndex size() const;
-
-      void clear();
-
-      bool reserveDelta( BucketIndex growth );
-
-      bool reserve( BucketIndex newItemCount );
-
-      const Value *find( const char *key ) const;
-
-      Value *find( const char *key );
-
-      Value &resolveReference( const char *key, 
-                               bool isStatic );
-
-      void remove( const char *key );
-
-      void doActualRemove( ValueInternalLink *link, 
-                           BucketIndex index,
-                           BucketIndex bucketIndex );
-
-      ValueInternalLink *&getLastLinkInBucket( BucketIndex bucketIndex );
-
-      Value &setNewItem( const char *key, 
-                         bool isStatic, 
-                         ValueInternalLink *link, 
-                         BucketIndex index );
-
-      Value &unsafeAdd( const char *key, 
-                        bool isStatic, 
-                        HashKey hashedKey );
-
-      HashKey hash( const char *key ) const;
-
-      int compare( const ValueInternalMap &other ) const;
-
-   private:
-      void makeBeginIterator( IteratorState &it ) const;
-      void makeEndIterator( IteratorState &it ) const;
-      static bool equals( const IteratorState &x, const IteratorState &other );
-      static void increment( IteratorState &iterator );
-      static void incrementBucket( IteratorState &iterator );
-      static void decrement( IteratorState &iterator );
-      static const char *key( const IteratorState &iterator );
-      static const char *key( const IteratorState &iterator, bool &isStatic );
-      static Value &value( const IteratorState &iterator );
-      static int distance( const IteratorState &x, const IteratorState &y );
-
-   private:
-      ValueInternalLink *buckets_;
-      ValueInternalLink *tailLink_;
-      BucketIndex bucketsSize_;
-      BucketIndex itemCount_;
-   };
-
-   /** \brief A simplified deque implementation used internally by Value.
-   * \internal
-   * It is based on a list of fixed "page", each page contains a fixed number of items.
-   * Instead of using a linked-list, a array of pointer is used for fast item look-up.
-   * Look-up for an element is as follow:
-   * - compute page index: pageIndex = itemIndex / itemsPerPage
-   * - look-up item in page: pages_[pageIndex][itemIndex % itemsPerPage]
+   * Like the other string-value constructors, but does not duplicate the
+   * string for internal storage. The given string must remain alive after the
+   * call to this constructor.
+   * \note This works only for null-terminated strings. (We cannot change the
+   *   size of this class, so we have nowhere to store the length,
+   *   which might be computed later for various operations.)
    *
-   * Insertion is amortized constant time (only the array containing the index of pointers
-   * need to be reallocated when items are appended).
+   * Example of usage:
+   * \code
+   * static StaticString foo("some text");
+   * Json::Value aValue(foo);
+   * \endcode
    */
-   class JSON_API ValueInternalArray
-   {
-      friend class Value;
-      friend class ValueIteratorBase;
-   public:
-      enum { itemsPerPage = 8 };    // should be a power of 2 for fast divide and modulo.
-      typedef Value::ArrayIndex ArrayIndex;
-      typedef unsigned int PageIndex;
+  Value(const StaticString& value);
+  Value(const String& value); ///< Copy data() until size(). Embedded
+                              ///< zeroes too.
+#ifdef JSON_USE_CPPTL
+  Value(const CppTL::ConstString& value);
+#endif
+  Value(bool value);
+  Value(const Value& other);
+  Value(Value&& other);
+  ~Value();
 
-# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-      struct IteratorState // Must be a POD
-      {
-         IteratorState() 
-            : array_(0)
-            , currentPageIndex_(0)
-            , currentItemIndex_(0) 
-         {
-         }
-         ValueInternalArray *array_;
-         Value **currentPageIndex_;
-         unsigned int currentItemIndex_;
-      };
-# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
+  /// \note Overwrite existing comments. To preserve comments, use
+  /// #swapPayload().
+  Value& operator=(const Value& other);
+  Value& operator=(Value&& other);
 
-      ValueInternalArray();
-      ValueInternalArray( const ValueInternalArray &other );
-      ValueInternalArray &operator =( const ValueInternalArray &other );
-      ~ValueInternalArray();
-      void swap( ValueInternalArray &other );
+  /// Swap everything.
+  void swap(Value& other);
+  /// Swap values but leave comments and source offsets in place.
+  void swapPayload(Value& other);
 
-      void clear();
-      void resize( ArrayIndex newSize );
+  /// copy everything.
+  void copy(const Value& other);
+  /// copy values but leave comments and source offsets in place.
+  void copyPayload(const Value& other);
 
-      Value &resolveReference( ArrayIndex index );
+  ValueType type() const;
 
-      Value *find( ArrayIndex index ) const;
+  /// Compare payload only, not comments etc.
+  bool operator<(const Value& other) const;
+  bool operator<=(const Value& other) const;
+  bool operator>=(const Value& other) const;
+  bool operator>(const Value& other) const;
+  bool operator==(const Value& other) const;
+  bool operator!=(const Value& other) const;
+  int compare(const Value& other) const;
 
-      ArrayIndex size() const;
+  const char* asCString() const; ///< Embedded zeroes could cause you trouble!
+#if JSONCPP_USING_SECURE_MEMORY
+  unsigned getCStringLength() const; // Allows you to understand the length of
+                                     // the CString
+#endif
+  String asString() const; ///< Embedded zeroes are possible.
+  /** Get raw char* of string-value.
+   *  \return false if !string. (Seg-faults if begin or end is NULL.)
+   */
+  bool getString(char const** begin, char const** end) const;
+#ifdef JSON_USE_CPPTL
+  CppTL::ConstString asConstString() const;
+#endif
+  Int asInt() const;
+  UInt asUInt() const;
+#if defined(JSON_HAS_INT64)
+  Int64 asInt64() const;
+  UInt64 asUInt64() const;
+#endif // if defined(JSON_HAS_INT64)
+  LargestInt asLargestInt() const;
+  LargestUInt asLargestUInt() const;
+  float asFloat() const;
+  double asDouble() const;
+  bool asBool() const;
 
-      int compare( const ValueInternalArray &other ) const;
+  bool isNull() const;
+  bool isBool() const;
+  bool isInt() const;
+  bool isInt64() const;
+  bool isUInt() const;
+  bool isUInt64() const;
+  bool isIntegral() const;
+  bool isDouble() const;
+  bool isNumeric() const;
+  bool isString() const;
+  bool isArray() const;
+  bool isObject() const;
 
-   private:
-      static bool equals( const IteratorState &x, const IteratorState &other );
-      static void increment( IteratorState &iterator );
-      static void decrement( IteratorState &iterator );
-      static Value &dereference( const IteratorState &iterator );
-      static Value &unsafeDereference( const IteratorState &iterator );
-      static int distance( const IteratorState &x, const IteratorState &y );
-      static ArrayIndex indexOf( const IteratorState &iterator );
-      void makeBeginIterator( IteratorState &it ) const;
-      void makeEndIterator( IteratorState &it ) const;
-      void makeIterator( IteratorState &it, ArrayIndex index ) const;
+  bool isConvertibleTo(ValueType other) const;
 
-      void makeIndexValid( ArrayIndex index );
+  /// Number of values in array or object
+  ArrayIndex size() const;
 
-      Value **pages_;
-      ArrayIndex size_;
-      PageIndex pageCount_;
-   };
+  /// \brief Return true if empty array, empty object, or null;
+  /// otherwise, false.
+  bool empty() const;
 
-   /** \brief Experimental: do not use. Allocator to customize Value internal array.
-    * Below is an example of a simple implementation (actual implementation use
-    * memory pool).
-      \code
-class DefaultValueArrayAllocator : public ValueArrayAllocator
-{
-public: // overridden from ValueArrayAllocator
-   virtual ~DefaultValueArrayAllocator()
-   {
-   }
+  /// Return !isNull()
+  JSONCPP_OP_EXPLICIT operator bool() const;
 
-   virtual ValueInternalArray *newArray()
-   {
-      return new ValueInternalArray();
-   }
+  /// Remove all object members and array elements.
+  /// \pre type() is arrayValue, objectValue, or nullValue
+  /// \post type() is unchanged
+  void clear();
 
-   virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other )
-   {
-      return new ValueInternalArray( other );
-   }
+  /// Resize the array to newSize elements.
+  /// New elements are initialized to null.
+  /// May only be called on nullValue or arrayValue.
+  /// \pre type() is arrayValue or nullValue
+  /// \post type() is arrayValue
+  void resize(ArrayIndex newSize);
 
-   virtual void destruct( ValueInternalArray *array )
-   {
-      delete array;
-   }
+  /// Access an array element (zero based index ).
+  /// If the array contains fewer than index+1 elements, then null values are
+  /// inserted in the array so that its size is index+1.
+  /// (You may need to say 'value[0u]' to get your compiler to distinguish
+  ///  this from the operator[] which takes a string.)
+  Value& operator[](ArrayIndex index);
 
-   virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                          ValueInternalArray::PageIndex &indexCount,
-                                          ValueInternalArray::PageIndex minNewIndexCount )
-   {
-      ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1;
-      if ( minNewIndexCount > newIndexCount )
-         newIndexCount = minNewIndexCount;
-      void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount );
-      if ( !newIndexes )
-         throw std::bad_alloc();
-      indexCount = newIndexCount;
-      indexes = static_cast<Value **>( newIndexes );
-   }
-   virtual void releaseArrayPageIndex( Value **indexes, 
-                                       ValueInternalArray::PageIndex indexCount )
-   {
-      if ( indexes )
-         free( indexes );
-   }
+  /// Access an array element (zero based index ).
+  /// If the array contains fewer than index+1 elements, then null values are
+  /// inserted in the array so that its size is index+1.
+  /// (You may need to say 'value[0u]' to get your compiler to distinguish
+  ///  this from the operator[] which takes a string.)
+  Value& operator[](int index);
 
-   virtual Value *allocateArrayPage()
-   {
-      return static_cast<Value *>( malloc( sizeof(Value) * ValueInternalArray::itemsPerPage ) );
-   }
+  /// Access an array element (zero based index )
+  /// (You may need to say 'value[0u]' to get your compiler to distinguish
+  ///  this from the operator[] which takes a string.)
+  const Value& operator[](ArrayIndex index) const;
 
-   virtual void releaseArrayPage( Value *value )
-   {
-      if ( value )
-         free( value );
-   }
+  /// Access an array element (zero based index )
+  /// (You may need to say 'value[0u]' to get your compiler to distinguish
+  ///  this from the operator[] which takes a string.)
+  const Value& operator[](int index) const;
+
+  /// If the array contains at least index+1 elements, returns the element
+  /// value; otherwise returns defaultValue.
+  Value get(ArrayIndex index, const Value& defaultValue) const;
+  /// Return true if index < size().
+  bool isValidIndex(ArrayIndex index) const;
+  /// \brief Append value to array at the end.
+  ///
+  /// Equivalent to jsonvalue[jsonvalue.size()] = value;
+  Value& append(const Value& value);
+  Value& append(Value&& value);
+
+  /// Access an object value by name, create a null member if it does not exist.
+  /// \note Because of our implementation, keys are limited to 2^30 -1 chars.
+  ///  Exceeding that will cause an exception.
+  Value& operator[](const char* key);
+  /// Access an object value by name, returns null if there is no member with
+  /// that name.
+  const Value& operator[](const char* key) const;
+  /// Access an object value by name, create a null member if it does not exist.
+  /// \param key may contain embedded nulls.
+  Value& operator[](const String& key);
+  /// Access an object value by name, returns null if there is no member with
+  /// that name.
+  /// \param key may contain embedded nulls.
+  const Value& operator[](const String& key) const;
+  /** \brief Access an object value by name, create a null member if it does
+   * not exist.
+   *
+   * If the object has no entry for that name, then the member name used to
+   * store the new entry is not duplicated.
+   * Example of use:
+   * \code
+   * Json::Value object;
+   * static const StaticString code("code");
+   * object[code] = 1234;
+   * \endcode
+   */
+  Value& operator[](const StaticString& key);
+#ifdef JSON_USE_CPPTL
+  /// Access an object value by name, create a null member if it does not exist.
+  Value& operator[](const CppTL::ConstString& key);
+  /// Access an object value by name, returns null if there is no member with
+  /// that name.
+  const Value& operator[](const CppTL::ConstString& key) const;
+#endif
+  /// Return the member named key if it exists, defaultValue otherwise.
+  /// \note deep copy
+  Value get(const char* key, const Value& defaultValue) const;
+  /// Return the member named key if it exists, defaultValue otherwise.
+  /// \note deep copy
+  /// \note key may contain embedded nulls.
+  Value
+  get(const char* begin, const char* end, const Value& defaultValue) const;
+  /// Return the member named key if it exists, defaultValue otherwise.
+  /// \note deep copy
+  /// \param key may contain embedded nulls.
+  Value get(const String& key, const Value& defaultValue) const;
+#ifdef JSON_USE_CPPTL
+  /// Return the member named key if it exists, defaultValue otherwise.
+  /// \note deep copy
+  Value get(const CppTL::ConstString& key, const Value& defaultValue) const;
+#endif
+  /// Most general and efficient version of isMember()const, get()const,
+  /// and operator[]const
+  /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30
+  Value const* find(char const* begin, char const* end) const;
+  /// Most general and efficient version of object-mutators.
+  /// \note As stated elsewhere, behavior is undefined if (end-begin) >= 2^30
+  /// \return non-zero, but JSON_ASSERT if this is neither object nor nullValue.
+  Value* demand(char const* begin, char const* end);
+  /// \brief Remove and return the named member.
+  ///
+  /// Do nothing if it did not exist.
+  /// \pre type() is objectValue or nullValue
+  /// \post type() is unchanged
+  void removeMember(const char* key);
+  /// Same as removeMember(const char*)
+  /// \param key may contain embedded nulls.
+  void removeMember(const String& key);
+  /// Same as removeMember(const char* begin, const char* end, Value* removed),
+  /// but 'key' is null-terminated.
+  bool removeMember(const char* key, Value* removed);
+  /** \brief Remove the named map member.
+
+      Update 'removed' iff removed.
+      \param key may contain embedded nulls.
+      \return true iff removed (no exceptions)
+  */
+  bool removeMember(String const& key, Value* removed);
+  /// Same as removeMember(String const& key, Value* removed)
+  bool removeMember(const char* begin, const char* end, Value* removed);
+  /** \brief Remove the indexed array element.
+
+      This is an expensive O(n) operation.
+      Update 'removed' iff removed.
+      \return true if removed (no exceptions)
+  */
+  bool removeIndex(ArrayIndex index, Value* removed);
+
+  /// Return true if the object has a member named key.
+  /// \note 'key' must be null-terminated.
+  bool isMember(const char* key) const;
+  /// Return true if the object has a member named key.
+  /// \param key may contain embedded nulls.
+  bool isMember(const String& key) const;
+  /// Same as isMember(String const& key)const
+  bool isMember(const char* begin, const char* end) const;
+#ifdef JSON_USE_CPPTL
+  /// Return true if the object has a member named key.
+  bool isMember(const CppTL::ConstString& key) const;
+#endif
+
+  /// \brief Return a list of the member names.
+  ///
+  /// If null, return an empty list.
+  /// \pre type() is objectValue or nullValue
+  /// \post if type() was nullValue, it remains nullValue
+  Members getMemberNames() const;
+
+  //# ifdef JSON_USE_CPPTL
+  //      EnumMemberNames enumMemberNames() const;
+  //      EnumValues enumValues() const;
+  //# endif
+
+  /// \deprecated Always pass len.
+  JSONCPP_DEPRECATED("Use setComment(String const&) instead.")
+  void setComment(const char* comment, CommentPlacement placement) {
+    setComment(String(comment, strlen(comment)), placement);
+  }
+  /// Comments must be //... or /* ... */
+  void setComment(const char* comment, size_t len, CommentPlacement placement) {
+    setComment(String(comment, len), placement);
+  }
+  /// Comments must be //... or /* ... */
+  void setComment(String comment, CommentPlacement placement);
+  bool hasComment(CommentPlacement placement) const;
+  /// Include delimiters and embedded newlines.
+  String getComment(CommentPlacement placement) const;
+
+  String toStyledString() const;
+
+  const_iterator begin() const;
+  const_iterator end() const;
+
+  iterator begin();
+  iterator end();
+
+  // Accessors for the [start, limit) range of bytes within the JSON text from
+  // which this value was parsed, if any.
+  void setOffsetStart(ptrdiff_t start);
+  void setOffsetLimit(ptrdiff_t limit);
+  ptrdiff_t getOffsetStart() const;
+  ptrdiff_t getOffsetLimit() const;
+
+private:
+  void setType(ValueType v) {
+    bits_.value_type_ = static_cast<unsigned char>(v);
+  }
+  bool isAllocated() const { return bits_.allocated_; }
+  void setIsAllocated(bool v) { bits_.allocated_ = v; }
+
+  void initBasic(ValueType type, bool allocated = false);
+  void dupPayload(const Value& other);
+  void releasePayload();
+  void dupMeta(const Value& other);
+
+  Value& resolveReference(const char* key);
+  Value& resolveReference(const char* key, const char* end);
+
+  // struct MemberNamesTransform
+  //{
+  //   typedef const char *result_type;
+  //   const char *operator()( const CZString &name ) const
+  //   {
+  //      return name.c_str();
+  //   }
+  //};
+
+  union ValueHolder {
+    LargestInt int_;
+    LargestUInt uint_;
+    double real_;
+    bool bool_;
+    char* string_; // if allocated_, ptr to { unsigned, char[] }.
+    ObjectValues* map_;
+  } value_;
+
+  struct {
+    // Really a ValueType, but types should agree for bitfield packing.
+    unsigned int value_type_ : 8;
+    // Unless allocated_, string_ must be null-terminated.
+    unsigned int allocated_ : 1;
+  } bits_;
+
+  class Comments {
+  public:
+    Comments() = default;
+    Comments(const Comments& that);
+    Comments(Comments&& that);
+    Comments& operator=(const Comments& that);
+    Comments& operator=(Comments&& that);
+    bool has(CommentPlacement slot) const;
+    String get(CommentPlacement slot) const;
+    void set(CommentPlacement slot, String s);
+
+  private:
+    using Array = std::array<String, numberOfCommentPlacement>;
+    std::unique_ptr<Array> ptr_;
+  };
+  Comments comments_;
+
+  // [start, limit) byte offsets in the source JSON text from which this Value
+  // was extracted.
+  ptrdiff_t start_;
+  ptrdiff_t limit_;
 };
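To tie the Value API above together, a brief sketch of object/array construction, defaulted lookup, iteration, and removal; the field names used here are invented for illustration.
\code
Json::Value root(Json::objectValue);
root["name"] = "example";              // operator[](const char*) creates the member
root["sizes"] = Json::Value(Json::arrayValue);
root["sizes"].append(640);
root["sizes"].append(480);

Json::Value fps = root.get("fps", 30); // defaultValue when the key is absent
bool hasName = root.isMember("name");
(void)hasName;

for (Json::Value::const_iterator it = root.begin(); it != root.end(); ++it) {
  Json::String key = it.name();        // member name ("" for array elements)
  const Json::Value& member = *it;
  (void)key; (void)member;
}

Json::Value removed;
root.removeMember("sizes", &removed);  // returns true iff the member existed
\endcode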
-      \endcode
-    */ 
-   class JSON_API ValueArrayAllocator
-   {
-   public:
-      virtual ~ValueArrayAllocator();
-      virtual ValueInternalArray *newArray() = 0;
-      virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other ) = 0;
-      virtual void destructArray( ValueInternalArray *array ) = 0;
-      /** \brief Reallocate array page index.
-       * Reallocates an array of pointer on each page.
-       * \param indexes [input] pointer on the current index. May be \c NULL.
-       *                [output] pointer on the new index of at least 
-       *                         \a minNewIndexCount pages. 
-       * \param indexCount [input] current number of pages in the index.
-       *                   [output] number of page the reallocated index can handle.
-       *                            \b MUST be >= \a minNewIndexCount.
-       * \param minNewIndexCount Minimum number of page the new index must be able to
-       *                         handle.
-       */
-      virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                             ValueInternalArray::PageIndex &indexCount,
-                                             ValueInternalArray::PageIndex minNewIndexCount ) = 0;
-      virtual void releaseArrayPageIndex( Value **indexes, 
-                                          ValueInternalArray::PageIndex indexCount ) = 0;
-      virtual Value *allocateArrayPage() = 0;
-      virtual void releaseArrayPage( Value *value ) = 0;
-   };
-#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP
 
+/** \brief Experimental and untested: represents an element of the "path" to
+ * access a node.
+ */
+class JSON_API PathArgument {
+public:
+  friend class Path;
 
-   /** \brief base class for Value iterators.
-    *
-    */
-   class ValueIteratorBase
-   {
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef ValueIteratorBase SelfType;
+  PathArgument();
+  PathArgument(ArrayIndex index);
+  PathArgument(const char* key);
+  PathArgument(const String& key);
 
-      ValueIteratorBase();
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueIteratorBase( const Value::ObjectValues::iterator &current );
-#else
-      ValueIteratorBase( const ValueInternalArray::IteratorState &state );
-      ValueIteratorBase( const ValueInternalMap::IteratorState &state );
-#endif
+private:
+  enum Kind { kindNone = 0, kindIndex, kindKey };
+  String key_;
+  ArrayIndex index_{};
+  Kind kind_{kindNone};
+};
 
-      bool operator ==( const SelfType &other ) const
-      {
-         return isEqual( other );
-      }
+/** \brief Experimental and untested: represents a "path" to access a node.
+ *
+ * Syntax:
+ * - "." => root node
+ * - ".[n]" => elements at index 'n' of root node (an array value)
+ * - ".name" => member named 'name' of root node (an object value)
+ * - ".name1.name2.name3"
+ * - ".[0][1][2].name1[3]"
+ * - ".%" => member name is provided as parameter
+ * - ".[%]" => index is provied as parameter
+ */
+class JSON_API Path {
+public:
+  Path(const String& path,
+       const PathArgument& a1 = PathArgument(),
+       const PathArgument& a2 = PathArgument(),
+       const PathArgument& a3 = PathArgument(),
+       const PathArgument& a4 = PathArgument(),
+       const PathArgument& a5 = PathArgument());
 
-      bool operator !=( const SelfType &other ) const
-      {
-         return !isEqual( other );
-      }
+  const Value& resolve(const Value& root) const;
+  Value resolve(const Value& root, const Value& defaultValue) const;
+  /// Creates the "path" to access the specified node and returns a reference to
+  /// the node.
+  Value& make(Value& root) const;
 
-      difference_type operator -( const SelfType &other ) const
-      {
-         return computeDistance( other );
-      }
+private:
+  typedef std::vector<const PathArgument*> InArgs;
+  typedef std::vector<PathArgument> Args;
 
-      /// Return either the index or the member name of the referenced value as a Value.
-      Value key() const;
+  void makePath(const String& path, const InArgs& in);
+  void addPathInArg(const String& path,
+                    const InArgs& in,
+                    InArgs::const_iterator& itInArg,
+                    PathArgument::Kind kind);
+  static void invalidPath(const String& path, int location);
 
-      /// Return the index of the referenced Value. -1 if it is not an arrayValue.
-      UInt index() const;
+  Args args_;
+};
 
-      /// Return the member name of the referenced Value. "" if it is not an objectValue.
-      const char *memberName() const;
+/** \brief base class for Value iterators.
+ *
+ */
+class JSON_API ValueIteratorBase {
+public:
+  typedef std::bidirectional_iterator_tag iterator_category;
+  typedef unsigned int size_t;
+  typedef int difference_type;
+  typedef ValueIteratorBase SelfType;
 
-   protected:
-      Value &deref() const;
+  bool operator==(const SelfType& other) const { return isEqual(other); }
 
-      void increment();
+  bool operator!=(const SelfType& other) const { return !isEqual(other); }
 
-      void decrement();
+  difference_type operator-(const SelfType& other) const {
+    return other.computeDistance(*this);
+  }
 
-      difference_type computeDistance( const SelfType &other ) const;
+  /// Return either the index or the member name of the referenced value as a
+  /// Value.
+  Value key() const;
 
-      bool isEqual( const SelfType &other ) const;
+  /// Return the index of the referenced Value, or -1 if it is not an
+  /// arrayValue.
+  UInt index() const;
 
-      void copy( const SelfType &other );
+  /// Return the member name of the referenced Value, or "" if it is not an
+  /// objectValue.
+  /// \note Avoid `c_str()` on result, as embedded zeroes are possible.
+  String name() const;
 
-   private:
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      Value::ObjectValues::iterator current_;
-      // Indicates that iterator is for a null value.
-      bool isNull_;
-#else
-      union
-      {
-         ValueInternalArray::IteratorState array_;
-         ValueInternalMap::IteratorState map_;
-      } iterator_;
-      bool isArray_;
-#endif
-   };
+  /// Return the member name of the referenced Value. "" if it is not an
+  /// objectValue.
+  /// \deprecated This cannot be used for UTF-8 strings, since there can be
+  /// embedded nulls.
+  JSONCPP_DEPRECATED("Use `key = name();` instead.")
+  char const* memberName() const;
+  /// Return the member name of the referenced Value, or NULL if it is not an
+  /// objectValue.
+  /// \note Better version than memberName(). Allows embedded nulls.
+  char const* memberName(char const** end) const;
 
-   /** \brief const iterator for object and array value.
-    *
-    */
-   class ValueConstIterator : public ValueIteratorBase
-   {
-      friend class Value;
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef const Value &reference;
-      typedef const Value *pointer;
-      typedef ValueConstIterator SelfType;
+protected:
+  Value& deref() const;
 
-      ValueConstIterator();
-   private:
-      /*! \internal Use by Value to create an iterator.
-       */
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueConstIterator( const Value::ObjectValues::iterator &current );
-#else
-      ValueConstIterator( const ValueInternalArray::IteratorState &state );
-      ValueConstIterator( const ValueInternalMap::IteratorState &state );
-#endif
-   public:
-      SelfType &operator =( const ValueIteratorBase &other );
+  void increment();
 
-      SelfType operator++( int )
-      {
-         SelfType temp( *this );
-         ++*this;
-         return temp;
-      }
+  void decrement();
 
-      SelfType operator--( int )
-      {
-         SelfType temp( *this );
-         --*this;
-         return temp;
-      }
+  difference_type computeDistance(const SelfType& other) const;
 
-      SelfType &operator--()
-      {
-         decrement();
-         return *this;
-      }
+  bool isEqual(const SelfType& other) const;
 
-      SelfType &operator++()
-      {
-         increment();
-         return *this;
-      }
+  void copy(const SelfType& other);
 
-      reference operator *() const
-      {
-         return deref();
-      }
-   };
+private:
+  Value::ObjectValues::iterator current_;
+  // Indicates that iterator is for a null value.
+  bool isNull_{true};
 
+public:
+  // For some reason, BORLAND needs these at the end, rather
+  // than earlier. No idea why.
+  ValueIteratorBase();
+  explicit ValueIteratorBase(const Value::ObjectValues::iterator& current);
+};
 
-   /** \brief Iterator for object and array value.
-    */
-   class ValueIterator : public ValueIteratorBase
-   {
-      friend class Value;
-   public:
-      typedef unsigned int size_t;
-      typedef int difference_type;
-      typedef Value &reference;
-      typedef Value *pointer;
-      typedef ValueIterator SelfType;
+/** \brief const iterator for object and array value.
+ *
+ */
+class JSON_API ValueConstIterator : public ValueIteratorBase {
+  friend class Value;
 
-      ValueIterator();
-      ValueIterator( const ValueConstIterator &other );
-      ValueIterator( const ValueIterator &other );
-   private:
-      /*! \internal Use by Value to create an iterator.
-       */
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-      explicit ValueIterator( const Value::ObjectValues::iterator &current );
-#else
-      ValueIterator( const ValueInternalArray::IteratorState &state );
-      ValueIterator( const ValueInternalMap::IteratorState &state );
-#endif
-   public:
+public:
+  typedef const Value value_type;
+  // typedef unsigned int size_t;
+  // typedef int difference_type;
+  typedef const Value& reference;
+  typedef const Value* pointer;
+  typedef ValueConstIterator SelfType;
 
-      SelfType &operator =( const SelfType &other );
+  ValueConstIterator();
+  ValueConstIterator(ValueIterator const& other);
 
-      SelfType operator++( int )
-      {
-         SelfType temp( *this );
-         ++*this;
-         return temp;
-      }
+private:
+  /*! \internal Use by Value to create an iterator.
+   */
+  explicit ValueConstIterator(const Value::ObjectValues::iterator& current);
 
-      SelfType operator--( int )
-      {
-         SelfType temp( *this );
-         --*this;
-         return temp;
-      }
+public:
+  SelfType& operator=(const ValueIteratorBase& other);
 
-      SelfType &operator--()
-      {
-         decrement();
-         return *this;
-      }
+  SelfType operator++(int) {
+    SelfType temp(*this);
+    ++*this;
+    return temp;
+  }
 
-      SelfType &operator++()
-      {
-         increment();
-         return *this;
-      }
+  SelfType operator--(int) {
+    SelfType temp(*this);
+    --*this;
+    return temp;
+  }
 
-      reference operator *() const
-      {
-         return deref();
-      }
-   };
+  SelfType& operator--() {
+    decrement();
+    return *this;
+  }
 
+  SelfType& operator++() {
+    increment();
+    return *this;
+  }
+
+  reference operator*() const { return deref(); }
+
+  pointer operator->() const { return &deref(); }
+};
+
+/** \brief Iterator for object and array value.
+ */
+class JSON_API ValueIterator : public ValueIteratorBase {
+  friend class Value;
+
+public:
+  typedef Value value_type;
+  typedef unsigned int size_t;
+  typedef int difference_type;
+  typedef Value& reference;
+  typedef Value* pointer;
+  typedef ValueIterator SelfType;
+
+  ValueIterator();
+  explicit ValueIterator(const ValueConstIterator& other);
+  ValueIterator(const ValueIterator& other);
+
+private:
+  /*! \internal Use by Value to create an iterator.
+   */
+  explicit ValueIterator(const Value::ObjectValues::iterator& current);
+
+public:
+  SelfType& operator=(const SelfType& other);
+
+  SelfType operator++(int) {
+    SelfType temp(*this);
+    ++*this;
+    return temp;
+  }
+
+  SelfType operator--(int) {
+    SelfType temp(*this);
+    --*this;
+    return temp;
+  }
+
+  SelfType& operator--() {
+    decrement();
+    return *this;
+  }
+
+  SelfType& operator++() {
+    increment();
+    return *this;
+  }
+
+  reference operator*() const { return deref(); }
+
+  pointer operator->() const { return &deref(); }
+};
+
+inline void swap(Value& a, Value& b) { a.swap(b); }
 
 } // namespace Json
 
+#pragma pack(pop)
+
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
 
 #endif // CPPTL_JSON_H_INCLUDED
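The rewritten value.h above drops the JSON_VALUE_USE_INTERNAL_MAP iterator variants, adds ValueIteratorBase::name() alongside the now-deprecated memberName(), and keeps the experimental Path/PathArgument helpers. A minimal usage sketch of those declarations, assuming the usual <json/json.h> umbrella header; the field names ("name", "ports") are invented purely for illustration:

#include <iostream>
#include <json/json.h>

int main() {
  Json::Value root(Json::objectValue);
  root["name"] = "example";
  root["ports"] = Json::Value(Json::arrayValue);
  root["ports"].append(8080);

  // Iterate an objectValue; name() is the replacement for the deprecated
  // memberName() and returns a String, so embedded zeroes survive.
  for (Json::Value::const_iterator it = root.begin(); it != root.end(); ++it)
    std::cout << it.name() << ": " << *it;

  // Path is documented above as experimental: ".ports[0]" resolves the
  // first element of the "ports" array, with a fallback default value.
  Json::Path path(".ports[0]");
  std::cout << path.resolve(root, Json::Value(0)).asInt() << '\n';
  return 0;
}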
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/writer.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/writer.h
index 38d41e1..12fd36a 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/writer.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/include/json/writer.h
@@ -1,184 +1,368 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSON_WRITER_H_INCLUDED
-# define JSON_WRITER_H_INCLUDED
+#define JSON_WRITER_H_INCLUDED
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include "value.h"
+#include "value.h"
 #endif // if !defined(JSON_IS_AMALGAMATION)
-# include <vector>
-# include <string>
+#include <ostream>
+#include <string>
+#include <vector>
+
+// Disable warning C4251: <data member>: <type> needs to have dll-interface to
+// be used by...
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING) && defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+
+#pragma pack(push, 8)
 
 namespace Json {
 
-   class Value;
+class Value;
 
-   /** \brief Abstract class for writers.
+/**
+
+Usage:
+\code
+  using namespace Json;
+  void writeToStdout(StreamWriter::Factory const& factory, Value const& value) {
+    std::unique_ptr<StreamWriter> const writer(
+      factory.newStreamWriter());
+    writer->write(value, &std::cout);
+    std::cout << std::endl;  // add lf and flush
+  }
+\endcode
+*/
+class JSON_API StreamWriter {
+protected:
+  OStream* sout_; // not owned; will not delete
+public:
+  StreamWriter();
+  virtual ~StreamWriter();
+  /** Write Value into document as configured in sub-class.
+      Do not take ownership of sout, but maintain a reference during function.
+      \pre sout != NULL
+      \return zero on success. For now, we always return zero, so check the stream instead.
+      \throw std::exception possibly, depending on configuration
+   */
+  virtual int write(Value const& root, OStream* sout) = 0;
+
+  /** \brief A simple abstract factory.
+   */
+  class JSON_API Factory {
+  public:
+    virtual ~Factory();
+     * \brief Allocate a StreamWriter via operator new().
+     * \throw std::exception if something goes wrong (e.g. invalid settings)
+     */
+    virtual StreamWriter* newStreamWriter() const = 0;
+  }; // Factory
+};   // StreamWriter
+
+/** \brief Write into stringstream, then return string, for convenience.
+ * A StreamWriter will be created from the factory, used, and then deleted.
+ */
+String JSON_API writeString(StreamWriter::Factory const& factory,
+                            Value const& root);
+
+/** \brief Build a StreamWriter implementation.
+
+Usage:
+\code
+  using namespace Json;
+  Value value = ...;
+  StreamWriterBuilder builder;
+  builder["commentStyle"] = "None";
+  builder["indentation"] = "   ";  // or whatever you like
+  std::unique_ptr<Json::StreamWriter> writer(
+      builder.newStreamWriter());
+  writer->write(value, &std::cout);
+  std::cout << std::endl;  // add lf and flush
+\endcode
+*/
+class JSON_API StreamWriterBuilder : public StreamWriter::Factory {
+public:
+  // Note: We use a Json::Value so that we can add data-members to this class
+  // without a major version bump.
+  /** Configuration of this builder.
+    Available settings (case-sensitive):
+    - "commentStyle": "None" or "All"
+    - "indentation":  "<anything>".
+      - Setting this to an empty string also omits newline characters.
+    - "enableYAMLCompatibility": false or true
+      - slightly change the whitespace around colons
+    - "dropNullPlaceholders": false or true
+      - Drop the "null" string from the writer's output for nullValues.
+        Strictly speaking, this is not valid JSON. But when the output is being
+        fed to a browser's JavaScript, it makes for smaller output and the
+        browser can handle the output just fine.
+    - "useSpecialFloats": false or true
+      - If true, outputs non-finite floating point values in the following way:
+        NaN values as "NaN", positive infinity as "Infinity", and negative
+    infinity as "-Infinity".
+    - "precision": int
+      - Number of precision digits for formatting of real values.
+    - "precisionType": "significant"(default) or "decimal"
+      - Type of precision for formatting of real values.
+
+    You can examine `settings_` yourself
+    to see the defaults. You can also write and read them just like any
+    JSON Value.
+    \sa setDefaults()
     */
-   class JSON_API Writer
-   {
-   public:
-      virtual ~Writer();
+  Json::Value settings_;
 
-      virtual std::string write( const Value &root ) = 0;
-   };
+  StreamWriterBuilder();
+  ~StreamWriterBuilder() override;
 
-   /** \brief Outputs a Value in <a HREF="http://www.json.org">JSON</a> format without formatting (not human friendly).
-    *
-    * The JSON document is written in a single line. It is not intended for 'human' consumption,
-    * but may be usefull to support feature such as RPC where bandwith is limited.
-    * \sa Reader, Value
-    */
-   class JSON_API FastWriter : public Writer
-   {
-   public:
-      FastWriter();
-      virtual ~FastWriter(){}
+  /**
+   * \throw std::exception if something goes wrong (e.g. invalid settings)
+   */
+  StreamWriter* newStreamWriter() const override;
 
-      void enableYAMLCompatibility();
+  /** \return true if 'settings' are legal and consistent;
+   *   otherwise, indicate bad settings via 'invalid'.
+   */
+  bool validate(Json::Value* invalid) const;
+  /** A simple way to update a specific setting.
+   */
+  Value& operator[](const String& key);
 
-   public: // overridden from Writer
-      virtual std::string write( const Value &root );
+  /** Called by ctor, but you can use this to reset settings_.
+   * \pre 'settings' != NULL (but Json::null is fine)
+   * \remark Defaults:
+   * \snippet src/lib_json/json_writer.cpp StreamWriterBuilderDefaults
+   */
+  static void setDefaults(Json::Value* settings);
+};
 
-   private:
-      void writeValue( const Value &value );
+/** \brief Abstract class for writers.
+ * \deprecated Use StreamWriter. (And really, this is an implementation detail.)
+ */
+class JSONCPP_DEPRECATED("Use StreamWriter instead") JSON_API Writer {
+public:
+  virtual ~Writer();
 
-      std::string document_;
-      bool yamlCompatiblityEnabled_;
-   };
+  virtual String write(const Value& root) = 0;
+};
 
-   /** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a human friendly way.
-    *
-    * The rules for line break and indent are as follow:
-    * - Object value:
-    *     - if empty then print {} without indent and line break
-    *     - if not empty the print '{', line break & indent, print one value per line
-    *       and then unindent and line break and print '}'.
-    * - Array value:
-    *     - if empty then print [] without indent and line break
-    *     - if the array contains no object value, empty array or some other value types,
-    *       and all the values fit on one lines, then print the array on a single line.
-    *     - otherwise, it the values do not fit on one line, or the array contains
-    *       object or non empty array, then print one value per line.
-    *
-    * If the Value have comments then they are outputed according to their #CommentPlacement.
-    *
-    * \sa Reader, Value, Value::setComment()
-    */
-   class JSON_API StyledWriter: public Writer
-   {
-   public:
-      StyledWriter();
-      virtual ~StyledWriter(){}
+/** \brief Outputs a Value in <a HREF="http://www.json.org">JSON</a> format
+ *without formatting (not human friendly).
+ *
+ * The JSON document is written in a single line. It is not intended for 'human'
+ * consumption,
+ * but may be useful to support features such as RPC where bandwidth is limited.
+ * \sa Reader, Value
+ * \deprecated Use StreamWriterBuilder.
+ */
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4996) // Deriving from deprecated class
+#endif
+class JSONCPP_DEPRECATED("Use StreamWriterBuilder instead") JSON_API FastWriter
+    : public Writer {
+public:
+  FastWriter();
+  ~FastWriter() override = default;
 
-   public: // overridden from Writer
-      /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
-       * \param root Value to serialize.
-       * \return String containing the JSON document that represents the root value.
-       */
-      virtual std::string write( const Value &root );
+  void enableYAMLCompatibility();
 
-   private:
-      void writeValue( const Value &value );
-      void writeArrayValue( const Value &value );
-      bool isMultineArray( const Value &value );
-      void pushValue( const std::string &value );
-      void writeIndent();
-      void writeWithIndent( const std::string &value );
-      void indent();
-      void unindent();
-      void writeCommentBeforeValue( const Value &root );
-      void writeCommentAfterValueOnSameLine( const Value &root );
-      bool hasCommentForValue( const Value &value );
-      static std::string normalizeEOL( const std::string &text );
+  /** \brief Drop the "null" string from the writer's output for nullValues.
+   * Strictly speaking, this is not valid JSON. But when the output is being
+   * fed to a browser's JavaScript, it makes for smaller output and the
+   * browser can handle the output just fine.
+   */
+  void dropNullPlaceholders();
 
-      typedef std::vector<std::string> ChildValues;
+  void omitEndingLineFeed();
 
-      ChildValues childValues_;
-      std::string document_;
-      std::string indentString_;
-      int rightMargin_;
-      int indentSize_;
-      bool addChildValues_;
-   };
+public: // overridden from Writer
+  String write(const Value& root) override;
 
-   /** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a human friendly way,
-        to a stream rather than to a string.
-    *
-    * The rules for line break and indent are as follow:
-    * - Object value:
-    *     - if empty then print {} without indent and line break
-    *     - if not empty the print '{', line break & indent, print one value per line
-    *       and then unindent and line break and print '}'.
-    * - Array value:
-    *     - if empty then print [] without indent and line break
-    *     - if the array contains no object value, empty array or some other value types,
-    *       and all the values fit on one lines, then print the array on a single line.
-    *     - otherwise, it the values do not fit on one line, or the array contains
-    *       object or non empty array, then print one value per line.
-    *
-    * If the Value have comments then they are outputed according to their #CommentPlacement.
-    *
-    * \param indentation Each level will be indented by this amount extra.
-    * \sa Reader, Value, Value::setComment()
-    */
-   class JSON_API StyledStreamWriter
-   {
-   public:
-      StyledStreamWriter( std::string indentation="\t" );
-      ~StyledStreamWriter(){}
+private:
+  void writeValue(const Value& value);
 
-   public:
-      /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
-       * \param out Stream to write to. (Can be ostringstream, e.g.)
-       * \param root Value to serialize.
-       * \note There is no point in deriving from Writer, since write() should not return a value.
-       */
-      void write( std::ostream &out, const Value &root );
+  String document_;
+  bool yamlCompatibilityEnabled_{false};
+  bool dropNullPlaceholders_{false};
+  bool omitEndingLineFeed_{false};
+};
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
-   private:
-      void writeValue( const Value &value );
-      void writeArrayValue( const Value &value );
-      bool isMultineArray( const Value &value );
-      void pushValue( const std::string &value );
-      void writeIndent();
-      void writeWithIndent( const std::string &value );
-      void indent();
-      void unindent();
-      void writeCommentBeforeValue( const Value &root );
-      void writeCommentAfterValueOnSameLine( const Value &root );
-      bool hasCommentForValue( const Value &value );
-      static std::string normalizeEOL( const std::string &text );
+/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
+ * human-friendly way.
+ *
+ * The rules for line break and indent are as follows:
+ * - Object value:
+ *     - if empty then print {} without indent and line break
+ *     - if not empty then print '{', line break & indent, print one value per
+ *       line
+ *       and then unindent and line break and print '}'.
+ * - Array value:
+ *     - if empty then print [] without indent and line break
+ *     - if the array contains no object value, empty array or some other value
+ *       types,
+ *       and all the values fit on one line, then print the array on a single
+ *       line.
+ *     - otherwise, if the values do not fit on one line, or the array contains
+ *       an object or a non-empty array, then print one value per line.
+ *
+ * If the Value has comments then they are output according to their
+ * #CommentPlacement.
+ *
+ * \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
+ */
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4996) // Deriving from deprecated class
+#endif
+class JSONCPP_DEPRECATED("Use StreamWriterBuilder instead") JSON_API
+    StyledWriter : public Writer {
+public:
+  StyledWriter();
+  ~StyledWriter() override = default;
 
-      typedef std::vector<std::string> ChildValues;
+public: // overridden from Writer
+  /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
+   * \param root Value to serialize.
+   * \return String containing the JSON document that represents the root value.
+   */
+  String write(const Value& root) override;
 
-      ChildValues childValues_;
-      std::ostream* document_;
-      std::string indentString_;
-      int rightMargin_;
-      std::string indentation_;
-      bool addChildValues_;
-   };
+private:
+  void writeValue(const Value& value);
+  void writeArrayValue(const Value& value);
+  bool isMultilineArray(const Value& value);
+  void pushValue(const String& value);
+  void writeIndent();
+  void writeWithIndent(const String& value);
+  void indent();
+  void unindent();
+  void writeCommentBeforeValue(const Value& root);
+  void writeCommentAfterValueOnSameLine(const Value& root);
+  static bool hasCommentForValue(const Value& value);
+  static String normalizeEOL(const String& text);
 
-# if defined(JSON_HAS_INT64)
-   std::string JSON_API valueToString( Int value );
-   std::string JSON_API valueToString( UInt value );
-# endif // if defined(JSON_HAS_INT64)
-   std::string JSON_API valueToString( LargestInt value );
-   std::string JSON_API valueToString( LargestUInt value );
-   std::string JSON_API valueToString( double value );
-   std::string JSON_API valueToString( bool value );
-   std::string JSON_API valueToQuotedString( const char *value );
+  typedef std::vector<String> ChildValues;
 
-   /// \brief Output using the StyledStreamWriter.
-   /// \see Json::operator>>()
-   std::ostream& operator<<( std::ostream&, const Value &root );
+  ChildValues childValues_;
+  String document_;
+  String indentString_;
+  unsigned int rightMargin_{74};
+  unsigned int indentSize_{3};
+  bool addChildValues_{false};
+};
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
+ * human-friendly way,
+ *     to a stream rather than to a string.
+ *
+ * The rules for line break and indent are as follows:
+ * - Object value:
+ *     - if empty then print {} without indent and line break
+ *     - if not empty then print '{', line break & indent, print one value per
+ *       line
+ *       and then unindent and line break and print '}'.
+ * - Array value:
+ *     - if empty then print [] without indent and line break
+ *     - if the array contains no object value, empty array or some other value
+ *       types,
+ *       and all the values fit on one line, then print the array on a single
+ *       line.
+ *     - otherwise, if the values do not fit on one line, or the array contains
+ *       an object or a non-empty array, then print one value per line.
+ *
+ * If the Value has comments then they are output according to their
+ * #CommentPlacement.
+ *
+ * \sa Reader, Value, Value::setComment()
+ * \deprecated Use StreamWriterBuilder.
+ */
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4996) // Deriving from deprecated class
+#endif
+class JSONCPP_DEPRECATED("Use StreamWriterBuilder instead") JSON_API
+    StyledStreamWriter {
+public:
+  /**
+   * \param indentation Each level will be indented by this amount extra.
+   */
+  StyledStreamWriter(String indentation = "\t");
+  ~StyledStreamWriter() = default;
+
+public:
+  /** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
+   * \param out Stream to write to. (Can be ostringstream, e.g.)
+   * \param root Value to serialize.
+   * \note There is no point in deriving from Writer, since write() should not
+   * return a value.
+   */
+  void write(OStream& out, const Value& root);
+
+private:
+  void writeValue(const Value& value);
+  void writeArrayValue(const Value& value);
+  bool isMultilineArray(const Value& value);
+  void pushValue(const String& value);
+  void writeIndent();
+  void writeWithIndent(const String& value);
+  void indent();
+  void unindent();
+  void writeCommentBeforeValue(const Value& root);
+  void writeCommentAfterValueOnSameLine(const Value& root);
+  static bool hasCommentForValue(const Value& value);
+  static String normalizeEOL(const String& text);
+
+  typedef std::vector<String> ChildValues;
+
+  ChildValues childValues_;
+  OStream* document_;
+  String indentString_;
+  unsigned int rightMargin_{74};
+  String indentation_;
+  bool addChildValues_ : 1;
+  bool indented_ : 1;
+};
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#if defined(JSON_HAS_INT64)
+String JSON_API valueToString(Int value);
+String JSON_API valueToString(UInt value);
+#endif // if defined(JSON_HAS_INT64)
+String JSON_API valueToString(LargestInt value);
+String JSON_API valueToString(LargestUInt value);
+String JSON_API
+valueToString(double value,
+              unsigned int precision = Value::defaultRealPrecision,
+              PrecisionType precisionType = PrecisionType::significantDigits);
+String JSON_API valueToString(bool value);
+String JSON_API valueToQuotedString(const char* value);
+
+/// \brief Output using the StyledStreamWriter.
+/// \see Json::operator>>()
+JSON_API OStream& operator<<(OStream&, const Value& root);
 
 } // namespace Json
 
+#pragma pack(pop)
 
+#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
+#pragma warning(pop)
+#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
 
 #endif // JSON_WRITER_H_INCLUDED
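In the new writer.h, Writer, FastWriter, StyledWriter, and StyledStreamWriter are all tagged JSONCPP_DEPRECATED in favor of StreamWriter and StreamWriterBuilder. A minimal sketch of the replacement API, using only the settings keys and functions documented in the header above; the Value contents ("codec", "bitrate") are invented for illustration:

#include <iostream>
#include <memory>
#include <json/json.h>

int main() {
  Json::Value config(Json::objectValue);
  config["codec"] = "VP8";        // illustrative data only
  config["bitrate"] = 500000;

  // StreamWriterBuilder replaces the deprecated FastWriter/StyledWriter.
  Json::StreamWriterBuilder builder;
  builder["commentStyle"] = "None";
  builder["indentation"] = "  ";

  // Convenience path: serialize straight to a String.
  std::cout << Json::writeString(builder, config) << '\n';

  // Explicit path: create a StreamWriter and stream to std::cout.
  std::unique_ptr<Json::StreamWriter> writer(builder.newStreamWriter());
  writer->write(config, &std::cout);
  std::cout << std::endl;
  return 0;
}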
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsoncpp.sln b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsoncpp.sln
deleted file mode 100644
index dd2f91b..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsoncpp.sln
+++ /dev/null
@@ -1,46 +0,0 @@
-Microsoft Visual Studio Solution File, Format Version 8.00
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lib_json", "lib_json.vcproj", "{B84F7231-16CE-41D8-8C08-7B523FF4225B}"
-	ProjectSection(ProjectDependencies) = postProject
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jsontest", "jsontest.vcproj", "{25AF2DD2-D396-4668-B188-488C33B8E620}"
-	ProjectSection(ProjectDependencies) = postProject
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B} = {B84F7231-16CE-41D8-8C08-7B523FF4225B}
-	EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_lib_json", "test_lib_json.vcproj", "{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}"
-	ProjectSection(ProjectDependencies) = postProject
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B} = {B84F7231-16CE-41D8-8C08-7B523FF4225B}
-	EndProjectSection
-EndProject
-Global
-	GlobalSection(SolutionConfiguration) = preSolution
-		Debug = Debug
-		dummy = dummy
-		Release = Release
-	EndGlobalSection
-	GlobalSection(ProjectConfiguration) = postSolution
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.Debug.ActiveCfg = Debug|Win32
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.Debug.Build.0 = Debug|Win32
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.dummy.ActiveCfg = dummy|Win32
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.dummy.Build.0 = dummy|Win32
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.Release.ActiveCfg = Release|Win32
-		{B84F7231-16CE-41D8-8C08-7B523FF4225B}.Release.Build.0 = Release|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.Debug.ActiveCfg = Debug|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.Debug.Build.0 = Debug|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.dummy.ActiveCfg = Debug|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.dummy.Build.0 = Debug|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.Release.ActiveCfg = Release|Win32
-		{25AF2DD2-D396-4668-B188-488C33B8E620}.Release.Build.0 = Release|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.Debug.ActiveCfg = Debug|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.Debug.Build.0 = Debug|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.dummy.ActiveCfg = Debug|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.dummy.Build.0 = Debug|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.Release.ActiveCfg = Release|Win32
-		{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}.Release.Build.0 = Release|Win32
-	EndGlobalSection
-	GlobalSection(ExtensibilityGlobals) = postSolution
-	EndGlobalSection
-	GlobalSection(ExtensibilityAddIns) = postSolution
-	EndGlobalSection
-EndGlobal
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsontest.vcproj b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsontest.vcproj
deleted file mode 100644
index 562c71f..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/jsontest.vcproj
+++ /dev/null
@@ -1,119 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="7.10"
-	Name="jsontest"
-	ProjectGUID="{25AF2DD2-D396-4668-B188-488C33B8E620}"
-	Keyword="Win32Proj">
-	<Platforms>
-		<Platform
-			Name="Win32"/>
-	</Platforms>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			OutputDirectory="../../build/vs71/debug/jsontest"
-			IntermediateDirectory="../../build/vs71/debug/jsontest"
-			ConfigurationType="1"
-			CharacterSet="2">
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				MinimalRebuild="TRUE"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="1"
-				UsePrecompiledHeader="0"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="4"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLinkerTool"
-				OutputFile="$(OutDir)/jsontest.exe"
-				LinkIncremental="2"
-				GenerateDebugInformation="TRUE"
-				ProgramDatabaseFile="$(OutDir)/jsontest.pdb"
-				SubSystem="1"
-				TargetMachine="1"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCWebDeploymentTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			OutputDirectory="../../build/vs71/release/jsontest"
-			IntermediateDirectory="../../build/vs71/release/jsontest"
-			ConfigurationType="1"
-			CharacterSet="2">
-			<Tool
-				Name="VCCLCompilerTool"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				RuntimeLibrary="0"
-				UsePrecompiledHeader="0"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="3"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLinkerTool"
-				OutputFile="$(OutDir)/jsontest.exe"
-				LinkIncremental="1"
-				GenerateDebugInformation="TRUE"
-				SubSystem="1"
-				OptimizeReferences="2"
-				EnableCOMDATFolding="2"
-				TargetMachine="1"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCWebDeploymentTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\jsontestrunner\main.cpp">
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/lib_json.vcproj b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/lib_json.vcproj
deleted file mode 100644
index 1aa5978..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/lib_json.vcproj
+++ /dev/null
@@ -1,214 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="7.10"
-	Name="lib_json"
-	ProjectGUID="{B84F7231-16CE-41D8-8C08-7B523FF4225B}"
-	Keyword="Win32Proj">
-	<Platforms>
-		<Platform
-			Name="Win32"/>
-	</Platforms>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			OutputDirectory="../../build/vs71/debug/lib_json"
-			IntermediateDirectory="../../build/vs71/debug/lib_json"
-			ConfigurationType="4"
-			CharacterSet="2">
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;_DEBUG;_LIB"
-				StringPooling="TRUE"
-				MinimalRebuild="TRUE"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="1"
-				EnableFunctionLevelLinking="TRUE"
-				DisableLanguageExtensions="TRUE"
-				ForceConformanceInForLoopScope="FALSE"
-				RuntimeTypeInfo="TRUE"
-				UsePrecompiledHeader="0"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="4"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLibrarianTool"
-				OutputFile="$(OutDir)/json_vc71_libmtd.lib"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			OutputDirectory="../../build/vs71/release/lib_json"
-			IntermediateDirectory="../../build/vs71/release/lib_json"
-			ConfigurationType="4"
-			CharacterSet="2"
-			WholeProgramOptimization="TRUE">
-			<Tool
-				Name="VCCLCompilerTool"
-				GlobalOptimizations="TRUE"
-				EnableIntrinsicFunctions="TRUE"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;NDEBUG;_LIB"
-				StringPooling="TRUE"
-				RuntimeLibrary="0"
-				EnableFunctionLevelLinking="TRUE"
-				DisableLanguageExtensions="TRUE"
-				ForceConformanceInForLoopScope="FALSE"
-				RuntimeTypeInfo="TRUE"
-				UsePrecompiledHeader="0"
-				AssemblerOutput="4"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="3"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLibrarianTool"
-				OutputFile="$(OutDir)/json_vc71_libmt.lib"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-		<Configuration
-			Name="dummy|Win32"
-			OutputDirectory="$(ConfigurationName)"
-			IntermediateDirectory="$(ConfigurationName)"
-			ConfigurationType="2"
-			CharacterSet="2"
-			WholeProgramOptimization="TRUE">
-			<Tool
-				Name="VCCLCompilerTool"
-				GlobalOptimizations="TRUE"
-				EnableIntrinsicFunctions="TRUE"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;NDEBUG;_LIB"
-				StringPooling="TRUE"
-				RuntimeLibrary="4"
-				EnableFunctionLevelLinking="TRUE"
-				DisableLanguageExtensions="TRUE"
-				ForceConformanceInForLoopScope="FALSE"
-				RuntimeTypeInfo="TRUE"
-				UsePrecompiledHeader="0"
-				AssemblerOutput="4"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="3"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLinkerTool"
-				GenerateDebugInformation="TRUE"
-				SubSystem="2"
-				OptimizeReferences="2"
-				EnableCOMDATFolding="2"
-				TargetMachine="1"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCWebDeploymentTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\include\json\autolink.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\config.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\features.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\forwards.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\json.h">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_batchallocator.h">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_internalarray.inl">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_internalmap.inl">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_reader.cpp">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_value.cpp">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_valueiterator.inl">
-		</File>
-		<File
-			RelativePath="..\..\src\lib_json\json_writer.cpp">
-		</File>
-		<File
-			RelativePath="..\..\include\json\reader.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\value.h">
-		</File>
-		<File
-			RelativePath="..\..\include\json\writer.h">
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/test_lib_json.vcproj b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/test_lib_json.vcproj
deleted file mode 100644
index 9ebb986..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makefiles/vs71/test_lib_json.vcproj
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0" encoding="Windows-1252"?>
-<VisualStudioProject
-	ProjectType="Visual C++"
-	Version="7.10"
-	Name="test_lib_json"
-	ProjectGUID="{B7A96B78-2782-40D2-8F37-A2DEF2B9C26D}"
-	RootNamespace="test_lib_json"
-	Keyword="Win32Proj">
-	<Platforms>
-		<Platform
-			Name="Win32"/>
-	</Platforms>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			OutputDirectory="../../build/vs71/debug/test_lib_json"
-			IntermediateDirectory="../../build/vs71/debug/test_lib_json"
-			ConfigurationType="1"
-			CharacterSet="2">
-			<Tool
-				Name="VCCLCompilerTool"
-				Optimization="0"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
-				MinimalRebuild="TRUE"
-				BasicRuntimeChecks="3"
-				RuntimeLibrary="1"
-				UsePrecompiledHeader="0"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="4"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLinkerTool"
-				OutputFile="$(OutDir)/test_lib_json.exe"
-				LinkIncremental="2"
-				GenerateDebugInformation="TRUE"
-				ProgramDatabaseFile="$(OutDir)/test_lib_json.pdb"
-				SubSystem="1"
-				TargetMachine="1"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"
-				Description="Running all unit tests"
-				CommandLine="$(TargetPath)"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCWebDeploymentTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			OutputDirectory="../../build/vs71/release/test_lib_json"
-			IntermediateDirectory="../../build/vs71/release/test_lib_json"
-			ConfigurationType="1"
-			CharacterSet="2">
-			<Tool
-				Name="VCCLCompilerTool"
-				AdditionalIncludeDirectories="../../include"
-				PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
-				RuntimeLibrary="0"
-				UsePrecompiledHeader="0"
-				WarningLevel="3"
-				Detect64BitPortabilityProblems="TRUE"
-				DebugInformationFormat="3"/>
-			<Tool
-				Name="VCCustomBuildTool"/>
-			<Tool
-				Name="VCLinkerTool"
-				OutputFile="$(OutDir)/test_lib_json.exe"
-				LinkIncremental="1"
-				GenerateDebugInformation="TRUE"
-				SubSystem="1"
-				OptimizeReferences="2"
-				EnableCOMDATFolding="2"
-				TargetMachine="1"/>
-			<Tool
-				Name="VCMIDLTool"/>
-			<Tool
-				Name="VCPostBuildEventTool"
-				Description="Running all unit tests"
-				CommandLine="$(TargetPath)"/>
-			<Tool
-				Name="VCPreBuildEventTool"/>
-			<Tool
-				Name="VCPreLinkEventTool"/>
-			<Tool
-				Name="VCResourceCompilerTool"/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"/>
-			<Tool
-				Name="VCWebDeploymentTool"/>
-			<Tool
-				Name="VCManagedWrapperGeneratorTool"/>
-			<Tool
-				Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<File
-			RelativePath="..\..\src\test_lib_json\jsontest.cpp">
-		</File>
-		<File
-			RelativePath="..\..\src\test_lib_json\jsontest.h">
-		</File>
-		<File
-			RelativePath="..\..\src\test_lib_json\main.cpp">
-		</File>
-	</Files>
-	<Globals>
-	</Globals>
-</VisualStudioProject>
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makerelease.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makerelease.py
index 6b8eec3..bc716cb 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makerelease.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/makerelease.py
@@ -1,3 +1,8 @@
+# Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
 """Tag the sandbox for release, make source and doc tarballs.
 
 Requires Python 2.6
@@ -10,7 +15,12 @@
 
 Example of invocation when doing a release:
 python makerelease.py 0.5.0 0.6.0-dev
+
+Note: This was for Subversion. Now that we are on GitHub, we no longer
+need to build versioned tarballs, so makerelease.py is defunct.
 """
+
+from __future__ import print_function
 import os.path
 import subprocess
 import sys
@@ -30,140 +40,140 @@
 SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
 SOURCEFORGE_PROJECT = 'jsoncpp'
 
-def set_version( version ):
+def set_version(version):
     with open('version','wb') as f:
-        f.write( version.strip() )
+        f.write(version.strip())
 
-def rmdir_if_exist( dir_path ):
-    if os.path.isdir( dir_path ):
-        shutil.rmtree( dir_path )
+def rmdir_if_exist(dir_path):
+    if os.path.isdir(dir_path):
+        shutil.rmtree(dir_path)
 
 class SVNError(Exception):
     pass
 
-def svn_command( command, *args ):
+def svn_command(command, *args):
     cmd = ['svn', '--non-interactive', command] + list(args)
-    print 'Running:', ' '.join( cmd )
-    process = subprocess.Popen( cmd,
+    print('Running:', ' '.join(cmd))
+    process = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT )
+                                stderr=subprocess.STDOUT)
     stdout = process.communicate()[0]
     if process.returncode:
-        error = SVNError( 'SVN command failed:\n' + stdout )
+        error = SVNError('SVN command failed:\n' + stdout)
         error.returncode = process.returncode
         raise error
     return stdout
 
 def check_no_pending_commit():
     """Checks that there is no pending commit in the sandbox."""
-    stdout = svn_command( 'status', '--xml' )
-    etree = ElementTree.fromstring( stdout )
+    stdout = svn_command('status', '--xml')
+    etree = ElementTree.fromstring(stdout)
     msg = []
-    for entry in etree.getiterator( 'entry' ):
+    for entry in etree.getiterator('entry'):
         path = entry.get('path')
         status = entry.find('wc-status').get('item')
         if status != 'unversioned' and path != 'version':
-            msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
+            msg.append('File "%s" has pending change (status="%s")' % (path, status))
     if msg:
-        msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
-    return '\n'.join( msg )
+        msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!')
+    return '\n'.join(msg)
 
-def svn_join_url( base_url, suffix ):
+def svn_join_url(base_url, suffix):
     if not base_url.endswith('/'):
         base_url += '/'
     if suffix.startswith('/'):
         suffix = suffix[1:]
     return base_url + suffix
 
-def svn_check_if_tag_exist( tag_url ):
+def svn_check_if_tag_exist(tag_url):
     """Checks if a tag exist.
     Returns: True if the tag exist, False otherwise.
     """
     try:
-        list_stdout = svn_command( 'list', tag_url )
-    except SVNError, e:
+        list_stdout = svn_command('list', tag_url)
+    except SVNError as e:
         if e.returncode != 1 or not str(e).find('tag_url'):
             raise e
         # otherwise ignore error, meaning tag does not exist
         return False
     return True
 
-def svn_commit( message ):
+def svn_commit(message):
     """Commit the sandbox, providing the specified comment.
     """
-    svn_command( 'ci', '-m', message )
+    svn_command('ci', '-m', message)
 
-def svn_tag_sandbox( tag_url, message ):
+def svn_tag_sandbox(tag_url, message):
     """Makes a tag based on the sandbox revisions.
     """
-    svn_command( 'copy', '-m', message, '.', tag_url )
+    svn_command('copy', '-m', message, '.', tag_url)
 
-def svn_remove_tag( tag_url, message ):
+def svn_remove_tag(tag_url, message):
     """Removes an existing tag.
     """
-    svn_command( 'delete', '-m', message, tag_url )
+    svn_command('delete', '-m', message, tag_url)
 
-def svn_export( tag_url, export_dir ):
+def svn_export(tag_url, export_dir):
     """Exports the tag_url revision to export_dir.
        Target directory, including its parent is created if it does not exist.
        If the directory export_dir exist, it is deleted before export proceed.
     """
-    rmdir_if_exist( export_dir )
-    svn_command( 'export', tag_url, export_dir )
+    rmdir_if_exist(export_dir)
+    svn_command('export', tag_url, export_dir)
 
-def fix_sources_eol( dist_dir ):
+def fix_sources_eol(dist_dir):
     """Set file EOL for tarball distribution.
     """
-    print 'Preparing exported source file EOL for distribution...'
+    print('Preparing exported source file EOL for distribution...')
     prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
-    win_sources = antglob.glob( dist_dir, 
+    win_sources = antglob.glob(dist_dir, 
         includes = '**/*.sln **/*.vcproj',
-        prune_dirs = prune_dirs )
-    unix_sources = antglob.glob( dist_dir,
+        prune_dirs = prune_dirs)
+    unix_sources = antglob.glob(dist_dir,
         includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
         sconscript *.json *.expected AUTHORS LICENSE''',
         excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
-        prune_dirs = prune_dirs )
+        prune_dirs = prune_dirs)
     for path in win_sources:
-        fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
+        fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\r\n')
     for path in unix_sources:
-        fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
+        fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\n')
 
-def download( url, target_path ):
+def download(url, target_path):
     """Download file represented by url to target_path.
     """
-    f = urllib2.urlopen( url )
+    f = urllib2.urlopen(url)
     try:
         data = f.read()
     finally:
         f.close()
-    fout = open( target_path, 'wb' )
+    fout = open(target_path, 'wb')
     try:
-        fout.write( data )
+        fout.write(data)
     finally:
         fout.close()
 
-def check_compile( distcheck_top_dir, platform ):
+def check_compile(distcheck_top_dir, platform):
     cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
-    print 'Running:', ' '.join( cmd )
-    log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
-    flog = open( log_path, 'wb' )
+    print('Running:', ' '.join(cmd))
+    log_path = os.path.join(distcheck_top_dir, 'build-%s.log' % platform)
+    flog = open(log_path, 'wb')
     try:
-        process = subprocess.Popen( cmd,
+        process = subprocess.Popen(cmd,
                                     stdout=flog,
                                     stderr=subprocess.STDOUT,
-                                    cwd=distcheck_top_dir )
+                                    cwd=distcheck_top_dir)
         stdout = process.communicate()[0]
         status = (process.returncode == 0)
     finally:
         flog.close()
     return (status, log_path)
 
-def write_tempfile( content, **kwargs ):
-    fd, path = tempfile.mkstemp( **kwargs )
-    f = os.fdopen( fd, 'wt' )
+def write_tempfile(content, **kwargs):
+    fd, path = tempfile.mkstemp(**kwargs)
+    f = os.fdopen(fd, 'wt')
     try:
-        f.write( content )
+        f.write(content)
     finally:
         f.close()
     return path
@@ -171,34 +181,34 @@
 class SFTPError(Exception):
     pass
 
-def run_sftp_batch( userhost, sftp, batch, retry=0 ):
-    path = write_tempfile( batch, suffix='.sftp', text=True )
+def run_sftp_batch(userhost, sftp, batch, retry=0):
+    path = write_tempfile(batch, suffix='.sftp', text=True)
     # psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
     cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
     error = None
-    for retry_index in xrange(0, max(1,retry)):
+    for retry_index in range(0, max(1,retry)):
         heading = retry_index == 0 and 'Running:' or 'Retrying:'
-        print heading, ' '.join( cmd )
-        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+        print(heading, ' '.join(cmd))
+        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
         stdout = process.communicate()[0]
         if process.returncode != 0:
-            error = SFTPError( 'SFTP batch failed:\n' + stdout )
+            error = SFTPError('SFTP batch failed:\n' + stdout)
         else:
             break
     if error:
         raise error
     return stdout
 
-def sourceforge_web_synchro( sourceforge_project, doc_dir,
-                             user=None, sftp='sftp' ):
+def sourceforge_web_synchro(sourceforge_project, doc_dir,
+                             user=None, sftp='sftp'):
     """Notes: does not synchronize sub-directory of doc-dir.
     """
     userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
-    stdout = run_sftp_batch( userhost, sftp, """
+    stdout = run_sftp_batch(userhost, sftp, """
 cd htdocs
 dir
 exit
-""" )
+""")
     existing_paths = set()
     collect = 0
     for line in stdout.split('\n'):
@@ -212,36 +222,36 @@
         elif collect == 2:
             path = line.strip().split()[-1:]
             if path and path[0] not in ('.', '..'):
-                existing_paths.add( path[0] )
-    upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
+                existing_paths.add(path[0])
+    upload_paths = set([os.path.basename(p) for p in antglob.glob(doc_dir)])
     paths_to_remove = existing_paths - upload_paths
     if paths_to_remove:
-        print 'Removing the following file from web:'
-        print '\n'.join( paths_to_remove )
-        stdout = run_sftp_batch( userhost, sftp, """cd htdocs
+        print('Removing the following files from web:')
+        print('\n'.join(paths_to_remove))
+        stdout = run_sftp_batch(userhost, sftp, """cd htdocs
 rm %s
-exit""" % ' '.join(paths_to_remove) )
-    print 'Uploading %d files:' % len(upload_paths)
+exit""" % ' '.join(paths_to_remove))
+    print('Uploading %d files:' % len(upload_paths))
     batch_size = 10
     upload_paths = list(upload_paths)
     start_time = time.time()
-    for index in xrange(0,len(upload_paths),batch_size):
+    for index in range(0,len(upload_paths),batch_size):
         paths = upload_paths[index:index+batch_size]
         file_per_sec = (time.time() - start_time) / (index+1)
         remaining_files = len(upload_paths) - index
         remaining_sec = file_per_sec * remaining_files
-        print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
-        run_sftp_batch( userhost, sftp, """cd htdocs
+        print('%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec))
+        run_sftp_batch(userhost, sftp, """cd htdocs
 lcd %s
 mput %s
-exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
+exit""" % (doc_dir, ' '.join(paths)), retry=3)
 
-def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
+def sourceforge_release_tarball(sourceforge_project, paths, user=None, sftp='sftp'):
     userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
-    run_sftp_batch( userhost, sftp, """
+    run_sftp_batch(userhost, sftp, """
 mput %s
 exit
-""" % (' '.join(paths),) )
+""" % (' '.join(paths),))
 
 
 def main():
@@ -255,7 +265,7 @@
 
 Must be started in the project top directory.
 
-Warning: --force should only be used when developping/testing the release script.
+Warning: --force should only be used when developing/testing the release script.
 """
     from optparse import OptionParser
     parser = OptionParser(usage=usage)
@@ -282,99 +292,99 @@
     options, args = parser.parse_args()
 
     if len(args) != 2:
-        parser.error( 'release_version missing on command-line.' )
+        parser.error('release_version missing on command-line.')
     release_version = args[0]
     next_version = args[1]
 
     if not options.platforms and not options.no_test:
-        parser.error( 'You must specify either --platform or --no-test option.' )
+        parser.error('You must specify either --platform or --no-test option.')
 
     if options.ignore_pending_commit:
         msg = ''
     else:
         msg = check_no_pending_commit()
     if not msg:
-        print 'Setting version to', release_version
-        set_version( release_version )
-        svn_commit( 'Release ' + release_version )
-        tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
-        if svn_check_if_tag_exist( tag_url ):
+        print('Setting version to', release_version)
+        set_version(release_version)
+        svn_commit('Release ' + release_version)
+        tag_url = svn_join_url(SVN_TAG_ROOT, release_version)
+        if svn_check_if_tag_exist(tag_url):
             if options.retag_release:
-                svn_remove_tag( tag_url, 'Overwriting previous tag' )
+                svn_remove_tag(tag_url, 'Overwriting previous tag')
             else:
-                print 'Aborting, tag %s already exist. Use --retag to overwrite it!' % tag_url
-                sys.exit( 1 )
-        svn_tag_sandbox( tag_url, 'Release ' + release_version )
+                print('Aborting, tag %s already exist. Use --retag to overwrite it!' % tag_url)
+                sys.exit(1)
+        svn_tag_sandbox(tag_url, 'Release ' + release_version)
 
-        print 'Generated doxygen document...'
+        print('Generated doxygen document...')
 ##        doc_dirname = r'jsoncpp-api-html-0.5.0'
 ##        doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
-        doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
+        doc_tarball_path, doc_dirname = doxybuild.build_doc(options, make_release=True)
         doc_distcheck_dir = 'dist/doccheck'
-        tarball.decompress( doc_tarball_path, doc_distcheck_dir )
-        doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
+        tarball.decompress(doc_tarball_path, doc_distcheck_dir)
+        doc_distcheck_top_dir = os.path.join(doc_distcheck_dir, doc_dirname)
         
         export_dir = 'dist/export'
-        svn_export( tag_url, export_dir )
-        fix_sources_eol( export_dir )
+        svn_export(tag_url, export_dir)
+        fix_sources_eol(export_dir)
         
         source_dir = 'jsoncpp-src-' + release_version
         source_tarball_path = 'dist/%s.tar.gz' % source_dir
-        print 'Generating source tarball to', source_tarball_path
-        tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
+        print('Generating source tarball to', source_tarball_path)
+        tarball.make_tarball(source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir)
 
         amalgamation_tarball_path = 'dist/%s-amalgamation.tar.gz' % source_dir
-        print 'Generating amalgamation source tarball to', amalgamation_tarball_path
+        print('Generating amalgamation source tarball to', amalgamation_tarball_path)
         amalgamation_dir = 'dist/amalgamation'
-        amalgamate.amalgamate_source( export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h' )
+        amalgamate.amalgamate_source(export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h')
         amalgamation_source_dir = 'jsoncpp-src-amalgamation' + release_version
-        tarball.make_tarball( amalgamation_tarball_path, [amalgamation_dir],
-                              amalgamation_dir, prefix_dir=amalgamation_source_dir )
+        tarball.make_tarball(amalgamation_tarball_path, [amalgamation_dir],
+                              amalgamation_dir, prefix_dir=amalgamation_source_dir)
 
         # Decompress source tarball, download and install scons-local
         distcheck_dir = 'dist/distcheck'
         distcheck_top_dir = distcheck_dir + '/' + source_dir
-        print 'Decompressing source tarball to', distcheck_dir
-        rmdir_if_exist( distcheck_dir )
-        tarball.decompress( source_tarball_path, distcheck_dir )
+        print('Decompressing source tarball to', distcheck_dir)
+        rmdir_if_exist(distcheck_dir)
+        tarball.decompress(source_tarball_path, distcheck_dir)
         scons_local_path = 'dist/scons-local.tar.gz'
-        print 'Downloading scons-local to', scons_local_path
-        download( SCONS_LOCAL_URL, scons_local_path )
-        print 'Decompressing scons-local to', distcheck_top_dir
-        tarball.decompress( scons_local_path, distcheck_top_dir )
+        print('Downloading scons-local to', scons_local_path)
+        download(SCONS_LOCAL_URL, scons_local_path)
+        print('Decompressing scons-local to', distcheck_top_dir)
+        tarball.decompress(scons_local_path, distcheck_top_dir)
 
         # Run compilation
-        print 'Compiling decompressed tarball'
+        print('Compiling decompressed tarball')
         all_build_status = True
         for platform in options.platforms.split(','):
-            print 'Testing platform:', platform
-            build_status, log_path = check_compile( distcheck_top_dir, platform )
-            print 'see build log:', log_path
-            print build_status and '=> ok' or '=> FAILED'
+            print('Testing platform:', platform)
+            build_status, log_path = check_compile(distcheck_top_dir, platform)
+            print('see build log:', log_path)
+            print(build_status and '=> ok' or '=> FAILED')
             all_build_status = all_build_status and build_status
         if not build_status:
-            print 'Testing failed on at least one platform, aborting...'
-            svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
+            print('Testing failed on at least one platform, aborting...')
+            svn_remove_tag(tag_url, 'Removing tag due to failed testing')
             sys.exit(1)
         if options.user:
             if not options.no_web:
-                print 'Uploading documentation using user', options.user
-                sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
-                print 'Completed documentation upload'
-            print 'Uploading source and documentation tarballs for release using user', options.user
-            sourceforge_release_tarball( SOURCEFORGE_PROJECT,
+                print('Uploading documentation using user', options.user)
+                sourceforge_web_synchro(SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp)
+                print('Completed documentation upload')
+            print('Uploading source and documentation tarballs for release using user', options.user)
+            sourceforge_release_tarball(SOURCEFORGE_PROJECT,
                                          [source_tarball_path, doc_tarball_path],
-                                         user=options.user, sftp=options.sftp )
-            print 'Source and doc release tarballs uploaded'
+                                         user=options.user, sftp=options.sftp)
+            print('Source and doc release tarballs uploaded')
         else:
-            print 'No upload user specified. Web site and download tarbal were not uploaded.'
-            print 'Tarball can be found at:', doc_tarball_path
+            print('No upload user specified. Web site and download tarball were not uploaded.')
+            print('Tarball can be found at:', doc_tarball_path)
 
         # Set next version number and commit            
-        set_version( next_version )
-        svn_commit( 'Released ' + release_version )
+        set_version(next_version)
+        svn_commit('Released ' + release_version)
     else:
-        sys.stderr.write( msg + '\n' )
+        sys.stderr.write(msg + '\n')
  
 if __name__ == '__main__':
     main()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/meson.build b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/meson.build
new file mode 100644
index 0000000..8de947c
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/meson.build
@@ -0,0 +1,116 @@
+project(
+  'jsoncpp',
+  'cpp',
+  version : '1.9.0',
+  default_options : [
+    'buildtype=release',
+    'cpp_std=c++11',
+    'warning_level=1'],
+  license : 'Public Domain',
+  meson_version : '>= 0.50.0')
+
+jsoncpp_ver_arr = meson.project_version().split('.')
+jsoncpp_major_version = jsoncpp_ver_arr[0]
+jsoncpp_minor_version = jsoncpp_ver_arr[1]
+jsoncpp_patch_version = jsoncpp_ver_arr[2]
+
+jsoncpp_cdata = configuration_data()
+jsoncpp_cdata.set('JSONCPP_VERSION', meson.project_version())
+jsoncpp_cdata.set('JSONCPP_VERSION_MAJOR', jsoncpp_major_version)
+jsoncpp_cdata.set('JSONCPP_VERSION_MINOR', jsoncpp_minor_version)
+jsoncpp_cdata.set('JSONCPP_VERSION_PATCH', jsoncpp_patch_version)
+jsoncpp_cdata.set('JSONCPP_USE_SECURE_MEMORY',0)
+
+jsoncpp_gen_sources = configure_file(
+  input : 'src/lib_json/version.h.in',
+  output : 'version.h',
+  configuration : jsoncpp_cdata,
+  install : true,
+  install_dir : join_paths(get_option('prefix'), get_option('includedir'), 'json')
+)
+
+jsoncpp_headers = [
+  'include/json/allocator.h',
+  'include/json/assertions.h',
+  'include/json/autolink.h',
+  'include/json/config.h',
+  'include/json/features.h',
+  'include/json/forwards.h',
+  'include/json/json.h',
+  'include/json/reader.h',
+  'include/json/value.h',
+  'include/json/writer.h']
+jsoncpp_include_directories = include_directories('include')
+
+install_headers(
+  jsoncpp_headers,
+  subdir : 'json')
+
+if get_option('default_library') == 'shared' and meson.get_compiler('cpp').get_id() == 'msvc'
+  dll_export_flag = '-DJSON_DLL_BUILD'
+  dll_import_flag = '-DJSON_DLL'
+else
+  dll_export_flag = []
+  dll_import_flag = []
+endif
+
+jsoncpp_lib = library(
+  'jsoncpp',
+  [ jsoncpp_gen_sources,
+    jsoncpp_headers,
+    'src/lib_json/json_tool.h',
+    'src/lib_json/json_reader.cpp',
+    'src/lib_json/json_value.cpp',
+    'src/lib_json/json_writer.cpp'],
+  soversion : 21,
+  install : true,
+  include_directories : jsoncpp_include_directories,
+  cpp_args: dll_export_flag)
+
+import('pkgconfig').generate(
+  libraries : jsoncpp_lib,
+  version : meson.project_version(),
+  name : 'jsoncpp',
+  filebase : 'jsoncpp',
+  description : 'A C++ library for interacting with JSON')
+
+# for libraries bundling jsoncpp
+jsoncpp_dep = declare_dependency(
+  include_directories : jsoncpp_include_directories,
+  link_with : jsoncpp_lib,
+  version : meson.project_version(),
+  sources : jsoncpp_gen_sources)
+
+# tests
+python = import('python3').find_python()
+
+jsoncpp_test = executable(
+  'jsoncpp_test',
+  [ 'src/test_lib_json/jsontest.cpp',
+    'src/test_lib_json/jsontest.h',
+    'src/test_lib_json/main.cpp',
+    'src/test_lib_json/fuzz.cpp'],
+  include_directories : jsoncpp_include_directories,
+  link_with : jsoncpp_lib,
+  install : false,
+  cpp_args: dll_import_flag)
+test(
+  'unittest_jsoncpp_test',
+  jsoncpp_test)
+
+jsontestrunner = executable(
+  'jsontestrunner',
+  'src/jsontestrunner/main.cpp',
+  include_directories : jsoncpp_include_directories,
+  link_with : jsoncpp_lib,
+  install : false,
+  cpp_args: dll_import_flag)
+test(
+  'unittest_jsontestrunner',
+  python,
+  args : [
+    '-B',
+    join_paths(meson.current_source_dir(), 'test/runjsontests.py'),
+    jsontestrunner,
+    join_paths(meson.current_source_dir(), 'test/data')]
+  )
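
The meson.build added above pushes JSONCPP_VERSION and its MAJOR/MINOR/PATCH components through configure_file() to generate version.h from src/lib_json/version.h.in and installs it under json/. As a rough consumer sketch (not part of the patch, and assuming the usual macro names defined by that template), the generated header can be checked like this:

// Sketch only: print the version macros the generated json/version.h is
// expected to define (macro names taken from jsoncpp's version.h.in template).
#include <json/version.h>
#include <cstdio>

int main() {
  std::printf("jsoncpp %s (%d.%d.%d)\n", JSONCPP_VERSION_STRING,
              JSONCPP_VERSION_MAJOR, JSONCPP_VERSION_MINOR,
              JSONCPP_VERSION_PATCH);
  return 0;
}
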
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/pkg-config/jsoncpp.pc.in b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/pkg-config/jsoncpp.pc.in
new file mode 100644
index 0000000..dea51f5
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/pkg-config/jsoncpp.pc.in
@@ -0,0 +1,9 @@
+libdir=@CMAKE_INSTALL_FULL_LIBDIR@
+includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
+
+Name: jsoncpp
+Description: A C++ library for interacting with JSON
+Version: @JSONCPP_VERSION@
+URL: https://github.com/open-source-parsers/jsoncpp
+Libs: -L${libdir} -ljsoncpp
+Cflags: -I${includedir}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/globtool.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/globtool.py
deleted file mode 100644
index 811140e..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/globtool.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import fnmatch
-import os
-
-def generate( env ): 
-   def Glob( env, includes = None, excludes = None, dir = '.' ):
-      """Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
-       helper function to environment.
-
-       Glob both the file-system files.
-
-       includes: list of file name pattern included in the return list when matched.
-       excludes: list of file name pattern exluced from the return list.
-
-       Example:
-       sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
-      """
-      def filterFilename(path):
-         abs_path = os.path.join( dir, path )
-         if not os.path.isfile(abs_path):
-            return 0
-         fn = os.path.basename(path)
-         match = 0
-         for include in includes:
-            if fnmatch.fnmatchcase( fn, include ):
-               match = 1
-               break
-         if match == 1 and not excludes is None:
-            for exclude in excludes:
-               if fnmatch.fnmatchcase( fn, exclude ):
-                  match = 0
-                  break
-         return match
-      if includes is None:
-         includes = ('*',)
-      elif type(includes) in ( type(''), type(u'') ):
-         includes = (includes,)
-      if type(excludes) in ( type(''), type(u'') ):
-         excludes = (excludes,)
-      dir = env.Dir(dir).abspath
-      paths = os.listdir( dir )
-      def makeAbsFileNode( path ):
-         return env.File( os.path.join( dir, path ) )
-      nodes = filter( filterFilename, paths )
-      return map( makeAbsFileNode, nodes )
-
-   from SCons.Script import Environment
-   Environment.Glob = Glob
-
-def exists(env):
-    """
-    Tool always exists.
-    """
-    return True
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/srcdist.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/srcdist.py
deleted file mode 100644
index 864ff40..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/srcdist.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import os
-import os.path
-from fnmatch import fnmatch
-import targz
-
-##def DoxyfileParse(file_contents):
-##   """
-##   Parse a Doxygen source file and return a dictionary of all the values.
-##   Values will be strings and lists of strings.
-##   """
-##   data = {}
-##
-##   import shlex
-##   lex = shlex.shlex(instream = file_contents, posix = True)
-##   lex.wordchars += "*+./-:"
-##   lex.whitespace = lex.whitespace.replace("\n", "")
-##   lex.escape = ""
-##
-##   lineno = lex.lineno
-##   last_backslash_lineno = lineno
-##   token = lex.get_token()
-##   key = token   # the first token should be a key
-##   last_token = ""
-##   key_token = False
-##   next_key = False
-##   new_data = True
-##
-##   def append_data(data, key, new_data, token):
-##      if new_data or len(data[key]) == 0:
-##         data[key].append(token)
-##      else:
-##         data[key][-1] += token
-##
-##   while token:
-##      if token in ['\n']:
-##         if last_token not in ['\\']:
-##            key_token = True
-##      elif token in ['\\']:
-##         pass
-##      elif key_token:
-##         key = token
-##         key_token = False
-##      else:
-##         if token == "+=":
-##            if not data.has_key(key):
-##               data[key] = list()
-##         elif token == "=":
-##            data[key] = list()
-##         else:
-##            append_data( data, key, new_data, token )
-##            new_data = True
-##
-##      last_token = token
-##      token = lex.get_token()
-##      
-##      if last_token == '\\' and token != '\n':
-##         new_data = False
-##         append_data( data, key, new_data, '\\' )
-##
-##   # compress lists of len 1 into single strings
-##   for (k, v) in data.items():
-##      if len(v) == 0:
-##         data.pop(k)
-##
-##      # items in the following list will be kept as lists and not converted to strings
-##      if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
-##         continue
-##
-##      if len(v) == 1:
-##         data[k] = v[0]
-##
-##   return data
-##
-##def DoxySourceScan(node, env, path):
-##   """
-##   Doxygen Doxyfile source scanner.  This should scan the Doxygen file and add
-##   any files used to generate docs to the list of source files.
-##   """
-##   default_file_patterns = [
-##      '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
-##      '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
-##      '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
-##      '*.py',
-##   ]
-##
-##   default_exclude_patterns = [
-##      '*~',
-##   ]
-##
-##   sources = []
-##
-##   data = DoxyfileParse(node.get_contents())
-##
-##   if data.get("RECURSIVE", "NO") == "YES":
-##      recursive = True
-##   else:
-##      recursive = False
-##
-##   file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
-##   exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
-##
-##   for node in data.get("INPUT", []):
-##      if os.path.isfile(node):
-##         sources.add(node)
-##      elif os.path.isdir(node):
-##         if recursive:
-##            for root, dirs, files in os.walk(node):
-##               for f in files:
-##                  filename = os.path.join(root, f)
-##
-##                  pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
-##                  exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
-##
-##                  if pattern_check and not exclude_check:
-##                     sources.append(filename)
-##         else:
-##            for pattern in file_patterns:
-##               sources.extend(glob.glob("/".join([node, pattern])))
-##   sources = map( lambda path: env.File(path), sources )
-##   return sources
-##
-##
-##def DoxySourceScanCheck(node, env):
-##   """Check if we should scan this file"""
-##   return os.path.isfile(node.path)
-
-def srcDistEmitter(source, target, env):
-##   """Doxygen Doxyfile emitter"""
-##   # possible output formats and their default values and output locations
-##   output_formats = {
-##      "HTML": ("YES", "html"),
-##      "LATEX": ("YES", "latex"),
-##      "RTF": ("NO", "rtf"),
-##      "MAN": ("YES", "man"),
-##      "XML": ("NO", "xml"),
-##   }
-##
-##   data = DoxyfileParse(source[0].get_contents())
-##
-##   targets = []
-##   out_dir = data.get("OUTPUT_DIRECTORY", ".")
-##
-##   # add our output locations
-##   for (k, v) in output_formats.items():
-##      if data.get("GENERATE_" + k, v[0]) == "YES":
-##         targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
-##
-##   # don't clobber targets
-##   for node in targets:
-##      env.Precious(node)
-##
-##   # set up cleaning stuff
-##   for node in targets:
-##      env.Clean(node, node)
-##
-##   return (targets, source)
-   return (target,source)
-
-def generate(env):
-   """
-   Add builders and construction variables for the
-   SrcDist tool.
-   """
-##   doxyfile_scanner = env.Scanner(
-##      DoxySourceScan,
-##      "DoxySourceScan",
-##      scan_check = DoxySourceScanCheck,
-##   )
-
-   if targz.exists(env):
-      srcdist_builder = targz.makeBuilder( srcDistEmitter )
-
-      env['BUILDERS']['SrcDist'] = srcdist_builder
-
-def exists(env):
-   """
-   Make sure srcdist exists.
-   """
-   return targz.exists(env)
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/substinfile.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/substinfile.py
deleted file mode 100644
index 4d30585..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/substinfile.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import re
-from SCons.Script import *  # the usual scons stuff you get in a SConscript
-
-def generate(env):
-    """
-    Add builders and construction variables for the
-    SubstInFile tool.
-
-    Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
-    from the source to the target.
-    The values of SUBST_DICT first have any construction variables expanded
-    (its keys are not expanded).
-    If a value of SUBST_DICT is a python callable function, it is called and
-    the result is expanded as the value.
-    If there's more than one source and more than one target, each target gets
-    substituted from the corresponding source.
-    """
-    def do_subst_in_file(targetfile, sourcefile, dict):
-        """Replace all instances of the keys of dict with their values.
-        For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
-        then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
-        """
-        try:
-            f = open(sourcefile, 'rb')
-            contents = f.read()
-            f.close()
-        except:
-            raise SCons.Errors.UserError, "Can't read source file %s"%sourcefile
-        for (k,v) in dict.items():
-            contents = re.sub(k, v, contents)
-        try:
-            f = open(targetfile, 'wb')
-            f.write(contents)
-            f.close()
-        except:
-            raise SCons.Errors.UserError, "Can't write target file %s"%targetfile
-        return 0 # success
-
-    def subst_in_file(target, source, env):
-        if not env.has_key('SUBST_DICT'):
-            raise SCons.Errors.UserError, "SubstInFile requires SUBST_DICT to be set."
-        d = dict(env['SUBST_DICT']) # copy it
-        for (k,v) in d.items():
-            if callable(v):
-                d[k] = env.subst(v()).replace('\\','\\\\')
-            elif SCons.Util.is_String(v):
-                d[k] = env.subst(v).replace('\\','\\\\')
-            else:
-                raise SCons.Errors.UserError, "SubstInFile: key %s: %s must be a string or callable"%(k, repr(v))
-        for (t,s) in zip(target, source):
-            return do_subst_in_file(str(t), str(s), d)
-
-    def subst_in_file_string(target, source, env):
-        """This is what gets printed on the console."""
-        return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
-                          for (t,s) in zip(target, source)])
-
-    def subst_emitter(target, source, env):
-        """Add dependency from substituted SUBST_DICT to target.
-        Returns original target, source tuple unchanged.
-        """
-        d = env['SUBST_DICT'].copy() # copy it
-        for (k,v) in d.items():
-            if callable(v):
-                d[k] = env.subst(v())
-            elif SCons.Util.is_String(v):
-                d[k]=env.subst(v)
-        Depends(target, SCons.Node.Python.Value(d))
-        return target, source
-
-##    env.Append(TOOLS = 'substinfile')       # this should be automaticaly done by Scons ?!?
-    subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
-    env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
-
-def exists(env):
-    """
-    Make sure tool exists.
-    """
-    return True
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/targz.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/targz.py
deleted file mode 100644
index f543200..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/scons-tools/targz.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""tarball
-
-Tool-specific initialization for tarball.
-
-"""
-
-## Commands to tackle a command based implementation:
-##to unpack on the fly...
-##gunzip < FILE.tar.gz | tar xvf -
-##to pack on the fly...
-##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz 
-
-import os.path
-
-import SCons.Builder
-import SCons.Node.FS
-import SCons.Util
-
-try:
-    import gzip
-    import tarfile
-    internal_targz = 1
-except ImportError:
-    internal_targz = 0
-
-TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
-
-if internal_targz:
-    def targz(target, source, env):
-        def archive_name( path ):
-            path = os.path.normpath( os.path.abspath( path ) )
-            common_path = os.path.commonprefix( (base_dir, path) )
-            archive_name = path[len(common_path):]
-            return archive_name
-            
-        def visit(tar, dirname, names):
-            for name in names:
-                path = os.path.join(dirname, name)
-                if os.path.isfile(path):
-                    tar.add(path, archive_name(path) )
-        compression = env.get('TARGZ_COMPRESSION_LEVEL',TARGZ_DEFAULT_COMPRESSION_LEVEL)
-        base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
-        target_path = str(target[0])
-        fileobj = gzip.GzipFile( target_path, 'wb', compression )
-        tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
-        for source in source:
-            source_path = str(source)
-            if source.isdir():
-                os.path.walk(source_path, visit, tar)
-            else:
-                tar.add(source_path, archive_name(source_path) )      # filename, arcname
-        tar.close()
-
-    targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
-
-    def makeBuilder( emitter = None ):
-        return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
-                                     source_factory = SCons.Node.FS.Entry,
-                                     source_scanner = SCons.Defaults.DirScanner,
-                                     suffix = '$TARGZ_SUFFIX',
-                                     multi = 1)
-    TarGzBuilder = makeBuilder()
-
-    def generate(env):
-        """Add Builders and construction variables for zip to an Environment.
-           The following environnement variables may be set:
-           TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
-           TARGZ_BASEDIR: base-directory used to determine archive name (this allow archive name to be relative
-                          to something other than top-dir).
-        """
-        env['BUILDERS']['TarGz'] = TarGzBuilder
-        env['TARGZ_COM'] = targzAction
-        env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL # range 0-9
-        env['TARGZ_SUFFIX']  = '.tar.gz'
-        env['TARGZ_BASEDIR'] = env.Dir('.')     # Sources archive name are made relative to that directory.
-else:
-    def generate(env):
-        pass
-
-
-def exists(env):
-    return internal_targz
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/CMakeLists.txt
new file mode 100644
index 0000000..0f82a74
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_subdirectory(lib_json)
+if(JSONCPP_WITH_TESTS)
+    add_subdirectory(jsontestrunner)
+    add_subdirectory(test_lib_json)
+endif()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/CMakeLists.txt
new file mode 100644
index 0000000..023a44e
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/CMakeLists.txt
@@ -0,0 +1,36 @@
+find_package(PythonInterp 2.6)
+
+add_executable(jsontestrunner_exe
+               main.cpp
+               )
+
+if(BUILD_SHARED_LIBS)
+    add_compile_definitions( JSON_DLL )
+endif()
+target_link_libraries(jsontestrunner_exe jsoncpp_lib)
+
+set_target_properties(jsontestrunner_exe PROPERTIES OUTPUT_NAME jsontestrunner_exe)
+
+if(PYTHONINTERP_FOUND)
+    # Run end to end parser/writer tests
+    set(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../test)
+    set(RUNJSONTESTS_PATH ${TEST_DIR}/runjsontests.py)
+
+    # Run unit tests in post-build
+    # (default cmake workflow hides away the test result into a file, resulting in poor dev workflow?!?)
+    add_custom_target(jsoncpp_readerwriter_tests
+                      "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" $<TARGET_FILE:jsontestrunner_exe> "${TEST_DIR}/data"
+                      DEPENDS jsontestrunner_exe jsoncpp_test
+                      )
+    add_custom_target(jsoncpp_check DEPENDS jsoncpp_readerwriter_tests)
+
+    ## Create tests for dashboard submission, allows easy review of CI results https://my.cdash.org/index.php?project=jsoncpp
+    add_test(NAME jsoncpp_readerwriter
+             COMMAND "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" $<TARGET_FILE:jsontestrunner_exe> "${TEST_DIR}/data"
+             WORKING_DIRECTORY "${TEST_DIR}/data"
+    )
+    add_test(NAME jsoncpp_readerwriter_json_checker
+             COMMAND "${PYTHON_EXECUTABLE}" -B "${RUNJSONTESTS_PATH}" --with-json-checker  $<TARGET_FILE:jsontestrunner_exe> "${TEST_DIR}/data"
+             WORKING_DIRECTORY "${TEST_DIR}/data"
+    )
+endif()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/main.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/main.cpp
index 74f0216..d2d41aa 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/main.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/main.cpp
@@ -1,293 +1,300 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(_MSC_VER)
+#pragma warning(disable : 4996)
+#endif
+
 /* This executable is used for testing parser/writer using real JSON files.
  */
 
-
-#include <json/json.h>
 #include <algorithm> // sort
-#include <stdio.h>
+#include <cstdio>
+#include <json/json.h>
+#include <sstream>
 
-#if defined(_MSC_VER)  &&  _MSC_VER >= 1310
-# pragma warning( disable: 4996 )     // disable fopen deprecation warning
-#endif
+struct Options {
+  Json::String path;
+  Json::Features features;
+  bool parseOnly;
+  using writeFuncType = Json::String (*)(Json::Value const&);
+  writeFuncType write;
+};
 
-static std::string 
-normalizeFloatingPointStr( double value )
-{
-    char buffer[32];
-    sprintf( buffer, "%.16g", value );
-    buffer[sizeof(buffer)-1] = 0;
-    std::string s( buffer );
-    std::string::size_type index = s.find_last_of( "eE" );
-    if ( index != std::string::npos )
+static Json::String normalizeFloatingPointStr(double value) {
+  char buffer[32];
+  jsoncpp_snprintf(buffer, sizeof(buffer), "%.16g", value);
+  buffer[sizeof(buffer) - 1] = 0;
+  Json::String s(buffer);
+  Json::String::size_type index = s.find_last_of("eE");
+  if (index != Json::String::npos) {
+    Json::String::size_type hasSign =
+        (s[index + 1] == '+' || s[index + 1] == '-') ? 1 : 0;
+    Json::String::size_type exponentStartIndex = index + 1 + hasSign;
+    Json::String normalized = s.substr(0, exponentStartIndex);
+    Json::String::size_type indexDigit =
+        s.find_first_not_of('0', exponentStartIndex);
+    Json::String exponent = "0";
+    if (indexDigit != Json::String::npos) // There is an exponent different
+                                          // from 0
     {
-        std::string::size_type hasSign = (s[index+1] == '+' || s[index+1] == '-') ? 1 : 0;
-        std::string::size_type exponentStartIndex = index + 1 + hasSign;
-        std::string normalized = s.substr( 0, exponentStartIndex );
-        std::string::size_type indexDigit = s.find_first_not_of( '0', exponentStartIndex );
-        std::string exponent = "0";
-        if ( indexDigit != std::string::npos ) // There is an exponent different from 0
-        {
-            exponent = s.substr( indexDigit );
-        }
-        return normalized + exponent;
+      exponent = s.substr(indexDigit);
     }
-    return s;
+    return normalized + exponent;
+  }
+  return s;
 }
 
-
-static std::string
-readInputTestFile( const char *path )
-{
-   FILE *file = fopen( path, "rb" );
-   if ( !file )
-      return std::string("");
-   fseek( file, 0, SEEK_END );
-   long size = ftell( file );
-   fseek( file, 0, SEEK_SET );
-   std::string text;
-   char *buffer = new char[size+1];
-   buffer[size] = 0;
-   if ( fread( buffer, 1, size, file ) == (unsigned long)size )
-      text = buffer;
-   fclose( file );
-   delete[] buffer;
-   return text;
+static Json::String readInputTestFile(const char* path) {
+  FILE* file = fopen(path, "rb");
+  if (!file)
+    return "";
+  fseek(file, 0, SEEK_END);
+  long const size = ftell(file);
+  size_t const usize = static_cast<unsigned long>(size);
+  fseek(file, 0, SEEK_SET);
+  char* buffer = new char[size + 1];
+  buffer[size] = 0;
+  Json::String text;
+  if (fread(buffer, 1, usize, file) == usize)
+    text = buffer;
+  fclose(file);
+  delete[] buffer;
+  return text;
 }
 
 static void
-printValueTree( FILE *fout, Json::Value &value, const std::string &path = "." )
-{
-   switch ( value.type() )
-   {
-   case Json::nullValue:
-      fprintf( fout, "%s=null\n", path.c_str() );
-      break;
-   case Json::intValue:
-      fprintf( fout, "%s=%s\n", path.c_str(), Json::valueToString( value.asLargestInt() ).c_str() );
-      break;
-   case Json::uintValue:
-      fprintf( fout, "%s=%s\n", path.c_str(), Json::valueToString( value.asLargestUInt() ).c_str() );
-      break;
-   case Json::realValue:
-       fprintf( fout, "%s=%s\n", path.c_str(), normalizeFloatingPointStr(value.asDouble()).c_str() );
-      break;
-   case Json::stringValue:
-      fprintf( fout, "%s=\"%s\"\n", path.c_str(), value.asString().c_str() );
-      break;
-   case Json::booleanValue:
-      fprintf( fout, "%s=%s\n", path.c_str(), value.asBool() ? "true" : "false" );
-      break;
-   case Json::arrayValue:
-      {
-         fprintf( fout, "%s=[]\n", path.c_str() );
-         int size = value.size();
-         for ( int index =0; index < size; ++index )
-         {
-            static char buffer[16];
-            sprintf( buffer, "[%d]", index );
-            printValueTree( fout, value[index], path + buffer );
-         }
-      }
-      break;
-   case Json::objectValue:
-      {
-         fprintf( fout, "%s={}\n", path.c_str() );
-         Json::Value::Members members( value.getMemberNames() );
-         std::sort( members.begin(), members.end() );
-         std::string suffix = *(path.end()-1) == '.' ? "" : ".";
-         for ( Json::Value::Members::iterator it = members.begin(); 
-               it != members.end(); 
-               ++it )
-         {
-            const std::string &name = *it;
-            printValueTree( fout, value[name], path + suffix + name );
-         }
-      }
-      break;
-   default:
-      break;
-   }
+printValueTree(FILE* fout, Json::Value& value, const Json::String& path = ".") {
+  if (value.hasComment(Json::commentBefore)) {
+    fprintf(fout, "%s\n", value.getComment(Json::commentBefore).c_str());
+  }
+  switch (value.type()) {
+  case Json::nullValue:
+    fprintf(fout, "%s=null\n", path.c_str());
+    break;
+  case Json::intValue:
+    fprintf(fout, "%s=%s\n", path.c_str(),
+            Json::valueToString(value.asLargestInt()).c_str());
+    break;
+  case Json::uintValue:
+    fprintf(fout, "%s=%s\n", path.c_str(),
+            Json::valueToString(value.asLargestUInt()).c_str());
+    break;
+  case Json::realValue:
+    fprintf(fout, "%s=%s\n", path.c_str(),
+            normalizeFloatingPointStr(value.asDouble()).c_str());
+    break;
+  case Json::stringValue:
+    fprintf(fout, "%s=\"%s\"\n", path.c_str(), value.asString().c_str());
+    break;
+  case Json::booleanValue:
+    fprintf(fout, "%s=%s\n", path.c_str(), value.asBool() ? "true" : "false");
+    break;
+  case Json::arrayValue: {
+    fprintf(fout, "%s=[]\n", path.c_str());
+    Json::ArrayIndex size = value.size();
+    for (Json::ArrayIndex index = 0; index < size; ++index) {
+      static char buffer[16];
+      jsoncpp_snprintf(buffer, sizeof(buffer), "[%u]", index);
+      printValueTree(fout, value[index], path + buffer);
+    }
+  } break;
+  case Json::objectValue: {
+    fprintf(fout, "%s={}\n", path.c_str());
+    Json::Value::Members members(value.getMemberNames());
+    std::sort(members.begin(), members.end());
+    Json::String suffix = *(path.end() - 1) == '.' ? "" : ".";
+    for (auto name : members) {
+      printValueTree(fout, value[name], path + suffix + name);
+    }
+  } break;
+  default:
+    break;
+  }
+
+  if (value.hasComment(Json::commentAfter)) {
+    fprintf(fout, "%s\n", value.getComment(Json::commentAfter).c_str());
+  }
 }
 
-
-static int
-parseAndSaveValueTree( const std::string &input, 
-                       const std::string &actual,
-                       const std::string &kind,
-                       Json::Value &root,
-                       const Json::Features &features,
-                       bool parseOnly )
-{
-   Json::Reader reader( features );
-   bool parsingSuccessful = reader.parse( input, root );
-   if ( !parsingSuccessful )
-   {
-      printf( "Failed to parse %s file: \n%s\n", 
-              kind.c_str(),
-              reader.getFormattedErrorMessages().c_str() );
-      return 1;
-   }
-
-   if ( !parseOnly )
-   {
-      FILE *factual = fopen( actual.c_str(), "wt" );
-      if ( !factual )
-      {
-         printf( "Failed to create %s actual file.\n", kind.c_str() );
-         return 2;
-      }
-      printValueTree( factual, root );
-      fclose( factual );
-   }
-   return 0;
-}
-
-
-static int
-rewriteValueTree( const std::string &rewritePath, 
-                  const Json::Value &root, 
-                  std::string &rewrite )
-{
-   //Json::FastWriter writer;
-   //writer.enableYAMLCompatibility();
-   Json::StyledWriter writer;
-   rewrite = writer.write( root );
-   FILE *fout = fopen( rewritePath.c_str(), "wt" );
-   if ( !fout )
-   {
-      printf( "Failed to create rewrite file: %s\n", rewritePath.c_str() );
+static int parseAndSaveValueTree(const Json::String& input,
+                                 const Json::String& actual,
+                                 const Json::String& kind,
+                                 const Json::Features& features,
+                                 bool parseOnly,
+                                 Json::Value* root) {
+  Json::Reader reader(features);
+  bool parsingSuccessful =
+      reader.parse(input.data(), input.data() + input.size(), *root);
+  if (!parsingSuccessful) {
+    printf("Failed to parse %s file: \n%s\n", kind.c_str(),
+           reader.getFormattedErrorMessages().c_str());
+    return 1;
+  }
+  if (!parseOnly) {
+    FILE* factual = fopen(actual.c_str(), "wt");
+    if (!factual) {
+      printf("Failed to create %s actual file.\n", kind.c_str());
       return 2;
-   }
-   fprintf( fout, "%s\n", rewrite.c_str() );
-   fclose( fout );
-   return 0;
+    }
+    printValueTree(factual, *root);
+    fclose(factual);
+  }
+  return 0;
+}
+// static Json::String useFastWriter(Json::Value const& root) {
+//   Json::FastWriter writer;
+//   writer.enableYAMLCompatibility();
+//   return writer.write(root);
+// }
+static Json::String useStyledWriter(Json::Value const& root) {
+  Json::StyledWriter writer;
+  return writer.write(root);
+}
+static Json::String useStyledStreamWriter(Json::Value const& root) {
+  Json::StyledStreamWriter writer;
+  Json::OStringStream sout;
+  writer.write(sout, root);
+  return sout.str();
+}
+static Json::String useBuiltStyledStreamWriter(Json::Value const& root) {
+  Json::StreamWriterBuilder builder;
+  return Json::writeString(builder, root);
+}
+static int rewriteValueTree(const Json::String& rewritePath,
+                            const Json::Value& root,
+                            Options::writeFuncType write,
+                            Json::String* rewrite) {
+  *rewrite = write(root);
+  FILE* fout = fopen(rewritePath.c_str(), "wt");
+  if (!fout) {
+    printf("Failed to create rewrite file: %s\n", rewritePath.c_str());
+    return 2;
+  }
+  fprintf(fout, "%s\n", rewrite->c_str());
+  fclose(fout);
+  return 0;
 }
 
-
-static std::string
-removeSuffix( const std::string &path, 
-              const std::string &extension )
-{
-   if ( extension.length() >= path.length() )
-      return std::string("");
-   std::string suffix = path.substr( path.length() - extension.length() );
-   if ( suffix != extension )
-      return std::string("");
-   return path.substr( 0, path.length() - extension.length() );
+static Json::String removeSuffix(const Json::String& path,
+                                 const Json::String& extension) {
+  if (extension.length() >= path.length())
+    return Json::String("");
+  Json::String suffix = path.substr(path.length() - extension.length());
+  if (suffix != extension)
+    return Json::String("");
+  return path.substr(0, path.length() - extension.length());
 }
 
-
-static void
-printConfig()
-{
-   // Print the configuration used to compile JsonCpp
+static void printConfig() {
+// Print the configuration used to compile JsonCpp
 #if defined(JSON_NO_INT64)
-   printf( "JSON_NO_INT64=1\n" );
+  printf("JSON_NO_INT64=1\n");
 #else
-   printf( "JSON_NO_INT64=0\n" );
+  printf("JSON_NO_INT64=0\n");
 #endif
 }
 
-
-static int 
-printUsage( const char *argv[] )
-{
-   printf( "Usage: %s [--strict] input-json-file", argv[0] );
-   return 3;
+static int printUsage(const char* argv[]) {
+  printf("Usage: %s [--strict] input-json-file", argv[0]);
+  return 3;
 }
 
-
-int
-parseCommandLine( int argc, const char *argv[], 
-                  Json::Features &features, std::string &path,
-                  bool &parseOnly )
-{
-   parseOnly = false;
-   if ( argc < 2 )
-   {
-      return printUsage( argv );
-   }
-
-   int index = 1;
-   if ( std::string(argv[1]) == "--json-checker" )
-   {
-      features = Json::Features::strictMode();
-      parseOnly = true;
-      ++index;
-   }
-
-   if ( std::string(argv[1]) == "--json-config" )
-   {
-      printConfig();
-      return 3;
-   }
-
-   if ( index == argc  ||  index + 1 < argc )
-   {
-      return printUsage( argv );
-   }
-
-   path = argv[index];
-   return 0;
+static int parseCommandLine(int argc, const char* argv[], Options* opts) {
+  opts->parseOnly = false;
+  opts->write = &useStyledWriter;
+  if (argc < 2) {
+    return printUsage(argv);
+  }
+  int index = 1;
+  if (Json::String(argv[index]) == "--json-checker") {
+    opts->features = Json::Features::strictMode();
+    opts->parseOnly = true;
+    ++index;
+  }
+  if (Json::String(argv[index]) == "--json-config") {
+    printConfig();
+    return 3;
+  }
+  if (Json::String(argv[index]) == "--json-writer") {
+    ++index;
+    Json::String const writerName(argv[index++]);
+    if (writerName == "StyledWriter") {
+      opts->write = &useStyledWriter;
+    } else if (writerName == "StyledStreamWriter") {
+      opts->write = &useStyledStreamWriter;
+    } else if (writerName == "BuiltStyledStreamWriter") {
+      opts->write = &useBuiltStyledStreamWriter;
+    } else {
+      printf("Unknown '--json-writer %s'\n", writerName.c_str());
+      return 4;
+    }
+  }
+  if (index == argc || index + 1 < argc) {
+    return printUsage(argv);
+  }
+  opts->path = argv[index];
+  return 0;
 }
+static int runTest(Options const& opts) {
+  int exitCode = 0;
 
+  Json::String input = readInputTestFile(opts.path.c_str());
+  if (input.empty()) {
+    printf("Failed to read input or empty input: %s\n", opts.path.c_str());
+    return 3;
+  }
 
-int main( int argc, const char *argv[] )
-{
-   std::string path;
-   Json::Features features;
-   bool parseOnly;
-   int exitCode = parseCommandLine( argc, argv, features, path, parseOnly );
-   if ( exitCode != 0 )
-   {
+  Json::String basePath = removeSuffix(opts.path, ".json");
+  if (!opts.parseOnly && basePath.empty()) {
+    printf("Bad input path. Path does not end with '.expected':\n%s\n",
+           opts.path.c_str());
+    return 3;
+  }
+
+  Json::String const actualPath = basePath + ".actual";
+  Json::String const rewritePath = basePath + ".rewrite";
+  Json::String const rewriteActualPath = basePath + ".actual-rewrite";
+
+  Json::Value root;
+  exitCode = parseAndSaveValueTree(input, actualPath, "input", opts.features,
+                                   opts.parseOnly, &root);
+  if (exitCode || opts.parseOnly) {
+    return exitCode;
+  }
+  Json::String rewrite;
+  exitCode = rewriteValueTree(rewritePath, root, opts.write, &rewrite);
+  if (exitCode) {
+    return exitCode;
+  }
+  Json::Value rewriteRoot;
+  exitCode = parseAndSaveValueTree(rewrite, rewriteActualPath, "rewrite",
+                                   opts.features, opts.parseOnly, &rewriteRoot);
+  if (exitCode) {
+    return exitCode;
+  }
+  return 0;
+}
+int main(int argc, const char* argv[]) {
+  Options opts;
+  try {
+    int exitCode = parseCommandLine(argc, argv, &opts);
+    if (exitCode != 0) {
+      printf("Failed to parse command-line.");
       return exitCode;
-   }
-
-   try
-   {
-      std::string input = readInputTestFile( path.c_str() );
-      if ( input.empty() )
-      {
-         printf( "Failed to read input or empty input: %s\n", path.c_str() );
-         return 3;
-      }
-
-      std::string basePath = removeSuffix( argv[1], ".json" );
-      if ( !parseOnly  &&  basePath.empty() )
-      {
-         printf( "Bad input path. Path does not end with '.expected':\n%s\n", path.c_str() );
-         return 3;
-      }
-
-      std::string actualPath = basePath + ".actual";
-      std::string rewritePath = basePath + ".rewrite";
-      std::string rewriteActualPath = basePath + ".actual-rewrite";
-
-      Json::Value root;
-      exitCode = parseAndSaveValueTree( input, actualPath, "input", root, features, parseOnly );
-      if ( exitCode == 0  &&  !parseOnly )
-      {
-         std::string rewrite;
-         exitCode = rewriteValueTree( rewritePath, root, rewrite );
-         if ( exitCode == 0 )
-         {
-            Json::Value rewriteRoot;
-            exitCode = parseAndSaveValueTree( rewrite, rewriteActualPath, 
-               "rewrite", rewriteRoot, features, parseOnly );
-         }
-      }
-   }
-   catch ( const std::exception &e )
-   {
-      printf( "Unhandled exception:\n%s\n", e.what() );
-      exitCode = 1;
-   }
-
-   return exitCode;
+    }
+    return runTest(opts);
+  } catch (const std::exception& e) {
+    printf("Unhandled exception:\n%s\n", e.what());
+    return 1;
+  }
 }
 
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
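
The rewritten jsontestrunner main.cpp above routes serialization through the Options::writeFuncType pointer chosen by the new --json-writer option, with useBuiltStyledStreamWriter() exercising the non-deprecated Json::StreamWriterBuilder path while parsing still goes through the legacy Json::Reader. A minimal standalone sketch of that same parse-then-rewrite round trip (illustration only, not part of the patch; the sample JSON string is made up) could look like this:

// Minimal round-trip sketch: parse with the legacy Json::Reader, as
// parseAndSaveValueTree() still does, then re-serialize through
// Json::StreamWriterBuilder, the path used by useBuiltStyledStreamWriter().
#include <json/json.h>
#include <cstdio>
#include <string>

int main() {
  const std::string input = "{ \"name\": \"demo\", \"values\": [1, 2.5, true] }";

  Json::Reader reader;  // legacy reader kept by the test runner
  Json::Value root;
  if (!reader.parse(input.data(), input.data() + input.size(), root)) {
    std::printf("parse failed:\n%s\n",
                reader.getFormattedErrorMessages().c_str());
    return 1;
  }

  Json::StreamWriterBuilder builder;  // non-deprecated writer factory
  const std::string rewrite = Json::writeString(builder, root);
  std::printf("%s\n", rewrite.c_str());
  return 0;
}

Passing --json-writer BuiltStyledStreamWriter on the runner's command line selects this same StreamWriterBuilder route.
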
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/sconscript b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/sconscript
deleted file mode 100644
index 6e68e31..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/jsontestrunner/sconscript
+++ /dev/null
@@ -1,9 +0,0 @@
-Import( 'env_testing buildJSONTests' )
-
-buildJSONTests( env_testing, Split( """
-    main.cpp
-     """ ),
-    'jsontestrunner' )
-
-# For 'check' to work, 'libs' must be built first.
-env_testing.Depends('jsontestrunner', '#libs')
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/CMakeLists.txt
new file mode 100644
index 0000000..2392092
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/CMakeLists.txt
@@ -0,0 +1,146 @@
+if( CMAKE_COMPILER_IS_GNUCXX )
+    #Get compiler version.
+    execute_process( COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
+                     OUTPUT_VARIABLE GNUCXX_VERSION )
+
+    #-Werror=* was introduced -after- GCC 4.1.2
+    if( GNUCXX_VERSION VERSION_GREATER 4.1.2 )
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=strict-aliasing")
+    endif()
+endif( CMAKE_COMPILER_IS_GNUCXX )
+
+include(CheckIncludeFileCXX)
+include(CheckTypeSize)
+include(CheckStructHasMember)
+include(CheckCXXSymbolExists)
+
+check_include_file_cxx(clocale HAVE_CLOCALE)
+check_cxx_symbol_exists(localeconv clocale HAVE_LOCALECONV)
+
+if(CMAKE_VERSION VERSION_LESS 3.0.0)
+    # The "LANGUAGE CXX" parameter is not supported in CMake versions below 3,
+    # so the C compiler and header has to be used.
+    check_include_file(locale.h HAVE_LOCALE_H)
+    set(CMAKE_EXTRA_INCLUDE_FILES locale.h)
+    check_type_size("struct lconv" LCONV_SIZE)
+    unset(CMAKE_EXTRA_INCLUDE_FILES)
+    check_struct_has_member("struct lconv" decimal_point locale.h HAVE_DECIMAL_POINT)
+else()
+    set(CMAKE_EXTRA_INCLUDE_FILES clocale)
+    check_type_size(lconv LCONV_SIZE LANGUAGE CXX)
+    unset(CMAKE_EXTRA_INCLUDE_FILES)
+    check_struct_has_member(lconv decimal_point clocale HAVE_DECIMAL_POINT LANGUAGE CXX)
+endif()
+
+if(NOT (HAVE_CLOCALE AND HAVE_LCONV_SIZE AND HAVE_DECIMAL_POINT AND HAVE_LOCALECONV))
+    message(WARNING "Locale functionality is not supported")
+    add_compile_definitions(JSONCPP_NO_LOCALE_SUPPORT)
+endif()
+
+set( JSONCPP_INCLUDE_DIR ../../include )
+
+set( PUBLIC_HEADERS
+    ${JSONCPP_INCLUDE_DIR}/json/config.h
+    ${JSONCPP_INCLUDE_DIR}/json/forwards.h
+    ${JSONCPP_INCLUDE_DIR}/json/features.h
+    ${JSONCPP_INCLUDE_DIR}/json/value.h
+    ${JSONCPP_INCLUDE_DIR}/json/reader.h
+    ${JSONCPP_INCLUDE_DIR}/json/writer.h
+    ${JSONCPP_INCLUDE_DIR}/json/assertions.h
+    ${PROJECT_BINARY_DIR}/include/json/version.h
+    )
+
+source_group( "Public API" FILES ${PUBLIC_HEADERS} )
+
+set(jsoncpp_sources
+                json_tool.h
+                json_reader.cpp
+                json_valueiterator.inl
+                json_value.cpp
+                json_writer.cpp
+                version.h.in)
+
+# Install instructions for this target
+if(JSONCPP_WITH_CMAKE_PACKAGE)
+    set(INSTALL_EXPORT EXPORT jsoncpp)
+else(JSONCPP_WITH_CMAKE_PACKAGE)
+    set(INSTALL_EXPORT)
+endif()
+
+if(BUILD_SHARED_LIBS)
+    add_compile_definitions( JSON_DLL_BUILD )
+endif()
+
+
+add_library(jsoncpp_lib ${PUBLIC_HEADERS} ${jsoncpp_sources})
+set_target_properties( jsoncpp_lib PROPERTIES VERSION ${JSONCPP_VERSION} SOVERSION ${JSONCPP_SOVERSION})
+set_target_properties( jsoncpp_lib PROPERTIES OUTPUT_NAME jsoncpp
+                        DEBUG_OUTPUT_NAME jsoncpp${DEBUG_LIBNAME_SUFFIX} )
+set_target_properties( jsoncpp_lib PROPERTIES POSITION_INDEPENDENT_CODE ON)
+
+# Set library's runtime search path on OSX
+if(APPLE)
+    set_target_properties( jsoncpp_lib PROPERTIES INSTALL_RPATH "@loader_path/." )
+endif()
+
+# Specify compiler features required when compiling a given target.
+# See https://cmake.org/cmake/help/v3.1/prop_gbl/CMAKE_CXX_KNOWN_FEATURES.html#prop_gbl:CMAKE_CXX_KNOWN_FEATURES
+# for complete list of features available
+target_compile_features(jsoncpp_lib PUBLIC
+        cxx_std_11 # Compiler mode is aware of C++ 11.
+        #MSVC 1900 cxx_alignas # Alignment control alignas, as defined in N2341.
+        #MSVC 1900 cxx_alignof # Alignment control alignof, as defined in N2341.
+        #MSVC 1900 cxx_attributes # Generic attributes, as defined in N2761.
+        cxx_auto_type # Automatic type deduction, as defined in N1984.
+        #MSVC 1900 cxx_constexpr # Constant expressions, as defined in N2235.
+        cxx_decltype # Decltype, as defined in N2343.
+        cxx_default_function_template_args # Default template arguments for function templates, as defined in DR226
+        cxx_defaulted_functions # Defaulted functions, as defined in N2346.
+        #MSVC 1900 cxx_defaulted_move_initializers # Defaulted move initializers, as defined in N3053.
+        cxx_delegating_constructors # Delegating constructors, as defined in N1986.
+        #MSVC 1900 cxx_deleted_functions # Deleted functions, as defined in N2346.
+        cxx_enum_forward_declarations # Enum forward declarations, as defined in N2764.
+        cxx_explicit_conversions # Explicit conversion operators, as defined in N2437.
+        cxx_extended_friend_declarations # Extended friend declarations, as defined in N1791.
+        cxx_extern_templates # Extern templates, as defined in N1987.
+        cxx_final # Override control final keyword, as defined in N2928, N3206 and N3272.
+        #MSVC 1900 cxx_func_identifier # Predefined __func__ identifier, as defined in N2340.
+        #MSVC 1900 cxx_generalized_initializers # Initializer lists, as defined in N2672.
+        #MSVC 1900 cxx_inheriting_constructors # Inheriting constructors, as defined in N2540.
+        #MSVC 1900 cxx_inline_namespaces # Inline namespaces, as defined in N2535.
+        cxx_lambdas # Lambda functions, as defined in N2927.
+        #MSVC 1900 cxx_local_type_template_args # Local and unnamed types as template arguments, as defined in N2657.
+        cxx_long_long_type # long long type, as defined in N1811.
+        #MSVC 1900 cxx_noexcept # Exception specifications, as defined in N3050.
+        #MSVC 1900 cxx_nonstatic_member_init # Non-static data member initialization, as defined in N2756.
+        cxx_nullptr # Null pointer, as defined in N2431.
+        cxx_override # Override control override keyword, as defined in N2928, N3206 and N3272.
+        cxx_range_for # Range-based for, as defined in N2930.
+        cxx_raw_string_literals # Raw string literals, as defined in N2442.
+        #MSVC 1900 cxx_reference_qualified_functions # Reference qualified functions, as defined in N2439.
+        cxx_right_angle_brackets # Right angle bracket parsing, as defined in N1757.
+        cxx_rvalue_references # R-value references, as defined in N2118.
+        #MSVC 1900 cxx_sizeof_member # Size of non-static data members, as defined in N2253.
+        cxx_static_assert # Static assert, as defined in N1720.
+        cxx_strong_enums # Strongly typed enums, as defined in N2347.
+        #MSVC 1900 cxx_thread_local # Thread-local variables, as defined in N2659.
+        cxx_trailing_return_types # Automatic function return type, as defined in N2541.
+        #MSVC 1900 cxx_unicode_literals # Unicode string literals, as defined in N2442.
+        cxx_uniform_initialization # Uniform initialization, as defined in N2640.
+        #MSVC 1900 cxx_unrestricted_unions # Unrestricted unions, as defined in N2544.
+        #MSVC 1900 cxx_user_literals # User-defined literals, as defined in N2765.
+        cxx_variadic_macros # Variadic macros, as defined in N1653.
+        cxx_variadic_templates # Variadic templates, as defined in N2242.
+)
+
+install( TARGETS jsoncpp_lib ${INSTALL_EXPORT}
+          RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+          LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+          ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
+
+if(NOT CMAKE_VERSION VERSION_LESS 2.8.11)
+    target_include_directories( jsoncpp_lib PUBLIC
+                                $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+                                $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/${JSONCPP_INCLUDE_DIR}>
+                                $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/include/json>)
+endif()
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_batchallocator.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_batchallocator.h
deleted file mode 100644
index 2a7c024..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_batchallocator.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
-# define JSONCPP_BATCHALLOCATOR_H_INCLUDED
-
-# include <stdlib.h>
-# include <assert.h>
-
-# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION
-
-namespace Json {
-
-/* Fast memory allocator.
- *
- * This memory allocator allocates memory for a batch of object (specified by
- * the page size, the number of object in each page).
- *
- * It does not allow the destruction of a single object. All the allocated objects
- * can be destroyed at once. The memory can be either released or reused for future
- * allocation.
- * 
- * The in-place new operator must be used to construct the object using the pointer
- * returned by allocate.
- */
-template<typename AllocatedType
-        ,const unsigned int objectPerAllocation>
-class BatchAllocator
-{
-public:
-   BatchAllocator( unsigned int objectsPerPage = 255 )
-      : freeHead_( 0 )
-      , objectsPerPage_( objectsPerPage )
-   {
-//      printf( "Size: %d => %s\n", sizeof(AllocatedType), typeid(AllocatedType).name() );
-      assert( sizeof(AllocatedType) * objectPerAllocation >= sizeof(AllocatedType *) ); // We must be able to store a slist in the object free space.
-      assert( objectsPerPage >= 16 );
-      batches_ = allocateBatch( 0 );   // allocated a dummy page
-      currentBatch_ = batches_;
-   }
-
-   ~BatchAllocator()
-   {
-      for ( BatchInfo *batch = batches_; batch;  )
-      {
-         BatchInfo *nextBatch = batch->next_;
-         free( batch );
-         batch = nextBatch;
-      }
-   }
-
-   /// allocate space for an array of objectPerAllocation object.
-   /// @warning it is the responsability of the caller to call objects constructors.
-   AllocatedType *allocate()
-   {
-      if ( freeHead_ ) // returns node from free list.
-      {
-         AllocatedType *object = freeHead_;
-         freeHead_ = *(AllocatedType **)object;
-         return object;
-      }
-      if ( currentBatch_->used_ == currentBatch_->end_ )
-      {
-         currentBatch_ = currentBatch_->next_;
-         while ( currentBatch_  &&  currentBatch_->used_ == currentBatch_->end_ )
-            currentBatch_ = currentBatch_->next_;
-
-         if ( !currentBatch_  ) // no free batch found, allocate a new one
-         { 
-            currentBatch_ = allocateBatch( objectsPerPage_ );
-            currentBatch_->next_ = batches_; // insert at the head of the list
-            batches_ = currentBatch_;
-         }
-      }
-      AllocatedType *allocated = currentBatch_->used_;
-      currentBatch_->used_ += objectPerAllocation;
-      return allocated;
-   }
-
-   /// Release the object.
-   /// @warning it is the responsability of the caller to actually destruct the object.
-   void release( AllocatedType *object )
-   {
-      assert( object != 0 );
-      *(AllocatedType **)object = freeHead_;
-      freeHead_ = object;
-   }
-
-private:
-   struct BatchInfo
-   {
-      BatchInfo *next_;
-      AllocatedType *used_;
-      AllocatedType *end_;
-      AllocatedType buffer_[objectPerAllocation];
-   };
-
-   // disabled copy constructor and assignement operator.
-   BatchAllocator( const BatchAllocator & );
-   void operator =( const BatchAllocator &);
-
-   static BatchInfo *allocateBatch( unsigned int objectsPerPage )
-   {
-      const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType)* objectPerAllocation
-                                + sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
-      BatchInfo *batch = static_cast<BatchInfo*>( malloc( mallocSize ) );
-      batch->next_ = 0;
-      batch->used_ = batch->buffer_;
-      batch->end_ = batch->buffer_ + objectsPerPage;
-      return batch;
-   }
-
-   BatchInfo *batches_;
-   BatchInfo *currentBatch_;
-   /// Head of a single linked list within the allocated space of freeed object
-   AllocatedType *freeHead_;
-   unsigned int objectsPerPage_;
-};
-
-
-} // namespace Json
-
-# endif // ifndef JSONCPP_DOC_INCLUDE_IMPLEMENTATION
-
-#endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalarray.inl b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalarray.inl
deleted file mode 100644
index 5e8b8ef..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalarray.inl
+++ /dev/null
@@ -1,454 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-// included by json_value.cpp
-
-namespace Json {
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalArray
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-ValueArrayAllocator::~ValueArrayAllocator()
-{
-}
-
-// //////////////////////////////////////////////////////////////////
-// class DefaultValueArrayAllocator
-// //////////////////////////////////////////////////////////////////
-#ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-class DefaultValueArrayAllocator : public ValueArrayAllocator
-{
-public: // overridden from ValueArrayAllocator
-   virtual ~DefaultValueArrayAllocator()
-   {
-   }
-
-   virtual ValueInternalArray *newArray()
-   {
-      return new ValueInternalArray();
-   }
-
-   virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other )
-   {
-      return new ValueInternalArray( other );
-   }
-
-   virtual void destructArray( ValueInternalArray *array )
-   {
-      delete array;
-   }
-
-   virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                          ValueInternalArray::PageIndex &indexCount,
-                                          ValueInternalArray::PageIndex minNewIndexCount )
-   {
-      ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1;
-      if ( minNewIndexCount > newIndexCount )
-         newIndexCount = minNewIndexCount;
-      void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount );
-      JSON_ASSERT_MESSAGE(newIndexes, "Couldn't realloc.");
-      indexCount = newIndexCount;
-      indexes = static_cast<Value **>( newIndexes );
-   }
-   virtual void releaseArrayPageIndex( Value **indexes, 
-                                       ValueInternalArray::PageIndex indexCount )
-   {
-      if ( indexes )
-         free( indexes );
-   }
-
-   virtual Value *allocateArrayPage()
-   {
-      return static_cast<Value *>( malloc( sizeof(Value) * ValueInternalArray::itemsPerPage ) );
-   }
-
-   virtual void releaseArrayPage( Value *value )
-   {
-      if ( value )
-         free( value );
-   }
-};
-
-#else // #ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-/// @todo make this thread-safe (lock when accessign batch allocator)
-class DefaultValueArrayAllocator : public ValueArrayAllocator
-{
-public: // overridden from ValueArrayAllocator
-   virtual ~DefaultValueArrayAllocator()
-   {
-   }
-
-   virtual ValueInternalArray *newArray()
-   {
-      ValueInternalArray *array = arraysAllocator_.allocate();
-      new (array) ValueInternalArray(); // placement new
-      return array;
-   }
-
-   virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other )
-   {
-      ValueInternalArray *array = arraysAllocator_.allocate();
-      new (array) ValueInternalArray( other ); // placement new
-      return array;
-   }
-
-   virtual void destructArray( ValueInternalArray *array )
-   {
-      if ( array )
-      {
-         array->~ValueInternalArray();
-         arraysAllocator_.release( array );
-      }
-   }
-
-   virtual void reallocateArrayPageIndex( Value **&indexes, 
-                                          ValueInternalArray::PageIndex &indexCount,
-                                          ValueInternalArray::PageIndex minNewIndexCount )
-   {
-      ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1;
-      if ( minNewIndexCount > newIndexCount )
-         newIndexCount = minNewIndexCount;
-      void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount );
-      JSON_ASSERT_MESSAGE(newIndexes, "Couldn't realloc.");
-      indexCount = newIndexCount;
-      indexes = static_cast<Value **>( newIndexes );
-   }
-   virtual void releaseArrayPageIndex( Value **indexes, 
-                                       ValueInternalArray::PageIndex indexCount )
-   {
-      if ( indexes )
-         free( indexes );
-   }
-
-   virtual Value *allocateArrayPage()
-   {
-      return static_cast<Value *>( pagesAllocator_.allocate() );
-   }
-
-   virtual void releaseArrayPage( Value *value )
-   {
-      if ( value )
-         pagesAllocator_.release( value );
-   }
-private:
-   BatchAllocator<ValueInternalArray,1> arraysAllocator_;
-   BatchAllocator<Value,ValueInternalArray::itemsPerPage> pagesAllocator_;
-};
-#endif // #ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-
-static ValueArrayAllocator *&arrayAllocator()
-{
-   static DefaultValueArrayAllocator defaultAllocator;
-   static ValueArrayAllocator *arrayAllocator = &defaultAllocator;
-   return arrayAllocator;
-}
-
-static struct DummyArrayAllocatorInitializer {
-   DummyArrayAllocatorInitializer() 
-   {
-      arrayAllocator();      // ensure arrayAllocator() statics are initialized before main().
-   }
-} dummyArrayAllocatorInitializer;
-
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalArray
-// //////////////////////////////////////////////////////////////////
-bool 
-ValueInternalArray::equals( const IteratorState &x, 
-                            const IteratorState &other )
-{
-   return x.array_ == other.array_  
-          &&  x.currentItemIndex_ == other.currentItemIndex_  
-          &&  x.currentPageIndex_ == other.currentPageIndex_;
-}
-
-
-void 
-ValueInternalArray::increment( IteratorState &it )
-{
-   JSON_ASSERT_MESSAGE( it.array_  &&
-      (it.currentPageIndex_ - it.array_->pages_)*itemsPerPage + it.currentItemIndex_
-      != it.array_->size_,
-      "ValueInternalArray::increment(): moving iterator beyond end" );
-   ++(it.currentItemIndex_);
-   if ( it.currentItemIndex_ == itemsPerPage )
-   {
-      it.currentItemIndex_ = 0;
-      ++(it.currentPageIndex_);
-   }
-}
-
-
-void 
-ValueInternalArray::decrement( IteratorState &it )
-{
-   JSON_ASSERT_MESSAGE( it.array_  &&  it.currentPageIndex_ == it.array_->pages_ 
-                        &&  it.currentItemIndex_ == 0,
-      "ValueInternalArray::decrement(): moving iterator beyond end" );
-   if ( it.currentItemIndex_ == 0 )
-   {
-      it.currentItemIndex_ = itemsPerPage-1;
-      --(it.currentPageIndex_);
-   }
-   else
-   {
-      --(it.currentItemIndex_);
-   }
-}
-
-
-Value &
-ValueInternalArray::unsafeDereference( const IteratorState &it )
-{
-   return (*(it.currentPageIndex_))[it.currentItemIndex_];
-}
-
-
-Value &
-ValueInternalArray::dereference( const IteratorState &it )
-{
-   JSON_ASSERT_MESSAGE( it.array_  &&
-      (it.currentPageIndex_ - it.array_->pages_)*itemsPerPage + it.currentItemIndex_
-      < it.array_->size_,
-      "ValueInternalArray::dereference(): dereferencing invalid iterator" );
-   return unsafeDereference( it );
-}
-
-void 
-ValueInternalArray::makeBeginIterator( IteratorState &it ) const
-{
-   it.array_ = const_cast<ValueInternalArray *>( this );
-   it.currentItemIndex_ = 0;
-   it.currentPageIndex_ = pages_;
-}
-
-
-void 
-ValueInternalArray::makeIterator( IteratorState &it, ArrayIndex index ) const
-{
-   it.array_ = const_cast<ValueInternalArray *>( this );
-   it.currentItemIndex_ = index % itemsPerPage;
-   it.currentPageIndex_ = pages_ + index / itemsPerPage;
-}
-
-
-void 
-ValueInternalArray::makeEndIterator( IteratorState &it ) const
-{
-   makeIterator( it, size_ );
-}
-
-
-ValueInternalArray::ValueInternalArray()
-   : pages_( 0 )
-   , size_( 0 )
-   , pageCount_( 0 )
-{
-}
-
-
-ValueInternalArray::ValueInternalArray( const ValueInternalArray &other )
-   : pages_( 0 )
-   , size_( other.size_ )
-   , pageCount_( 0 )
-{
-   PageIndex minNewPages = other.size_ / itemsPerPage;
-   arrayAllocator()->reallocateArrayPageIndex( pages_, pageCount_, minNewPages );
-   JSON_ASSERT_MESSAGE( pageCount_ >= minNewPages, 
-                        "ValueInternalArray::reserve(): bad reallocation" );
-   IteratorState itOther;
-   other.makeBeginIterator( itOther );
-   Value *value;
-   for ( ArrayIndex index = 0; index < size_; ++index, increment(itOther) )
-   {
-      if ( index % itemsPerPage == 0 )
-      {
-         PageIndex pageIndex = index / itemsPerPage;
-         value = arrayAllocator()->allocateArrayPage();
-         pages_[pageIndex] = value;
-      }
-      new (value) Value( dereference( itOther ) );
-   }
-}
-
-
-ValueInternalArray &
-ValueInternalArray::operator =( const ValueInternalArray &other )
-{
-   ValueInternalArray temp( other );
-   swap( temp );
-   return *this;
-}
-
-
-ValueInternalArray::~ValueInternalArray()
-{
-   // destroy all constructed items
-   IteratorState it;
-   IteratorState itEnd;
-   makeBeginIterator( it);
-   makeEndIterator( itEnd );
-   for ( ; !equals(it,itEnd); increment(it) )
-   {
-      Value *value = &dereference(it);
-      value->~Value();
-   }
-   // release all pages
-   PageIndex lastPageIndex = size_ / itemsPerPage;
-   for ( PageIndex pageIndex = 0; pageIndex < lastPageIndex; ++pageIndex )
-      arrayAllocator()->releaseArrayPage( pages_[pageIndex] );
-   // release pages index
-   arrayAllocator()->releaseArrayPageIndex( pages_, pageCount_ );
-}
-
-
-void 
-ValueInternalArray::swap( ValueInternalArray &other )
-{
-   Value **tempPages = pages_;
-   pages_ = other.pages_;
-   other.pages_ = tempPages;
-   ArrayIndex tempSize = size_;
-   size_ = other.size_;
-   other.size_ = tempSize;
-   PageIndex tempPageCount = pageCount_;
-   pageCount_ = other.pageCount_;
-   other.pageCount_ = tempPageCount;
-}
-
-void 
-ValueInternalArray::clear()
-{
-   ValueInternalArray dummy;
-   swap( dummy );
-}
-
-
-void 
-ValueInternalArray::resize( ArrayIndex newSize )
-{
-   if ( newSize == 0 )
-      clear();
-   else if ( newSize < size_ )
-   {
-      IteratorState it;
-      IteratorState itEnd;
-      makeIterator( it, newSize );
-      makeIterator( itEnd, size_ );
-      for ( ; !equals(it,itEnd); increment(it) )
-      {
-         Value *value = &dereference(it);
-         value->~Value();
-      }
-      PageIndex pageIndex = (newSize + itemsPerPage - 1) / itemsPerPage;
-      PageIndex lastPageIndex = size_ / itemsPerPage;
-      for ( ; pageIndex < lastPageIndex; ++pageIndex )
-         arrayAllocator()->releaseArrayPage( pages_[pageIndex] );
-      size_ = newSize;
-   }
-   else if ( newSize > size_ )
-      resolveReference( newSize );
-}
-
-
-void 
-ValueInternalArray::makeIndexValid( ArrayIndex index )
-{
-   // Need to enlarge page index ?
-   if ( index >= pageCount_ * itemsPerPage )
-   {
-      PageIndex minNewPages = (index + 1) / itemsPerPage;
-      arrayAllocator()->reallocateArrayPageIndex( pages_, pageCount_, minNewPages );
-      JSON_ASSERT_MESSAGE( pageCount_ >= minNewPages, "ValueInternalArray::reserve(): bad reallocation" );
-   }
-
-   // Need to allocate new pages ?
-   ArrayIndex nextPageIndex = 
-      (size_ % itemsPerPage) != 0 ? size_ - (size_%itemsPerPage) + itemsPerPage
-                                  : size_;
-   if ( nextPageIndex <= index )
-   {
-      PageIndex pageIndex = nextPageIndex / itemsPerPage;
-      PageIndex pageToAllocate = (index - nextPageIndex) / itemsPerPage + 1;
-      for ( ; pageToAllocate-- > 0; ++pageIndex )
-         pages_[pageIndex] = arrayAllocator()->allocateArrayPage();
-   }
-
-   // Initialize all new entries
-   IteratorState it;
-   IteratorState itEnd;
-   makeIterator( it, size_ );
-   size_ = index + 1;
-   makeIterator( itEnd, size_ );
-   for ( ; !equals(it,itEnd); increment(it) )
-   {
-      Value *value = &dereference(it);
-      new (value) Value(); // Construct a default value using placement new
-   }
-}
-
-Value &
-ValueInternalArray::resolveReference( ArrayIndex index )
-{
-   if ( index >= size_ )
-      makeIndexValid( index );
-   return pages_[index/itemsPerPage][index%itemsPerPage];
-}
-
-Value *
-ValueInternalArray::find( ArrayIndex index ) const
-{
-   if ( index >= size_ )
-      return 0;
-   return &(pages_[index/itemsPerPage][index%itemsPerPage]);
-}
-
-ValueInternalArray::ArrayIndex 
-ValueInternalArray::size() const
-{
-   return size_;
-}
-
-int 
-ValueInternalArray::distance( const IteratorState &x, const IteratorState &y )
-{
-   return indexOf(y) - indexOf(x);
-}
-
-
-ValueInternalArray::ArrayIndex 
-ValueInternalArray::indexOf( const IteratorState &iterator )
-{
-   if ( !iterator.array_ )
-      return ArrayIndex(-1);
-   return ArrayIndex(
-      (iterator.currentPageIndex_ - iterator.array_->pages_) * itemsPerPage 
-      + iterator.currentItemIndex_ );
-}
-
-
-int 
-ValueInternalArray::compare( const ValueInternalArray &other ) const
-{
-   int sizeDiff( size_ - other.size_ );
-   if ( sizeDiff != 0 )
-      return sizeDiff;
-   
-   for ( ArrayIndex index =0; index < size_; ++index )
-   {
-      int diff = pages_[index/itemsPerPage][index%itemsPerPage].compare( 
-         other.pages_[index/itemsPerPage][index%itemsPerPage] );
-      if ( diff != 0 )
-         return diff;
-   }
-   return 0;
-}
-
-} // namespace Json
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalmap.inl b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalmap.inl
deleted file mode 100644
index f2fa160..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_internalmap.inl
+++ /dev/null
@@ -1,615 +0,0 @@
-// Copyright 2007-2010 Baptiste Lepilleur
-// Distributed under MIT license, or public domain if desired and
-// recognized in your jurisdiction.
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
-
-// included by json_value.cpp
-
-namespace Json {
-
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class ValueInternalMap
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-/** \internal MUST be safely initialized using memset( this, 0, sizeof(ValueInternalLink) );
-   * This optimization is used by the fast allocator.
-   */
-ValueInternalLink::ValueInternalLink()
-   : previous_( 0 )
-   , next_( 0 )
-{
-}
-
-ValueInternalLink::~ValueInternalLink()
-{ 
-   for ( int index =0; index < itemPerLink; ++index )
-   {
-      if ( !items_[index].isItemAvailable() )
-      {
-         if ( !items_[index].isMemberNameStatic() )
-            free( keys_[index] );
-      }
-      else
-         break;
-   }
-}
-
-
-
-ValueMapAllocator::~ValueMapAllocator()
-{
-}
-
-#ifdef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-class DefaultValueMapAllocator : public ValueMapAllocator
-{
-public: // overridden from ValueMapAllocator
-   virtual ValueInternalMap *newMap()
-   {
-      return new ValueInternalMap();
-   }
-
-   virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other )
-   {
-      return new ValueInternalMap( other );
-   }
-
-   virtual void destructMap( ValueInternalMap *map )
-   {
-      delete map;
-   }
-
-   virtual ValueInternalLink *allocateMapBuckets( unsigned int size )
-   {
-      return new ValueInternalLink[size];
-   }
-
-   virtual void releaseMapBuckets( ValueInternalLink *links )
-   {
-      delete [] links;
-   }
-
-   virtual ValueInternalLink *allocateMapLink()
-   {
-      return new ValueInternalLink();
-   }
-
-   virtual void releaseMapLink( ValueInternalLink *link )
-   {
-      delete link;
-   }
-};
-#else
-/// @todo make this thread-safe (lock when accessign batch allocator)
-class DefaultValueMapAllocator : public ValueMapAllocator
-{
-public: // overridden from ValueMapAllocator
-   virtual ValueInternalMap *newMap()
-   {
-      ValueInternalMap *map = mapsAllocator_.allocate();
-      new (map) ValueInternalMap(); // placement new
-      return map;
-   }
-
-   virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other )
-   {
-      ValueInternalMap *map = mapsAllocator_.allocate();
-      new (map) ValueInternalMap( other ); // placement new
-      return map;
-   }
-
-   virtual void destructMap( ValueInternalMap *map )
-   {
-      if ( map )
-      {
-         map->~ValueInternalMap();
-         mapsAllocator_.release( map );
-      }
-   }
-
-   virtual ValueInternalLink *allocateMapBuckets( unsigned int size )
-   {
-      return new ValueInternalLink[size];
-   }
-
-   virtual void releaseMapBuckets( ValueInternalLink *links )
-   {
-      delete [] links;
-   }
-
-   virtual ValueInternalLink *allocateMapLink()
-   {
-      ValueInternalLink *link = linksAllocator_.allocate();
-      memset( link, 0, sizeof(ValueInternalLink) );
-      return link;
-   }
-
-   virtual void releaseMapLink( ValueInternalLink *link )
-   {
-      link->~ValueInternalLink();
-      linksAllocator_.release( link );
-   }
-private:
-   BatchAllocator<ValueInternalMap,1> mapsAllocator_;
-   BatchAllocator<ValueInternalLink,1> linksAllocator_;
-};
-#endif
-
-static ValueMapAllocator *&mapAllocator()
-{
-   static DefaultValueMapAllocator defaultAllocator;
-   static ValueMapAllocator *mapAllocator = &defaultAllocator;
-   return mapAllocator;
-}
-
-static struct DummyMapAllocatorInitializer {
-   DummyMapAllocatorInitializer() 
-   {
-      mapAllocator();      // ensure mapAllocator() statics are initialized before main().
-   }
-} dummyMapAllocatorInitializer;
-
-
-
-// h(K) = value * K >> w ; with w = 32 & K prime w.r.t. 2^32.
-
-/*
-use linked list hash map. 
-buckets array is a container.
-linked list element contains 6 key/values. (memory = (16+4) * 6 + 4 = 124)
-value have extra state: valid, available, deleted
-*/
-
-
-ValueInternalMap::ValueInternalMap()
-   : buckets_( 0 )
-   , tailLink_( 0 )
-   , bucketsSize_( 0 )
-   , itemCount_( 0 )
-{
-}
-
-
-ValueInternalMap::ValueInternalMap( const ValueInternalMap &other )
-   : buckets_( 0 )
-   , tailLink_( 0 )
-   , bucketsSize_( 0 )
-   , itemCount_( 0 )
-{
-   reserve( other.itemCount_ );
-   IteratorState it;
-   IteratorState itEnd;
-   other.makeBeginIterator( it );
-   other.makeEndIterator( itEnd );
-   for ( ; !equals(it,itEnd); increment(it) )
-   {
-      bool isStatic;
-      const char *memberName = key( it, isStatic );
-      const Value &aValue = value( it );
-      resolveReference(memberName, isStatic) = aValue;
-   }
-}
-
-
-ValueInternalMap &
-ValueInternalMap::operator =( const ValueInternalMap &other )
-{
-   ValueInternalMap dummy( other );
-   swap( dummy );
-   return *this;
-}
-
-
-ValueInternalMap::~ValueInternalMap()
-{
-   if ( buckets_ )
-   {
-      for ( BucketIndex bucketIndex =0; bucketIndex < bucketsSize_; ++bucketIndex )
-      {
-         ValueInternalLink *link = buckets_[bucketIndex].next_;
-         while ( link )
-         {
-            ValueInternalLink *linkToRelease = link;
-            link = link->next_;
-            mapAllocator()->releaseMapLink( linkToRelease );
-         }
-      }
-      mapAllocator()->releaseMapBuckets( buckets_ );
-   }
-}
-
-
-void 
-ValueInternalMap::swap( ValueInternalMap &other )
-{
-   ValueInternalLink *tempBuckets = buckets_;
-   buckets_ = other.buckets_;
-   other.buckets_ = tempBuckets;
-   ValueInternalLink *tempTailLink = tailLink_;
-   tailLink_ = other.tailLink_;
-   other.tailLink_ = tempTailLink;
-   BucketIndex tempBucketsSize = bucketsSize_;
-   bucketsSize_ = other.bucketsSize_;
-   other.bucketsSize_ = tempBucketsSize;
-   BucketIndex tempItemCount = itemCount_;
-   itemCount_ = other.itemCount_;
-   other.itemCount_ = tempItemCount;
-}
-
-
-void 
-ValueInternalMap::clear()
-{
-   ValueInternalMap dummy;
-   swap( dummy );
-}
-
-
-ValueInternalMap::BucketIndex 
-ValueInternalMap::size() const
-{
-   return itemCount_;
-}
-
-bool 
-ValueInternalMap::reserveDelta( BucketIndex growth )
-{
-   return reserve( itemCount_ + growth );
-}
-
-bool 
-ValueInternalMap::reserve( BucketIndex newItemCount )
-{
-   if ( !buckets_  &&  newItemCount > 0 )
-   {
-      buckets_ = mapAllocator()->allocateMapBuckets( 1 );
-      bucketsSize_ = 1;
-      tailLink_ = &buckets_[0];
-   }
-//   BucketIndex idealBucketCount = (newItemCount + ValueInternalLink::itemPerLink) / ValueInternalLink::itemPerLink;
-   return true;
-}
-
-
-const Value *
-ValueInternalMap::find( const char *key ) const
-{
-   if ( !bucketsSize_ )
-      return 0;
-   HashKey hashedKey = hash( key );
-   BucketIndex bucketIndex = hashedKey % bucketsSize_;
-   for ( const ValueInternalLink *current = &buckets_[bucketIndex]; 
-         current != 0; 
-         current = current->next_ )
-   {
-      for ( BucketIndex index=0; index < ValueInternalLink::itemPerLink; ++index )
-      {
-         if ( current->items_[index].isItemAvailable() )
-            return 0;
-         if ( strcmp( key, current->keys_[index] ) == 0 )
-            return &current->items_[index];
-      }
-   }
-   return 0;
-}
-
-
-Value *
-ValueInternalMap::find( const char *key )
-{
-   const ValueInternalMap *constThis = this;
-   return const_cast<Value *>( constThis->find( key ) );
-}
-
-
-Value &
-ValueInternalMap::resolveReference( const char *key,
-                                    bool isStatic )
-{
-   HashKey hashedKey = hash( key );
-   if ( bucketsSize_ )
-   {
-      BucketIndex bucketIndex = hashedKey % bucketsSize_;
-      ValueInternalLink **previous = 0;
-      BucketIndex index;
-      for ( ValueInternalLink *current = &buckets_[bucketIndex]; 
-            current != 0; 
-            previous = &current->next_, current = current->next_ )
-      {
-         for ( index=0; index < ValueInternalLink::itemPerLink; ++index )
-         {
-            if ( current->items_[index].isItemAvailable() )
-               return setNewItem( key, isStatic, current, index );
-            if ( strcmp( key, current->keys_[index] ) == 0 )
-               return current->items_[index];
-         }
-      }
-   }
-
-   reserveDelta( 1 );
-   return unsafeAdd( key, isStatic, hashedKey );
-}
-
-
-void 
-ValueInternalMap::remove( const char *key )
-{
-   HashKey hashedKey = hash( key );
-   if ( !bucketsSize_ )
-      return;
-   BucketIndex bucketIndex = hashedKey % bucketsSize_;
-   for ( ValueInternalLink *link = &buckets_[bucketIndex]; 
-         link != 0; 
-         link = link->next_ )
-   {
-      BucketIndex index;
-      for ( index =0; index < ValueInternalLink::itemPerLink; ++index )
-      {
-         if ( link->items_[index].isItemAvailable() )
-            return;
-         if ( strcmp( key, link->keys_[index] ) == 0 )
-         {
-            doActualRemove( link, index, bucketIndex );
-            return;
-         }
-      }
-   }
-}
-
-void 
-ValueInternalMap::doActualRemove( ValueInternalLink *link, 
-                                  BucketIndex index,
-                                  BucketIndex bucketIndex )
-{
-   // find last item of the bucket and swap it with the 'removed' one.
-   // set removed items flags to 'available'.
-   // if last page only contains 'available' items, then desallocate it (it's empty)
-   ValueInternalLink *&lastLink = getLastLinkInBucket( index );
-   BucketIndex lastItemIndex = 1; // a link can never be empty, so start at 1
-   for ( ;   
-         lastItemIndex < ValueInternalLink::itemPerLink; 
-         ++lastItemIndex ) // may be optimized with dicotomic search
-   {
-      if ( lastLink->items_[lastItemIndex].isItemAvailable() )
-         break;
-   }
-   
-   BucketIndex lastUsedIndex = lastItemIndex - 1;
-   Value *valueToDelete = &link->items_[index];
-   Value *valueToPreserve = &lastLink->items_[lastUsedIndex];
-   if ( valueToDelete != valueToPreserve )
-      valueToDelete->swap( *valueToPreserve );
-   if ( lastUsedIndex == 0 )  // page is now empty
-   {  // remove it from bucket linked list and delete it.
-      ValueInternalLink *linkPreviousToLast = lastLink->previous_;
-      if ( linkPreviousToLast != 0 )   // can not deleted bucket link.
-      {
-         mapAllocator()->releaseMapLink( lastLink );
-         linkPreviousToLast->next_ = 0;
-         lastLink = linkPreviousToLast;
-      }
-   }
-   else
-   {
-      Value dummy;
-      valueToPreserve->swap( dummy ); // restore deleted to default Value.
-      valueToPreserve->setItemUsed( false );
-   }
-   --itemCount_;
-}
-
-
-ValueInternalLink *&
-ValueInternalMap::getLastLinkInBucket( BucketIndex bucketIndex )
-{
-   if ( bucketIndex == bucketsSize_ - 1 )
-      return tailLink_;
-   ValueInternalLink *&previous = buckets_[bucketIndex+1].previous_;
-   if ( !previous )
-      previous = &buckets_[bucketIndex];
-   return previous;
-}
-
-
-Value &
-ValueInternalMap::setNewItem( const char *key, 
-                              bool isStatic,
-                              ValueInternalLink *link, 
-                              BucketIndex index )
-{
-   char *duplicatedKey = makeMemberName( key );
-   ++itemCount_;
-   link->keys_[index] = duplicatedKey;
-   link->items_[index].setItemUsed();
-   link->items_[index].setMemberNameIsStatic( isStatic );
-   return link->items_[index]; // items already default constructed.
-}
-
-
-Value &
-ValueInternalMap::unsafeAdd( const char *key, 
-                             bool isStatic, 
-                             HashKey hashedKey )
-{
-   JSON_ASSERT_MESSAGE( bucketsSize_ > 0, "ValueInternalMap::unsafeAdd(): internal logic error." );
-   BucketIndex bucketIndex = hashedKey % bucketsSize_;
-   ValueInternalLink *&previousLink = getLastLinkInBucket( bucketIndex );
-   ValueInternalLink *link = previousLink;
-   BucketIndex index;
-   for ( index =0; index < ValueInternalLink::itemPerLink; ++index )
-   {
-      if ( link->items_[index].isItemAvailable() )
-         break;
-   }
-   if ( index == ValueInternalLink::itemPerLink ) // need to add a new page
-   {
-      ValueInternalLink *newLink = mapAllocator()->allocateMapLink();
-      index = 0;
-      link->next_ = newLink;
-      previousLink = newLink;
-      link = newLink;
-   }
-   return setNewItem( key, isStatic, link, index );
-}
-
-
-ValueInternalMap::HashKey 
-ValueInternalMap::hash( const char *key ) const
-{
-   HashKey hash = 0;
-   while ( *key )
-      hash += *key++ * 37;
-   return hash;
-}
-
-
-int 
-ValueInternalMap::compare( const ValueInternalMap &other ) const
-{
-   int sizeDiff( itemCount_ - other.itemCount_ );
-   if ( sizeDiff != 0 )
-      return sizeDiff;
-   // Strict order guaranty is required. Compare all keys FIRST, then compare values.
-   IteratorState it;
-   IteratorState itEnd;
-   makeBeginIterator( it );
-   makeEndIterator( itEnd );
-   for ( ; !equals(it,itEnd); increment(it) )
-   {
-      if ( !other.find( key( it ) ) )
-         return 1;
-   }
-
-   // All keys are equals, let's compare values
-   makeBeginIterator( it );
-   for ( ; !equals(it,itEnd); increment(it) )
-   {
-      const Value *otherValue = other.find( key( it ) );
-      int valueDiff = value(it).compare( *otherValue );
-      if ( valueDiff != 0 )
-         return valueDiff;
-   }
-   return 0;
-}
-
-
-void 
-ValueInternalMap::makeBeginIterator( IteratorState &it ) const
-{
-   it.map_ = const_cast<ValueInternalMap *>( this );
-   it.bucketIndex_ = 0;
-   it.itemIndex_ = 0;
-   it.link_ = buckets_;
-}
-
-
-void 
-ValueInternalMap::makeEndIterator( IteratorState &it ) const
-{
-   it.map_ = const_cast<ValueInternalMap *>( this );
-   it.bucketIndex_ = bucketsSize_;
-   it.itemIndex_ = 0;
-   it.link_ = 0;
-}
-
-
-bool 
-ValueInternalMap::equals( const IteratorState &x, const IteratorState &other )
-{
-   return x.map_ == other.map_  
-          &&  x.bucketIndex_ == other.bucketIndex_  
-          &&  x.link_ == other.link_
-          &&  x.itemIndex_ == other.itemIndex_;
-}
-
-
-void 
-ValueInternalMap::incrementBucket( IteratorState &iterator )
-{
-   ++iterator.bucketIndex_;
-   JSON_ASSERT_MESSAGE( iterator.bucketIndex_ <= iterator.map_->bucketsSize_,
-      "ValueInternalMap::increment(): attempting to iterate beyond end." );
-   if ( iterator.bucketIndex_ == iterator.map_->bucketsSize_ )
-      iterator.link_ = 0;
-   else
-      iterator.link_ = &(iterator.map_->buckets_[iterator.bucketIndex_]);
-   iterator.itemIndex_ = 0;
-}
-
-
-void 
-ValueInternalMap::increment( IteratorState &iterator )
-{
-   JSON_ASSERT_MESSAGE( iterator.map_, "Attempting to iterator using invalid iterator." );
-   ++iterator.itemIndex_;
-   if ( iterator.itemIndex_ == ValueInternalLink::itemPerLink )
-   {
-      JSON_ASSERT_MESSAGE( iterator.link_ != 0,
-         "ValueInternalMap::increment(): attempting to iterate beyond end." );
-      iterator.link_ = iterator.link_->next_;
-      if ( iterator.link_ == 0 )
-         incrementBucket( iterator );
-   }
-   else if ( iterator.link_->items_[iterator.itemIndex_].isItemAvailable() )
-   {
-      incrementBucket( iterator );
-   }
-}
-
-
-void 
-ValueInternalMap::decrement( IteratorState &iterator )
-{
-   if ( iterator.itemIndex_ == 0 )
-   {
-      JSON_ASSERT_MESSAGE( iterator.map_, "Attempting to iterate using invalid iterator." );
-      if ( iterator.link_ == &iterator.map_->buckets_[iterator.bucketIndex_] )
-      {
-         JSON_ASSERT_MESSAGE( iterator.bucketIndex_ > 0, "Attempting to iterate beyond beginning." );
-         --(iterator.bucketIndex_);
-      }
-      iterator.link_ = iterator.link_->previous_;
-      iterator.itemIndex_ = ValueInternalLink::itemPerLink - 1;
-   }
-}
-
-
-const char *
-ValueInternalMap::key( const IteratorState &iterator )
-{
-   JSON_ASSERT_MESSAGE( iterator.link_, "Attempting to iterate using invalid iterator." );
-   return iterator.link_->keys_[iterator.itemIndex_];
-}
-
-const char *
-ValueInternalMap::key( const IteratorState &iterator, bool &isStatic )
-{
-   JSON_ASSERT_MESSAGE( iterator.link_, "Attempting to iterate using invalid iterator." );
-   isStatic = iterator.link_->items_[iterator.itemIndex_].isMemberNameStatic();
-   return iterator.link_->keys_[iterator.itemIndex_];
-}
-
-
-Value &
-ValueInternalMap::value( const IteratorState &iterator )
-{
-   JSON_ASSERT_MESSAGE( iterator.link_, "Attempting to iterate using invalid iterator." );
-   return iterator.link_->items_[iterator.itemIndex_];
-}
-
-
-int 
-ValueInternalMap::distance( const IteratorState &x, const IteratorState &y )
-{
-   int offset = 0;
-   IteratorState it = x;
-   while ( !equals( it, y ) )
-      increment( it );
-   return offset;
-}
-
-} // namespace Json
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_reader.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_reader.cpp
index 1f3873a..005ab26 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_reader.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_reader.cpp
@@ -1,918 +1,2017 @@
-// Copyright 2007-2011 Baptiste Lepilleur
+// Copyright 2007-2011 Baptiste Lepilleur and The JsonCpp Authors
+// Copyright (C) 2016 InfoTeCS JSC. All rights reserved.
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include <json/assertions.h>
-# include <json/reader.h>
-# include <json/value.h>
-# include "json_tool.h"
+#include "json_tool.h"
+#include <json/assertions.h>
+#include <json/reader.h>
+#include <json/value.h>
 #endif // if !defined(JSON_IS_AMALGAMATION)
-#include <utility>
-#include <cstdio>
 #include <cassert>
 #include <cstring>
-#include <stdexcept>
+#include <istream>
+#include <limits>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <utility>
 
-#if _MSC_VER >= 1400 // VC++ 8.0
-#pragma warning( disable : 4996 )   // disable warning about strdup being deprecated.
+#include <cstdio>
+#if __cplusplus >= 201103L
+
+#if !defined(sscanf)
+#define sscanf std::sscanf
 #endif
 
+#endif //__cplusplus
+
+#if defined(_MSC_VER)
+#if !defined(_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES)
+#define _CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES 1
+#endif //_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES
+#endif //_MSC_VER
+
+#if defined(_MSC_VER)
+// Disable warning about strdup being deprecated.
+#pragma warning(disable : 4996)
+#endif
+
+// Define JSONCPP_DEPRECATED_STACK_LIMIT as an appropriate integer at compile
+// time to change the stack limit
+#if !defined(JSONCPP_DEPRECATED_STACK_LIMIT)
+#define JSONCPP_DEPRECATED_STACK_LIMIT 1000
+#endif
+
+static size_t const stackLimit_g =
+    JSONCPP_DEPRECATED_STACK_LIMIT; // see readValue()
+
 namespace Json {
 
+#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520)
+typedef std::unique_ptr<CharReader> CharReaderPtr;
+#else
+typedef std::auto_ptr<CharReader> CharReaderPtr;
+#endif
+
 // Implementation of class Features
 // ////////////////////////////////
 
-Features::Features()
-   : allowComments_( true )
-   , strictRoot_( false )
-{
-}
+Features::Features() = default;
 
+Features Features::all() { return {}; }
 
-Features 
-Features::all()
-{
-   return Features();
-}
-
-
-Features 
-Features::strictMode()
-{
-   Features features;
-   features.allowComments_ = false;
-   features.strictRoot_ = true;
-   return features;
+Features Features::strictMode() {
+  Features features;
+  features.allowComments_ = false;
+  features.strictRoot_ = true;
+  features.allowDroppedNullPlaceholders_ = false;
+  features.allowNumericKeys_ = false;
+  return features;
 }
 
 // Implementation of class Reader
 // ////////////////////////////////
 
-
-static inline bool 
-in( Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4 )
-{
-   return c == c1  ||  c == c2  ||  c == c3  ||  c == c4;
+bool Reader::containsNewLine(Reader::Location begin, Reader::Location end) {
+  for (; begin < end; ++begin)
+    if (*begin == '\n' || *begin == '\r')
+      return true;
+  return false;
 }
 
-static inline bool 
-in( Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4, Reader::Char c5 )
-{
-   return c == c1  ||  c == c2  ||  c == c3  ||  c == c4  ||  c == c5;
-}
-
-
-static bool 
-containsNewLine( Reader::Location begin, 
-                 Reader::Location end )
-{
-   for ( ;begin < end; ++begin )
-      if ( *begin == '\n'  ||  *begin == '\r' )
-         return true;
-   return false;
-}
-
-
 // Class Reader
 // //////////////////////////////////////////////////////////////////
 
 Reader::Reader()
-    : errors_(),
-      document_(),
-      begin_(),
-      end_(),
-      current_(),
-      lastValueEnd_(),
-      lastValue_(),
-      commentsBefore_(),
-      features_( Features::all() ),
-      collectComments_()
-{
+    : errors_(), document_(), commentsBefore_(), features_(Features::all()) {}
+
+Reader::Reader(const Features& features)
+    : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+      lastValue_(), commentsBefore_(), features_(features), collectComments_() {
 }
 
-
-Reader::Reader( const Features &features )
-    : errors_(),
-      document_(),
-      begin_(),
-      end_(),
-      current_(),
-      lastValueEnd_(),
-      lastValue_(),
-      commentsBefore_(),
-      features_( features ),
-      collectComments_()
-{
+bool Reader::parse(const std::string& document,
+                   Value& root,
+                   bool collectComments) {
+  document_.assign(document.begin(), document.end());
+  const char* begin = document_.c_str();
+  const char* end = begin + document_.length();
+  return parse(begin, end, root, collectComments);
 }
 
+bool Reader::parse(std::istream& is, Value& root, bool collectComments) {
+  // std::istream_iterator<char> begin(is);
+  // std::istream_iterator<char> end;
+  // Those would allow streamed input from a file, if parse() were a
+  // template function.
 
-bool
-Reader::parse( const std::string &document, 
-               Value &root,
-               bool collectComments )
-{
-   document_ = document;
-   const char *begin = document_.c_str();
-   const char *end = begin + document_.length();
-   return parse( begin, end, root, collectComments );
+  // Since String is reference-counted, this at least does not
+  // create an extra copy.
+  String doc;
+  std::getline(is, doc, (char)EOF);
+  return parse(doc.data(), doc.data() + doc.size(), root, collectComments);
 }
 
+bool Reader::parse(const char* beginDoc,
+                   const char* endDoc,
+                   Value& root,
+                   bool collectComments) {
+  if (!features_.allowComments_) {
+    collectComments = false;
+  }
 
-bool
-Reader::parse( std::istream& sin,
-               Value &root,
-               bool collectComments )
-{
-   //std::istream_iterator<char> begin(sin);
-   //std::istream_iterator<char> end;
-   // Those would allow streamed input from a file, if parse() were a
-   // template function.
+  begin_ = beginDoc;
+  end_ = endDoc;
+  collectComments_ = collectComments;
+  current_ = begin_;
+  lastValueEnd_ = nullptr;
+  lastValue_ = nullptr;
+  commentsBefore_.clear();
+  errors_.clear();
+  while (!nodes_.empty())
+    nodes_.pop();
+  nodes_.push(&root);
 
-   // Since std::string is reference-counted, this at least does not
-   // create an extra copy.
-   std::string doc;
-   std::getline(sin, doc, (char)EOF);
-   return parse( doc, root, collectComments );
-}
-
-bool 
-Reader::parse( const char *beginDoc, const char *endDoc, 
-               Value &root,
-               bool collectComments )
-{
-   if ( !features_.allowComments_ )
-   {
-      collectComments = false;
-   }
-
-   begin_ = beginDoc;
-   end_ = endDoc;
-   collectComments_ = collectComments;
-   current_ = begin_;
-   lastValueEnd_ = 0;
-   lastValue_ = 0;
-   commentsBefore_ = "";
-   errors_.clear();
-   while ( !nodes_.empty() )
-      nodes_.pop();
-   nodes_.push( &root );
-   
-   bool successful = readValue();
-   Token token;
-   skipCommentTokens( token );
-   if ( collectComments_  &&  !commentsBefore_.empty() )
-      root.setComment( commentsBefore_, commentAfter );
-   if ( features_.strictRoot_ )
-   {
-      if ( !root.isArray()  &&  !root.isObject() )
-      {
-         // Set error location to start of doc, ideally should be first token found in doc
-         token.type_ = tokenError;
-         token.start_ = beginDoc;
-         token.end_ = endDoc;
-         addError( "A valid JSON document must be either an array or an object value.",
-                   token );
-         return false;
-      }
-   }
-   return successful;
-}
-
-
-bool
-Reader::readValue()
-{
-   Token token;
-   skipCommentTokens( token );
-   bool successful = true;
-
-   if ( collectComments_  &&  !commentsBefore_.empty() )
-   {
-      currentValue().setComment( commentsBefore_, commentBefore );
-      commentsBefore_ = "";
-   }
-
-
-   switch ( token.type_ )
-   {
-   case tokenObjectBegin:
-      successful = readObject( token );
-      break;
-   case tokenArrayBegin:
-      successful = readArray( token );
-      break;
-   case tokenNumber:
-      successful = decodeNumber( token );
-      break;
-   case tokenString:
-      successful = decodeString( token );
-      break;
-   case tokenTrue:
-      currentValue() = true;
-      break;
-   case tokenFalse:
-      currentValue() = false;
-      break;
-   case tokenNull:
-      currentValue() = Value();
-      break;
-   default:
-      return addError( "Syntax error: value, object or array expected.", token );
-   }
-
-   if ( collectComments_ )
-   {
-      lastValueEnd_ = current_;
-      lastValue_ = &currentValue();
-   }
-
-   return successful;
-}
-
-
-void 
-Reader::skipCommentTokens( Token &token )
-{
-   if ( features_.allowComments_ )
-   {
-      do
-      {
-         readToken( token );
-      }
-      while ( token.type_ == tokenComment );
-   }
-   else
-   {
-      readToken( token );
-   }
-}
-
-
-bool 
-Reader::expectToken( TokenType type, Token &token, const char *message )
-{
-   readToken( token );
-   if ( token.type_ != type )
-      return addError( message, token );
-   return true;
-}
-
-
-bool 
-Reader::readToken( Token &token )
-{
-   skipSpaces();
-   token.start_ = current_;
-   Char c = getNextChar();
-   bool ok = true;
-   switch ( c )
-   {
-   case '{':
-      token.type_ = tokenObjectBegin;
-      break;
-   case '}':
-      token.type_ = tokenObjectEnd;
-      break;
-   case '[':
-      token.type_ = tokenArrayBegin;
-      break;
-   case ']':
-      token.type_ = tokenArrayEnd;
-      break;
-   case '"':
-      token.type_ = tokenString;
-      ok = readString();
-      break;
-   case '/':
-      token.type_ = tokenComment;
-      ok = readComment();
-      break;
-   case '0':
-   case '1':
-   case '2':
-   case '3':
-   case '4':
-   case '5':
-   case '6':
-   case '7':
-   case '8':
-   case '9':
-   case '-':
-      token.type_ = tokenNumber;
-      readNumber();
-      break;
-   case 't':
-      token.type_ = tokenTrue;
-      ok = match( "rue", 3 );
-      break;
-   case 'f':
-      token.type_ = tokenFalse;
-      ok = match( "alse", 4 );
-      break;
-   case 'n':
-      token.type_ = tokenNull;
-      ok = match( "ull", 3 );
-      break;
-   case ',':
-      token.type_ = tokenArraySeparator;
-      break;
-   case ':':
-      token.type_ = tokenMemberSeparator;
-      break;
-   case 0:
-      token.type_ = tokenEndOfStream;
-      break;
-   default:
-      ok = false;
-      break;
-   }
-   if ( !ok )
+  bool successful = readValue();
+  Token token;
+  skipCommentTokens(token);
+  if (collectComments_ && !commentsBefore_.empty())
+    root.setComment(commentsBefore_, commentAfter);
+  if (features_.strictRoot_) {
+    if (!root.isArray() && !root.isObject()) {
+      // Set error location to start of doc, ideally should be first token found
+      // in doc
       token.type_ = tokenError;
-   token.end_ = current_;
-   return true;
-}
-
-
-void 
-Reader::skipSpaces()
-{
-   while ( current_ != end_ )
-   {
-      Char c = *current_;
-      if ( c == ' '  ||  c == '\t'  ||  c == '\r'  ||  c == '\n' )
-         ++current_;
-      else
-         break;
-   }
-}
-
-
-bool 
-Reader::match( Location pattern, 
-               int patternLength )
-{
-   if ( end_ - current_ < patternLength )
+      token.start_ = beginDoc;
+      token.end_ = endDoc;
+      addError(
+          "A valid JSON document must be either an array or an object value.",
+          token);
       return false;
-   int index = patternLength;
-   while ( index-- )
-      if ( current_[index] != pattern[index] )
-         return false;
-   current_ += patternLength;
-   return true;
+    }
+  }
+  return successful;
 }
 
+bool Reader::readValue() {
+  // readValue() may call itself only if it calls readObject() or readArray().
+  // These methods execute nodes_.push() just before and nodes_.pop() just
+  // after calling readValue(). parse() executes one nodes_.push(), so > instead
+  // of >=.
+  if (nodes_.size() > stackLimit_g)
+    throwRuntimeError("Exceeded stackLimit in readValue().");
 
-bool
-Reader::readComment()
-{
-   Location commentBegin = current_ - 1;
-   Char c = getNextChar();
-   bool successful = false;
-   if ( c == '*' )
-      successful = readCStyleComment();
-   else if ( c == '/' )
-      successful = readCppStyleComment();
-   if ( !successful )
-      return false;
+  Token token;
+  skipCommentTokens(token);
+  bool successful = true;
 
-   if ( collectComments_ )
-   {
-      CommentPlacement placement = commentBefore;
-      if ( lastValueEnd_  &&  !containsNewLine( lastValueEnd_, commentBegin ) )
-      {
-         if ( c != '*'  ||  !containsNewLine( commentBegin, current_ ) )
-            placement = commentAfterOnSameLine;
-      }
+  if (collectComments_ && !commentsBefore_.empty()) {
+    currentValue().setComment(commentsBefore_, commentBefore);
+    commentsBefore_.clear();
+  }
 
-      addComment( commentBegin, current_, placement );
-   }
-   return true;
+  switch (token.type_) {
+  case tokenObjectBegin:
+    successful = readObject(token);
+    currentValue().setOffsetLimit(current_ - begin_);
+    break;
+  case tokenArrayBegin:
+    successful = readArray(token);
+    currentValue().setOffsetLimit(current_ - begin_);
+    break;
+  case tokenNumber:
+    successful = decodeNumber(token);
+    break;
+  case tokenString:
+    successful = decodeString(token);
+    break;
+  case tokenTrue: {
+    Value v(true);
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenFalse: {
+    Value v(false);
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenNull: {
+    Value v;
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenArraySeparator:
+  case tokenObjectEnd:
+  case tokenArrayEnd:
+    if (features_.allowDroppedNullPlaceholders_) {
+      // "Un-read" the current token and mark the current value as a null
+      // token.
+      current_--;
+      Value v;
+      currentValue().swapPayload(v);
+      currentValue().setOffsetStart(current_ - begin_ - 1);
+      currentValue().setOffsetLimit(current_ - begin_);
+      break;
+    } // Else, fall through...
+  default:
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+    return addError("Syntax error: value, object or array expected.", token);
+  }
+
+  if (collectComments_) {
+    lastValueEnd_ = current_;
+    lastValue_ = &currentValue();
+  }
+
+  return successful;
 }
 
-
-void 
-Reader::addComment( Location begin, 
-                    Location end, 
-                    CommentPlacement placement )
-{
-   assert( collectComments_ );
-   if ( placement == commentAfterOnSameLine )
-   {
-      assert( lastValue_ != 0 );
-      lastValue_->setComment( std::string( begin, end ), placement );
-   }
-   else
-   {
-      if ( !commentsBefore_.empty() )
-         commentsBefore_ += "\n";
-      commentsBefore_ += std::string( begin, end );
-   }
+void Reader::skipCommentTokens(Token& token) {
+  if (features_.allowComments_) {
+    do {
+      readToken(token);
+    } while (token.type_ == tokenComment);
+  } else {
+    readToken(token);
+  }
 }
 
-
-bool 
-Reader::readCStyleComment()
-{
-   while ( current_ != end_ )
-   {
-      Char c = getNextChar();
-      if ( c == '*'  &&  *current_ == '/' )
-         break;
-   }
-   return getNextChar() == '/';
+bool Reader::readToken(Token& token) {
+  skipSpaces();
+  token.start_ = current_;
+  Char c = getNextChar();
+  bool ok = true;
+  switch (c) {
+  case '{':
+    token.type_ = tokenObjectBegin;
+    break;
+  case '}':
+    token.type_ = tokenObjectEnd;
+    break;
+  case '[':
+    token.type_ = tokenArrayBegin;
+    break;
+  case ']':
+    token.type_ = tokenArrayEnd;
+    break;
+  case '"':
+    token.type_ = tokenString;
+    ok = readString();
+    break;
+  case '/':
+    token.type_ = tokenComment;
+    ok = readComment();
+    break;
+  case '0':
+  case '1':
+  case '2':
+  case '3':
+  case '4':
+  case '5':
+  case '6':
+  case '7':
+  case '8':
+  case '9':
+  case '-':
+    token.type_ = tokenNumber;
+    readNumber();
+    break;
+  case 't':
+    token.type_ = tokenTrue;
+    ok = match("rue", 3);
+    break;
+  case 'f':
+    token.type_ = tokenFalse;
+    ok = match("alse", 4);
+    break;
+  case 'n':
+    token.type_ = tokenNull;
+    ok = match("ull", 3);
+    break;
+  case ',':
+    token.type_ = tokenArraySeparator;
+    break;
+  case ':':
+    token.type_ = tokenMemberSeparator;
+    break;
+  case 0:
+    token.type_ = tokenEndOfStream;
+    break;
+  default:
+    ok = false;
+    break;
+  }
+  if (!ok)
+    token.type_ = tokenError;
+  token.end_ = current_;
+  return true;
 }
 
-
-bool 
-Reader::readCppStyleComment()
-{
-   while ( current_ != end_ )
-   {
-      Char c = getNextChar();
-      if (  c == '\r'  ||  c == '\n' )
-         break;
-   }
-   return true;
-}
-
-
-void 
-Reader::readNumber()
-{
-   while ( current_ != end_ )
-   {
-      if ( !(*current_ >= '0'  &&  *current_ <= '9')  &&
-           !in( *current_, '.', 'e', 'E', '+', '-' ) )
-         break;
+void Reader::skipSpaces() {
+  while (current_ != end_) {
+    Char c = *current_;
+    if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
       ++current_;
-   }
+    else
+      break;
+  }
 }
 
-bool
-Reader::readString()
-{
-   Char c = 0;
-   while ( current_ != end_ )
-   {
-      c = getNextChar();
-      if ( c == '\\' )
-         getNextChar();
-      else if ( c == '"' )
-         break;
-   }
-   return c == '"';
+bool Reader::match(Location pattern, int patternLength) {
+  if (end_ - current_ < patternLength)
+    return false;
+  int index = patternLength;
+  while (index--)
+    if (current_[index] != pattern[index])
+      return false;
+  current_ += patternLength;
+  return true;
 }
 
+bool Reader::readComment() {
+  Location commentBegin = current_ - 1;
+  Char c = getNextChar();
+  bool successful = false;
+  if (c == '*')
+    successful = readCStyleComment();
+  else if (c == '/')
+    successful = readCppStyleComment();
+  if (!successful)
+    return false;
 
-bool 
-Reader::readObject( Token &/*tokenStart*/ )
-{
-   Token tokenName;
-   std::string name;
-   currentValue() = Value( objectValue );
-   while ( readToken( tokenName ) )
-   {
-      bool initialTokenOk = true;
-      while ( tokenName.type_ == tokenComment  &&  initialTokenOk )
-         initialTokenOk = readToken( tokenName );
-      if  ( !initialTokenOk )
-         break;
-      if ( tokenName.type_ == tokenObjectEnd  &&  name.empty() )  // empty object
-         return true;
-      if ( tokenName.type_ != tokenString )
-         break;
-      
-      name = "";
-      if ( !decodeString( tokenName, name ) )
-         return recoverFromError( tokenObjectEnd );
+  if (collectComments_) {
+    CommentPlacement placement = commentBefore;
+    if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
+      if (c != '*' || !containsNewLine(commentBegin, current_))
+        placement = commentAfterOnSameLine;
+    }
 
-      Token colon;
-      if ( !readToken( colon ) ||  colon.type_ != tokenMemberSeparator )
-      {
-         return addErrorAndRecover( "Missing ':' after object member name", 
-                                    colon, 
-                                    tokenObjectEnd );
-      }
-      Value &value = currentValue()[ name ];
-      nodes_.push( &value );
-      bool ok = readValue();
-      nodes_.pop();
-      if ( !ok ) // error already set
-         return recoverFromError( tokenObjectEnd );
-
-      Token comma;
-      if ( !readToken( comma )
-            ||  ( comma.type_ != tokenObjectEnd  &&  
-                  comma.type_ != tokenArraySeparator &&
-                  comma.type_ != tokenComment ) )
-      {
-         return addErrorAndRecover( "Missing ',' or '}' in object declaration", 
-                                    comma, 
-                                    tokenObjectEnd );
-      }
-      bool finalizeTokenOk = true;
-      while ( comma.type_ == tokenComment &&
-              finalizeTokenOk )
-         finalizeTokenOk = readToken( comma );
-      if ( comma.type_ == tokenObjectEnd )
-         return true;
-   }
-   return addErrorAndRecover( "Missing '}' or object member name", 
-                              tokenName, 
-                              tokenObjectEnd );
+    addComment(commentBegin, current_, placement);
+  }
+  return true;
 }
 
+String Reader::normalizeEOL(Reader::Location begin, Reader::Location end) {
+  String normalized;
+  normalized.reserve(static_cast<size_t>(end - begin));
+  Reader::Location current = begin;
+  while (current != end) {
+    char c = *current++;
+    if (c == '\r') {
+      if (current != end && *current == '\n')
+        // convert dos EOL
+        ++current;
+      // convert Mac EOL
+      normalized += '\n';
+    } else {
+      normalized += c;
+    }
+  }
+  return normalized;
+}
 
-bool 
-Reader::readArray( Token &/*tokenStart*/ )
-{
-   currentValue() = Value( arrayValue );
-   skipSpaces();
-   if ( *current_ == ']' ) // empty array
-   {
-      Token endArray;
-      readToken( endArray );
+void Reader::addComment(Location begin,
+                        Location end,
+                        CommentPlacement placement) {
+  assert(collectComments_);
+  const String& normalized = normalizeEOL(begin, end);
+  if (placement == commentAfterOnSameLine) {
+    assert(lastValue_ != nullptr);
+    lastValue_->setComment(normalized, placement);
+  } else {
+    commentsBefore_ += normalized;
+  }
+}
+
+bool Reader::readCStyleComment() {
+  while ((current_ + 1) < end_) {
+    Char c = getNextChar();
+    if (c == '*' && *current_ == '/')
+      break;
+  }
+  return getNextChar() == '/';
+}
+
+bool Reader::readCppStyleComment() {
+  while (current_ != end_) {
+    Char c = getNextChar();
+    if (c == '\n')
+      break;
+    if (c == '\r') {
+      // Consume DOS EOL. It will be normalized in addComment.
+      if (current_ != end_ && *current_ == '\n')
+        getNextChar();
+      // Break on Mac OS 9 EOL.
+      break;
+    }
+  }
+  return true;
+}
+
+void Reader::readNumber() {
+  const char* p = current_;
+  char c = '0'; // stopgap for already consumed character
+  // integral part
+  while (c >= '0' && c <= '9')
+    c = (current_ = p) < end_ ? *p++ : '\0';
+  // fractional part
+  if (c == '.') {
+    c = (current_ = p) < end_ ? *p++ : '\0';
+    while (c >= '0' && c <= '9')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+  }
+  // exponential part
+  if (c == 'e' || c == 'E') {
+    c = (current_ = p) < end_ ? *p++ : '\0';
+    if (c == '+' || c == '-')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+    while (c >= '0' && c <= '9')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+  }
+}
+
+bool Reader::readString() {
+  Char c = '\0';
+  while (current_ != end_) {
+    c = getNextChar();
+    if (c == '\\')
+      getNextChar();
+    else if (c == '"')
+      break;
+  }
+  return c == '"';
+}
+
+bool Reader::readObject(Token& token) {
+  Token tokenName;
+  String name;
+  Value init(objectValue);
+  currentValue().swapPayload(init);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  while (readToken(tokenName)) {
+    bool initialTokenOk = true;
+    while (tokenName.type_ == tokenComment && initialTokenOk)
+      initialTokenOk = readToken(tokenName);
+    if (!initialTokenOk)
+      break;
+    if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
       return true;
-   }
-   int index = 0;
-   for (;;)
-   {
-      Value &value = currentValue()[ index++ ];
-      nodes_.push( &value );
-      bool ok = readValue();
-      nodes_.pop();
-      if ( !ok ) // error already set
-         return recoverFromError( tokenArrayEnd );
+    name.clear();
+    if (tokenName.type_ == tokenString) {
+      if (!decodeString(tokenName, name))
+        return recoverFromError(tokenObjectEnd);
+    } else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
+      Value numberName;
+      if (!decodeNumber(tokenName, numberName))
+        return recoverFromError(tokenObjectEnd);
+      name = String(numberName.asCString());
+    } else {
+      break;
+    }
 
-      Token token;
-      // Accept Comment after last item in the array.
-      ok = readToken( token );
-      while ( token.type_ == tokenComment  &&  ok )
-      {
-         ok = readToken( token );
-      }
-      bool badTokenType = ( token.type_ != tokenArraySeparator  &&
-                            token.type_ != tokenArrayEnd );
-      if ( !ok  ||  badTokenType )
-      {
-         return addErrorAndRecover( "Missing ',' or ']' in array declaration", 
-                                    token, 
-                                    tokenArrayEnd );
-      }
-      if ( token.type_ == tokenArrayEnd )
-         break;
-   }
-   return true;
+    Token colon;
+    if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
+      return addErrorAndRecover("Missing ':' after object member name", colon,
+                                tokenObjectEnd);
+    }
+    Value& value = currentValue()[name];
+    nodes_.push(&value);
+    bool ok = readValue();
+    nodes_.pop();
+    if (!ok) // error already set
+      return recoverFromError(tokenObjectEnd);
+
+    Token comma;
+    if (!readToken(comma) ||
+        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
+         comma.type_ != tokenComment)) {
+      return addErrorAndRecover("Missing ',' or '}' in object declaration",
+                                comma, tokenObjectEnd);
+    }
+    bool finalizeTokenOk = true;
+    while (comma.type_ == tokenComment && finalizeTokenOk)
+      finalizeTokenOk = readToken(comma);
+    if (comma.type_ == tokenObjectEnd)
+      return true;
+  }
+  return addErrorAndRecover("Missing '}' or object member name", tokenName,
+                            tokenObjectEnd);
 }
 
+bool Reader::readArray(Token& token) {
+  Value init(arrayValue);
+  currentValue().swapPayload(init);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  skipSpaces();
+  if (current_ != end_ && *current_ == ']') // empty array
+  {
+    Token endArray;
+    readToken(endArray);
+    return true;
+  }
+  int index = 0;
+  for (;;) {
+    Value& value = currentValue()[index++];
+    nodes_.push(&value);
+    bool ok = readValue();
+    nodes_.pop();
+    if (!ok) // error already set
+      return recoverFromError(tokenArrayEnd);
 
-bool 
-Reader::decodeNumber( Token &token )
-{
-   bool isDouble = false;
-   for ( Location inspect = token.start_; inspect != token.end_; ++inspect )
-   {
-      isDouble = isDouble  
-                 ||  in( *inspect, '.', 'e', 'E', '+' )  
-                 ||  ( *inspect == '-'  &&  inspect != token.start_ );
-   }
-   if ( isDouble )
-      return decodeDouble( token );
-   // Attempts to parse the number as an integer. If the number is
-   // larger than the maximum supported value of an integer then
-   // we decode the number as a double.
-   Location current = token.start_;
-   bool isNegative = *current == '-';
-   if ( isNegative )
-      ++current;
-   Value::LargestUInt maxIntegerValue = isNegative ? Value::LargestUInt(-Value::minLargestInt) 
-                                                   : Value::maxLargestUInt;
-   Value::LargestUInt threshold = maxIntegerValue / 10;
-   Value::LargestUInt value = 0;
-   while ( current < token.end_ )
-   {
-      Char c = *current++;
-      if ( c < '0'  ||  c > '9' )
-         return addError( "'" + std::string( token.start_, token.end_ ) + "' is not a number.", token );
-      Value::UInt digit(c - '0');
-      if ( value >= threshold )
-      {
-         // We've hit or exceeded the max value divided by 10 (rounded down). If
-         // a) we've only just touched the limit, b) this is the last digit, and
-         // c) it's small enough to fit in that rounding delta, we're okay.
-         // Otherwise treat this number as a double to avoid overflow.
-         if (value > threshold ||
-             current != token.end_ ||
-             digit > maxIntegerValue % 10)
-         {
-            return decodeDouble( token );
-         }
+    Token currentToken;
+    // Accept Comment after last item in the array.
+    ok = readToken(currentToken);
+    while (currentToken.type_ == tokenComment && ok) {
+      ok = readToken(currentToken);
+    }
+    bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
+                         currentToken.type_ != tokenArrayEnd);
+    if (!ok || badTokenType) {
+      return addErrorAndRecover("Missing ',' or ']' in array declaration",
+                                currentToken, tokenArrayEnd);
+    }
+    if (currentToken.type_ == tokenArrayEnd)
+      break;
+  }
+  return true;
+}
+
+bool Reader::decodeNumber(Token& token) {
+  Value decoded;
+  if (!decodeNumber(token, decoded))
+    return false;
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
+
+bool Reader::decodeNumber(Token& token, Value& decoded) {
+  // Attempts to parse the number as an integer. If the number is
+  // larger than the maximum supported value of an integer then
+  // we decode the number as a double.
+  Location current = token.start_;
+  bool isNegative = *current == '-';
+  if (isNegative)
+    ++current;
+  // TODO: Help the compiler do the div and mod at compile time or get rid of
+  // them.
+  Value::LargestUInt maxIntegerValue =
+      isNegative ? Value::LargestUInt(Value::maxLargestInt) + 1
+                 : Value::maxLargestUInt;
+  Value::LargestUInt threshold = maxIntegerValue / 10;
+  Value::LargestUInt value = 0;
+  while (current < token.end_) {
+    Char c = *current++;
+    if (c < '0' || c > '9')
+      return decodeDouble(token, decoded);
+    auto digit(static_cast<Value::UInt>(c - '0'));
+    if (value >= threshold) {
+      // We've hit or exceeded the max value divided by 10 (rounded down). If
+      // a) we've only just touched the limit, b) this is the last digit, and
+      // c) it's small enough to fit in that rounding delta, we're okay.
+      // Otherwise treat this number as a double to avoid overflow.
+      if (value > threshold || current != token.end_ ||
+          digit > maxIntegerValue % 10) {
+        return decodeDouble(token, decoded);
       }
-      value = value * 10 + digit;
-   }
-   if ( isNegative )
-      currentValue() = -Value::LargestInt( value );
-   else if ( value <= Value::LargestUInt(Value::maxInt) )
-      currentValue() = Value::LargestInt( value );
-   else
-      currentValue() = value;
-   return true;
+    }
+    value = value * 10 + digit;
+  }
+  if (isNegative && value == maxIntegerValue)
+    decoded = Value::minLargestInt;
+  else if (isNegative)
+    decoded = -Value::LargestInt(value);
+  else if (value <= Value::LargestUInt(Value::maxInt))
+    decoded = Value::LargestInt(value);
+  else
+    decoded = value;
+  return true;
 }
 
-
-bool 
-Reader::decodeDouble( Token &token )
-{
-   double value = 0;
-   const int bufferSize = 32;
-   int count;
-   int length = int(token.end_ - token.start_);
-
-   // Sanity check to avoid buffer overflow exploits.
-   if (length < 0) {
-      return addError( "Unable to parse token length", token );
-   }
-
-   // Avoid using a string constant for the format control string given to
-   // sscanf, as this can cause hard to debug crashes on OS X. See here for more
-   // info:
-   //
-   //     http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
-   char format[] = "%lf";
-
-   if ( length <= bufferSize )
-   {
-      Char buffer[bufferSize+1];
-      memcpy( buffer, token.start_, length );
-      buffer[length] = 0;
-      count = sscanf( buffer, format, &value );
-   }
-   else
-   {
-      std::string buffer( token.start_, token.end_ );
-      count = sscanf( buffer.c_str(), format, &value );
-   }
-
-   if ( count != 1 )
-      return addError( "'" + std::string( token.start_, token.end_ ) + "' is not a number.", token );
-   currentValue() = value;
-   return true;
+bool Reader::decodeDouble(Token& token) {
+  Value decoded;
+  if (!decodeDouble(token, decoded))
+    return false;
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
 }
 
-
-bool 
-Reader::decodeString( Token &token )
-{
-   std::string decoded;
-   if ( !decodeString( token, decoded ) )
-      return false;
-   currentValue() = decoded;
-   return true;
+bool Reader::decodeDouble(Token& token, Value& decoded) {
+  double value = 0;
+  String buffer(token.start_, token.end_);
+  IStringStream is(buffer);
+  if (!(is >> value))
+    return addError(
+        "'" + String(token.start_, token.end_) + "' is not a number.", token);
+  decoded = value;
+  return true;
 }
 
+bool Reader::decodeString(Token& token) {
+  String decoded_string;
+  if (!decodeString(token, decoded_string))
+    return false;
+  Value decoded(decoded_string);
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
 
-bool 
-Reader::decodeString( Token &token, std::string &decoded )
-{
-   decoded.reserve( token.end_ - token.start_ - 2 );
-   Location current = token.start_ + 1; // skip '"'
-   Location end = token.end_ - 1;      // do not include '"'
-   while ( current != end )
-   {
-      Char c = *current++;
-      if ( c == '"' )
-         break;
-      else if ( c == '\\' )
-      {
-         if ( current == end )
-            return addError( "Empty escape sequence in string", token, current );
-         Char escape = *current++;
-         switch ( escape )
-         {
-         case '"': decoded += '"'; break;
-         case '/': decoded += '/'; break;
-         case '\\': decoded += '\\'; break;
-         case 'b': decoded += '\b'; break;
-         case 'f': decoded += '\f'; break;
-         case 'n': decoded += '\n'; break;
-         case 'r': decoded += '\r'; break;
-         case 't': decoded += '\t'; break;
-         case 'u':
-            {
-               unsigned int unicode;
-               if ( !decodeUnicodeCodePoint( token, current, end, unicode ) )
-                  return false;
-               decoded += codePointToUTF8(unicode);
-            }
-            break;
-         default:
-            return addError( "Bad escape sequence in string", token, current );
-         }
+bool Reader::decodeString(Token& token, String& decoded) {
+  decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
+  Location current = token.start_ + 1; // skip '"'
+  Location end = token.end_ - 1;       // do not include '"'
+  while (current != end) {
+    Char c = *current++;
+    if (c == '"')
+      break;
+    else if (c == '\\') {
+      if (current == end)
+        return addError("Empty escape sequence in string", token, current);
+      Char escape = *current++;
+      switch (escape) {
+      case '"':
+        decoded += '"';
+        break;
+      case '/':
+        decoded += '/';
+        break;
+      case '\\':
+        decoded += '\\';
+        break;
+      case 'b':
+        decoded += '\b';
+        break;
+      case 'f':
+        decoded += '\f';
+        break;
+      case 'n':
+        decoded += '\n';
+        break;
+      case 'r':
+        decoded += '\r';
+        break;
+      case 't':
+        decoded += '\t';
+        break;
+      case 'u': {
+        unsigned int unicode;
+        if (!decodeUnicodeCodePoint(token, current, end, unicode))
+          return false;
+        decoded += codePointToUTF8(unicode);
+      } break;
+      default:
+        return addError("Bad escape sequence in string", token, current);
       }
-      else
-      {
-         decoded += c;
-      }
-   }
-   return true;
+    } else {
+      decoded += c;
+    }
+  }
+  return true;
 }
 
-bool
-Reader::decodeUnicodeCodePoint( Token &token, 
-                                     Location &current, 
-                                     Location end, 
-                                     unsigned int &unicode )
-{
+bool Reader::decodeUnicodeCodePoint(Token& token,
+                                    Location& current,
+                                    Location end,
+                                    unsigned int& unicode) {
 
-   if ( !decodeUnicodeEscapeSequence( token, current, end, unicode ) )
-      return false;
-   if (unicode >= 0xD800 && unicode <= 0xDBFF)
-   {
-      // surrogate pairs
-      if (end - current < 6)
-         return addError( "additional six characters expected to parse unicode surrogate pair.", token, current );
+  if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
+    return false;
+  if (unicode >= 0xD800 && unicode <= 0xDBFF) {
+    // surrogate pairs
+    if (end - current < 6)
+      return addError(
+          "additional six characters expected to parse unicode surrogate pair.",
+          token, current);
+    if (*(current++) == '\\' && *(current++) == 'u') {
       unsigned int surrogatePair;
-      if (*(current++) == '\\' && *(current++)== 'u')
-      {
-         if (decodeUnicodeEscapeSequence( token, current, end, surrogatePair ))
-         {
-            unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
-         } 
-         else
-            return false;
-      } 
-      else
-         return addError( "expecting another \\u token to begin the second half of a unicode surrogate pair", token, current );
-   }
-   return true;
+      if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
+        unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
+      } else
+        return false;
+    } else
+      return addError("expecting another \\u token to begin the second half of "
+                      "a unicode surrogate pair",
+                      token, current);
+  }
+  return true;
 }
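+
+// Editor's note (illustrative only, not part of upstream jsoncpp): the
+// surrogate-pair combination above is plain UTF-16 decoding. For example,
+// the escape pair \uD83D\uDE00 yields
+//   0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF)
+//     = 0x10000 + 0xF400 + 0x200 = 0x1F600 (U+1F600).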
 
-bool 
-Reader::decodeUnicodeEscapeSequence( Token &token, 
-                                     Location &current, 
-                                     Location end, 
-                                     unsigned int &unicode )
-{
-   if ( end - current < 4 )
-      return addError( "Bad unicode escape sequence in string: four digits expected.", token, current );
-   unicode = 0;
-   for ( int index =0; index < 4; ++index )
-   {
-      Char c = *current++;
-      unicode *= 16;
-      if ( c >= '0'  &&  c <= '9' )
-         unicode += c - '0';
-      else if ( c >= 'a'  &&  c <= 'f' )
-         unicode += c - 'a' + 10;
-      else if ( c >= 'A'  &&  c <= 'F' )
-         unicode += c - 'A' + 10;
-      else
-         return addError( "Bad unicode escape sequence in string: hexadecimal digit expected.", token, current );
-   }
-   return true;
+bool Reader::decodeUnicodeEscapeSequence(Token& token,
+                                         Location& current,
+                                         Location end,
+                                         unsigned int& ret_unicode) {
+  if (end - current < 4)
+    return addError(
+        "Bad unicode escape sequence in string: four digits expected.", token,
+        current);
+  int unicode = 0;
+  for (int index = 0; index < 4; ++index) {
+    Char c = *current++;
+    unicode *= 16;
+    if (c >= '0' && c <= '9')
+      unicode += c - '0';
+    else if (c >= 'a' && c <= 'f')
+      unicode += c - 'a' + 10;
+    else if (c >= 'A' && c <= 'F')
+      unicode += c - 'A' + 10;
+    else
+      return addError(
+          "Bad unicode escape sequence in string: hexadecimal digit expected.",
+          token, current);
+  }
+  ret_unicode = static_cast<unsigned int>(unicode);
+  return true;
 }
 
-
-bool 
-Reader::addError( const std::string &message, 
-                  Token &token,
-                  Location extra )
-{
-   ErrorInfo info;
-   info.token_ = token;
-   info.message_ = message;
-   info.extra_ = extra;
-   errors_.push_back( info );
-   return false;
+bool Reader::addError(const String& message, Token& token, Location extra) {
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = extra;
+  errors_.push_back(info);
+  return false;
 }
 
-
-bool 
-Reader::recoverFromError( TokenType skipUntilToken )
-{
-   int errorCount = int(errors_.size());
-   Token skip;
-   for (;;)
-   {
-      if ( !readToken(skip) )
-         errors_.resize( errorCount ); // discard errors caused by recovery
-      if ( skip.type_ == skipUntilToken  ||  skip.type_ == tokenEndOfStream )
-         break;
-   }
-   errors_.resize( errorCount );
-   return false;
+bool Reader::recoverFromError(TokenType skipUntilToken) {
+  size_t const errorCount = errors_.size();
+  Token skip;
+  for (;;) {
+    if (!readToken(skip))
+      errors_.resize(errorCount); // discard errors caused by recovery
+    if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
+      break;
+  }
+  errors_.resize(errorCount);
+  return false;
 }
 
-
-bool 
-Reader::addErrorAndRecover( const std::string &message, 
-                            Token &token,
-                            TokenType skipUntilToken )
-{
-   addError( message, token );
-   return recoverFromError( skipUntilToken );
+bool Reader::addErrorAndRecover(const String& message,
+                                Token& token,
+                                TokenType skipUntilToken) {
+  addError(message, token);
+  return recoverFromError(skipUntilToken);
 }
 
+Value& Reader::currentValue() { return *(nodes_.top()); }
 
-Value &
-Reader::currentValue()
-{
-   return *(nodes_.top());
+Reader::Char Reader::getNextChar() {
+  if (current_ == end_)
+    return 0;
+  return *current_++;
 }
 
-
-Reader::Char 
-Reader::getNextChar()
-{
-   if ( current_ == end_ )
-      return 0;
-   return *current_++;
+void Reader::getLocationLineAndColumn(Location location,
+                                      int& line,
+                                      int& column) const {
+  Location current = begin_;
+  Location lastLineStart = current;
+  line = 0;
+  while (current < location && current != end_) {
+    Char c = *current++;
+    if (c == '\r') {
+      if (*current == '\n')
+        ++current;
+      lastLineStart = current;
+      ++line;
+    } else if (c == '\n') {
+      lastLineStart = current;
+      ++line;
+    }
+  }
+  // column & line start at 1
+  column = int(location - lastLineStart) + 1;
+  ++line;
 }
 
-
-void 
-Reader::getLocationLineAndColumn( Location location,
-                                  int &line,
-                                  int &column ) const
-{
-   Location current = begin_;
-   Location lastLineStart = current;
-   line = 0;
-   while ( current < location  &&  current != end_ )
-   {
-      Char c = *current++;
-      if ( c == '\r' )
-      {
-         if ( *current == '\n' )
-            ++current;
-         lastLineStart = current;
-         ++line;
-      }
-      else if ( c == '\n' )
-      {
-         lastLineStart = current;
-         ++line;
-      }
-   }
-   // column & line start at 1
-   column = int(location - lastLineStart) + 1;
-   ++line;
+String Reader::getLocationLineAndColumn(Location location) const {
+  int line, column;
+  getLocationLineAndColumn(location, line, column);
+  char buffer[18 + 16 + 16 + 1];
+  jsoncpp_snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+  return buffer;
 }
 
-
-std::string
-Reader::getLocationLineAndColumn( Location location ) const
-{
-   int line, column;
-   getLocationLineAndColumn( location, line, column );
-   char buffer[18+16+16+1];
-   sprintf( buffer, "Line %d, Column %d", line, column );
-   return buffer;
-}
-
-
 // Deprecated. Preserved for backward compatibility
-std::string 
-Reader::getFormatedErrorMessages() const
-{
-    return getFormattedErrorMessages();
+String Reader::getFormatedErrorMessages() const {
+  return getFormattedErrorMessages();
 }
 
-
-std::string 
-Reader::getFormattedErrorMessages() const
-{
-   std::string formattedMessage;
-   for ( Errors::const_iterator itError = errors_.begin();
-         itError != errors_.end();
-         ++itError )
-   {
-      const ErrorInfo &error = *itError;
-      formattedMessage += "* " + getLocationLineAndColumn( error.token_.start_ ) + "\n";
-      formattedMessage += "  " + error.message_ + "\n";
-      if ( error.extra_ )
-         formattedMessage += "See " + getLocationLineAndColumn( error.extra_ ) + " for detail.\n";
-   }
-   return formattedMessage;
+String Reader::getFormattedErrorMessages() const {
+  String formattedMessage;
+  for (const auto& error : errors_) {
+    formattedMessage +=
+        "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
+    formattedMessage += "  " + error.message_ + "\n";
+    if (error.extra_)
+      formattedMessage +=
+          "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
+  }
+  return formattedMessage;
 }
 
+std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
+  std::vector<Reader::StructuredError> allErrors;
+  for (const auto& error : errors_) {
+    Reader::StructuredError structured;
+    structured.offset_start = error.token_.start_ - begin_;
+    structured.offset_limit = error.token_.end_ - begin_;
+    structured.message = error.message_;
+    allErrors.push_back(structured);
+  }
+  return allErrors;
+}
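+
+// Editor's note (illustrative sketch, not part of upstream jsoncpp): typical
+// use of the legacy Reader interface defined above, assuming <json/json.h>
+// and <iostream> are included:
+//
+//   Json::Reader reader;
+//   Json::Value root;
+//   if (!reader.parse("{ \"answer\": 42 }", root))
+//     std::cerr << reader.getFormattedErrorMessages();
+//   else
+//     std::cout << root["answer"].asInt() << '\n';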
 
-std::istream& operator>>( std::istream &sin, Value &root )
-{
-    Json::Reader reader;
-    bool ok = reader.parse(sin, root, true);
-    if (!ok) {
-      fprintf(
-          stderr,
-          "Error from reader: %s",
-          reader.getFormattedErrorMessages().c_str());
+bool Reader::pushError(const Value& value, const String& message) {
+  ptrdiff_t const length = end_ - begin_;
+  if (value.getOffsetStart() > length || value.getOffsetLimit() > length)
+    return false;
+  Token token;
+  token.type_ = tokenError;
+  token.start_ = begin_ + value.getOffsetStart();
+  token.end_ = begin_ + value.getOffsetLimit();
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = nullptr;
+  errors_.push_back(info);
+  return true;
+}
 
-      JSON_FAIL_MESSAGE("reader error");
+bool Reader::pushError(const Value& value,
+                       const String& message,
+                       const Value& extra) {
+  ptrdiff_t const length = end_ - begin_;
+  if (value.getOffsetStart() > length || value.getOffsetLimit() > length ||
+      extra.getOffsetLimit() > length)
+    return false;
+  Token token;
+  token.type_ = tokenError;
+  token.start_ = begin_ + value.getOffsetStart();
+  token.end_ = begin_ + value.getOffsetLimit();
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = begin_ + extra.getOffsetStart();
+  errors_.push_back(info);
+  return true;
+}
+
+bool Reader::good() const { return errors_.empty(); }
+
+// Originally copied from the Features class (now deprecated), used internally
+// for features implementation.
+class OurFeatures {
+public:
+  static OurFeatures all();
+  bool allowComments_;
+  bool strictRoot_;
+  bool allowDroppedNullPlaceholders_;
+  bool allowNumericKeys_;
+  bool allowSingleQuotes_;
+  bool failIfExtra_;
+  bool rejectDupKeys_;
+  bool allowSpecialFloats_;
+  size_t stackLimit_;
+}; // OurFeatures
+
+OurFeatures OurFeatures::all() { return {}; }
+
+// Implementation of class Reader
+// ////////////////////////////////
+
+// Originally copied from the Reader class (now deprecated), used internally
+// for implementing JSON reading.
+class OurReader {
+public:
+  typedef char Char;
+  typedef const Char* Location;
+  struct StructuredError {
+    ptrdiff_t offset_start;
+    ptrdiff_t offset_limit;
+    String message;
+  };
+
+  OurReader(OurFeatures const& features);
+  bool parse(const char* beginDoc,
+             const char* endDoc,
+             Value& root,
+             bool collectComments = true);
+  String getFormattedErrorMessages() const;
+  std::vector<StructuredError> getStructuredErrors() const;
+  bool pushError(const Value& value, const String& message);
+  bool pushError(const Value& value, const String& message, const Value& extra);
+  bool good() const;
+
+private:
+  OurReader(OurReader const&);      // no impl
+  void operator=(OurReader const&); // no impl
+
+  enum TokenType {
+    tokenEndOfStream = 0,
+    tokenObjectBegin,
+    tokenObjectEnd,
+    tokenArrayBegin,
+    tokenArrayEnd,
+    tokenString,
+    tokenNumber,
+    tokenTrue,
+    tokenFalse,
+    tokenNull,
+    tokenNaN,
+    tokenPosInf,
+    tokenNegInf,
+    tokenArraySeparator,
+    tokenMemberSeparator,
+    tokenComment,
+    tokenError
+  };
+
+  class Token {
+  public:
+    TokenType type_;
+    Location start_;
+    Location end_;
+  };
+
+  class ErrorInfo {
+  public:
+    Token token_;
+    String message_;
+    Location extra_;
+  };
+
+  typedef std::deque<ErrorInfo> Errors;
+
+  bool readToken(Token& token);
+  void skipSpaces();
+  bool match(Location pattern, int patternLength);
+  bool readComment();
+  bool readCStyleComment();
+  bool readCppStyleComment();
+  bool readString();
+  bool readStringSingleQuote();
+  bool readNumber(bool checkInf);
+  bool readValue();
+  bool readObject(Token& token);
+  bool readArray(Token& token);
+  bool decodeNumber(Token& token);
+  bool decodeNumber(Token& token, Value& decoded);
+  bool decodeString(Token& token);
+  bool decodeString(Token& token, String& decoded);
+  bool decodeDouble(Token& token);
+  bool decodeDouble(Token& token, Value& decoded);
+  bool decodeUnicodeCodePoint(Token& token,
+                              Location& current,
+                              Location end,
+                              unsigned int& unicode);
+  bool decodeUnicodeEscapeSequence(Token& token,
+                                   Location& current,
+                                   Location end,
+                                   unsigned int& unicode);
+  bool addError(const String& message, Token& token, Location extra = nullptr);
+  bool recoverFromError(TokenType skipUntilToken);
+  bool addErrorAndRecover(const String& message,
+                          Token& token,
+                          TokenType skipUntilToken);
+  void skipUntilSpace();
+  Value& currentValue();
+  Char getNextChar();
+  void
+  getLocationLineAndColumn(Location location, int& line, int& column) const;
+  String getLocationLineAndColumn(Location location) const;
+  void addComment(Location begin, Location end, CommentPlacement placement);
+  void skipCommentTokens(Token& token);
+
+  static String normalizeEOL(Location begin, Location end);
+  static bool containsNewLine(Location begin, Location end);
+
+  typedef std::stack<Value*> Nodes;
+  Nodes nodes_;
+  Errors errors_;
+  String document_;
+  Location begin_;
+  Location end_;
+  Location current_;
+  Location lastValueEnd_;
+  Value* lastValue_;
+  String commentsBefore_;
+
+  OurFeatures const features_;
+  bool collectComments_;
+}; // OurReader
+
+// Complete copy of the Reader implementation, for OurReader.
+
+bool OurReader::containsNewLine(OurReader::Location begin,
+                                OurReader::Location end) {
+  for (; begin < end; ++begin)
+    if (*begin == '\n' || *begin == '\r')
+      return true;
+  return false;
+}
+
+OurReader::OurReader(OurFeatures const& features)
+    : errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
+      lastValue_(), commentsBefore_(), features_(features), collectComments_() {
+}
+
+bool OurReader::parse(const char* beginDoc,
+                      const char* endDoc,
+                      Value& root,
+                      bool collectComments) {
+  if (!features_.allowComments_) {
+    collectComments = false;
+  }
+
+  begin_ = beginDoc;
+  end_ = endDoc;
+  collectComments_ = collectComments;
+  current_ = begin_;
+  lastValueEnd_ = nullptr;
+  lastValue_ = nullptr;
+  commentsBefore_.clear();
+  errors_.clear();
+  while (!nodes_.empty())
+    nodes_.pop();
+  nodes_.push(&root);
+
+  bool successful = readValue();
+  nodes_.pop();
+  Token token;
+  skipCommentTokens(token);
+  if (features_.failIfExtra_) {
+    if ((features_.strictRoot_ || token.type_ != tokenError) &&
+        token.type_ != tokenEndOfStream) {
+      addError("Extra non-whitespace after JSON value.", token);
+      return false;
     }
-    return sin;
+  }
+  if (collectComments_ && !commentsBefore_.empty())
+    root.setComment(commentsBefore_, commentAfter);
+  if (features_.strictRoot_) {
+    if (!root.isArray() && !root.isObject()) {
+      // Set the error location to the start of the document; ideally it
+      // should be the first token found in the document.
+      token.type_ = tokenError;
+      token.start_ = beginDoc;
+      token.end_ = endDoc;
+      addError(
+          "A valid JSON document must be either an array or an object value.",
+          token);
+      return false;
+    }
+  }
+  return successful;
 }
 
+bool OurReader::readValue() {
+  // Reject documents nested more deeply than the configured stackLimit_.
+  if (nodes_.size() > features_.stackLimit_)
+    throwRuntimeError("Exceeded stackLimit in readValue().");
+  Token token;
+  skipCommentTokens(token);
+  bool successful = true;
+
+  if (collectComments_ && !commentsBefore_.empty()) {
+    currentValue().setComment(commentsBefore_, commentBefore);
+    commentsBefore_.clear();
+  }
+
+  switch (token.type_) {
+  case tokenObjectBegin:
+    successful = readObject(token);
+    currentValue().setOffsetLimit(current_ - begin_);
+    break;
+  case tokenArrayBegin:
+    successful = readArray(token);
+    currentValue().setOffsetLimit(current_ - begin_);
+    break;
+  case tokenNumber:
+    successful = decodeNumber(token);
+    break;
+  case tokenString:
+    successful = decodeString(token);
+    break;
+  case tokenTrue: {
+    Value v(true);
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenFalse: {
+    Value v(false);
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenNull: {
+    Value v;
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenNaN: {
+    Value v(std::numeric_limits<double>::quiet_NaN());
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenPosInf: {
+    Value v(std::numeric_limits<double>::infinity());
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenNegInf: {
+    Value v(-std::numeric_limits<double>::infinity());
+    currentValue().swapPayload(v);
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+  } break;
+  case tokenArraySeparator:
+  case tokenObjectEnd:
+  case tokenArrayEnd:
+    if (features_.allowDroppedNullPlaceholders_) {
+      // "Un-read" the current token and mark the current value as a null
+      // token.
+      current_--;
+      Value v;
+      currentValue().swapPayload(v);
+      currentValue().setOffsetStart(current_ - begin_ - 1);
+      currentValue().setOffsetLimit(current_ - begin_);
+      break;
+    } // else, fall through ...
+  default:
+    currentValue().setOffsetStart(token.start_ - begin_);
+    currentValue().setOffsetLimit(token.end_ - begin_);
+    return addError("Syntax error: value, object or array expected.", token);
+  }
+
+  if (collectComments_) {
+    lastValueEnd_ = current_;
+    lastValue_ = &currentValue();
+  }
+
+  return successful;
+}
+
+void OurReader::skipCommentTokens(Token& token) {
+  if (features_.allowComments_) {
+    do {
+      readToken(token);
+    } while (token.type_ == tokenComment);
+  } else {
+    readToken(token);
+  }
+}
+
+bool OurReader::readToken(Token& token) {
+  skipSpaces();
+  token.start_ = current_;
+  Char c = getNextChar();
+  bool ok = true;
+  switch (c) {
+  case '{':
+    token.type_ = tokenObjectBegin;
+    break;
+  case '}':
+    token.type_ = tokenObjectEnd;
+    break;
+  case '[':
+    token.type_ = tokenArrayBegin;
+    break;
+  case ']':
+    token.type_ = tokenArrayEnd;
+    break;
+  case '"':
+    token.type_ = tokenString;
+    ok = readString();
+    break;
+  case '\'':
+    if (features_.allowSingleQuotes_) {
+      token.type_ = tokenString;
+      ok = readStringSingleQuote();
+      break;
+    } // else fall through
+  case '/':
+    token.type_ = tokenComment;
+    ok = readComment();
+    break;
+  case '0':
+  case '1':
+  case '2':
+  case '3':
+  case '4':
+  case '5':
+  case '6':
+  case '7':
+  case '8':
+  case '9':
+    token.type_ = tokenNumber;
+    readNumber(false);
+    break;
+  case '-':
+    if (readNumber(true)) {
+      token.type_ = tokenNumber;
+    } else {
+      token.type_ = tokenNegInf;
+      ok = features_.allowSpecialFloats_ && match("nfinity", 7);
+    }
+    break;
+  case 't':
+    token.type_ = tokenTrue;
+    ok = match("rue", 3);
+    break;
+  case 'f':
+    token.type_ = tokenFalse;
+    ok = match("alse", 4);
+    break;
+  case 'n':
+    token.type_ = tokenNull;
+    ok = match("ull", 3);
+    break;
+  case 'N':
+    if (features_.allowSpecialFloats_) {
+      token.type_ = tokenNaN;
+      ok = match("aN", 2);
+    } else {
+      ok = false;
+    }
+    break;
+  case 'I':
+    if (features_.allowSpecialFloats_) {
+      token.type_ = tokenPosInf;
+      ok = match("nfinity", 7);
+    } else {
+      ok = false;
+    }
+    break;
+  case ',':
+    token.type_ = tokenArraySeparator;
+    break;
+  case ':':
+    token.type_ = tokenMemberSeparator;
+    break;
+  case 0:
+    token.type_ = tokenEndOfStream;
+    break;
+  default:
+    ok = false;
+    break;
+  }
+  if (!ok)
+    token.type_ = tokenError;
+  token.end_ = current_;
+  return true;
+}
+
+void OurReader::skipSpaces() {
+  while (current_ != end_) {
+    Char c = *current_;
+    if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+      ++current_;
+    else
+      break;
+  }
+}
+
+bool OurReader::match(Location pattern, int patternLength) {
+  if (end_ - current_ < patternLength)
+    return false;
+  int index = patternLength;
+  while (index--)
+    if (current_[index] != pattern[index])
+      return false;
+  current_ += patternLength;
+  return true;
+}
+
+bool OurReader::readComment() {
+  Location commentBegin = current_ - 1;
+  Char c = getNextChar();
+  bool successful = false;
+  if (c == '*')
+    successful = readCStyleComment();
+  else if (c == '/')
+    successful = readCppStyleComment();
+  if (!successful)
+    return false;
+
+  if (collectComments_) {
+    CommentPlacement placement = commentBefore;
+    if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
+      if (c != '*' || !containsNewLine(commentBegin, current_))
+        placement = commentAfterOnSameLine;
+    }
+
+    addComment(commentBegin, current_, placement);
+  }
+  return true;
+}
+
+String OurReader::normalizeEOL(OurReader::Location begin,
+                               OurReader::Location end) {
+  String normalized;
+  normalized.reserve(static_cast<size_t>(end - begin));
+  OurReader::Location current = begin;
+  while (current != end) {
+    char c = *current++;
+    if (c == '\r') {
+      if (current != end && *current == '\n')
+        // convert dos EOL
+        ++current;
+      // convert Mac EOL
+      normalized += '\n';
+    } else {
+      normalized += c;
+    }
+  }
+  return normalized;
+}
+
+void OurReader::addComment(Location begin,
+                           Location end,
+                           CommentPlacement placement) {
+  assert(collectComments_);
+  const String& normalized = normalizeEOL(begin, end);
+  if (placement == commentAfterOnSameLine) {
+    assert(lastValue_ != nullptr);
+    lastValue_->setComment(normalized, placement);
+  } else {
+    commentsBefore_ += normalized;
+  }
+}
+
+bool OurReader::readCStyleComment() {
+  while ((current_ + 1) < end_) {
+    Char c = getNextChar();
+    if (c == '*' && *current_ == '/')
+      break;
+  }
+  return getNextChar() == '/';
+}
+
+bool OurReader::readCppStyleComment() {
+  while (current_ != end_) {
+    Char c = getNextChar();
+    if (c == '\n')
+      break;
+    if (c == '\r') {
+      // Consume DOS EOL. It will be normalized in addComment.
+      if (current_ != end_ && *current_ == '\n')
+        getNextChar();
+      // Break on Mac OS 9 EOL.
+      break;
+    }
+  }
+  return true;
+}
+
+bool OurReader::readNumber(bool checkInf) {
+  const char* p = current_;
+  if (checkInf && p != end_ && *p == 'I') {
+    current_ = ++p;
+    return false;
+  }
+  char c = '0'; // stopgap for already consumed character
+  // integral part
+  while (c >= '0' && c <= '9')
+    c = (current_ = p) < end_ ? *p++ : '\0';
+  // fractional part
+  if (c == '.') {
+    c = (current_ = p) < end_ ? *p++ : '\0';
+    while (c >= '0' && c <= '9')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+  }
+  // exponential part
+  if (c == 'e' || c == 'E') {
+    c = (current_ = p) < end_ ? *p++ : '\0';
+    if (c == '+' || c == '-')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+    while (c >= '0' && c <= '9')
+      c = (current_ = p) < end_ ? *p++ : '\0';
+  }
+  return true;
+}
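+
+// Editor's note (illustrative, not part of upstream jsoncpp): when
+// allowSpecialFloats is enabled, the '-' case in readToken() above calls
+// readNumber(true); the early return on 'I' lets readToken() then match
+// "nfinity" so that "-Infinity" becomes tokenNegInf instead of a number.
+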
+bool OurReader::readString() {
+  Char c = 0;
+  while (current_ != end_) {
+    c = getNextChar();
+    if (c == '\\')
+      getNextChar();
+    else if (c == '"')
+      break;
+  }
+  return c == '"';
+}
+
+bool OurReader::readStringSingleQuote() {
+  Char c = 0;
+  while (current_ != end_) {
+    c = getNextChar();
+    if (c == '\\')
+      getNextChar();
+    else if (c == '\'')
+      break;
+  }
+  return c == '\'';
+}
+
+bool OurReader::readObject(Token& token) {
+  Token tokenName;
+  String name;
+  Value init(objectValue);
+  currentValue().swapPayload(init);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  while (readToken(tokenName)) {
+    bool initialTokenOk = true;
+    while (tokenName.type_ == tokenComment && initialTokenOk)
+      initialTokenOk = readToken(tokenName);
+    if (!initialTokenOk)
+      break;
+    if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
+      return true;
+    name.clear();
+    if (tokenName.type_ == tokenString) {
+      if (!decodeString(tokenName, name))
+        return recoverFromError(tokenObjectEnd);
+    } else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
+      Value numberName;
+      if (!decodeNumber(tokenName, numberName))
+        return recoverFromError(tokenObjectEnd);
+      name = numberName.asString();
+    } else {
+      break;
+    }
+    if (name.length() >= (1U << 30))
+      throwRuntimeError("keylength >= 2^30");
+    if (features_.rejectDupKeys_ && currentValue().isMember(name)) {
+      String msg = "Duplicate key: '" + name + "'";
+      return addErrorAndRecover(msg, tokenName, tokenObjectEnd);
+    }
+
+    Token colon;
+    if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
+      return addErrorAndRecover("Missing ':' after object member name", colon,
+                                tokenObjectEnd);
+    }
+    Value& value = currentValue()[name];
+    nodes_.push(&value);
+    bool ok = readValue();
+    nodes_.pop();
+    if (!ok) // error already set
+      return recoverFromError(tokenObjectEnd);
+
+    Token comma;
+    if (!readToken(comma) ||
+        (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
+         comma.type_ != tokenComment)) {
+      return addErrorAndRecover("Missing ',' or '}' in object declaration",
+                                comma, tokenObjectEnd);
+    }
+    bool finalizeTokenOk = true;
+    while (comma.type_ == tokenComment && finalizeTokenOk)
+      finalizeTokenOk = readToken(comma);
+    if (comma.type_ == tokenObjectEnd)
+      return true;
+  }
+  return addErrorAndRecover("Missing '}' or object member name", tokenName,
+                            tokenObjectEnd);
+}
+
+bool OurReader::readArray(Token& token) {
+  Value init(arrayValue);
+  currentValue().swapPayload(init);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  skipSpaces();
+  if (current_ != end_ && *current_ == ']') // empty array
+  {
+    Token endArray;
+    readToken(endArray);
+    return true;
+  }
+  int index = 0;
+  for (;;) {
+    Value& value = currentValue()[index++];
+    nodes_.push(&value);
+    bool ok = readValue();
+    nodes_.pop();
+    if (!ok) // error already set
+      return recoverFromError(tokenArrayEnd);
+
+    Token currentToken;
+    // Accept Comment after last item in the array.
+    ok = readToken(currentToken);
+    while (currentToken.type_ == tokenComment && ok) {
+      ok = readToken(currentToken);
+    }
+    bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
+                         currentToken.type_ != tokenArrayEnd);
+    if (!ok || badTokenType) {
+      return addErrorAndRecover("Missing ',' or ']' in array declaration",
+                                currentToken, tokenArrayEnd);
+    }
+    if (currentToken.type_ == tokenArrayEnd)
+      break;
+  }
+  return true;
+}
+
+bool OurReader::decodeNumber(Token& token) {
+  Value decoded;
+  if (!decodeNumber(token, decoded))
+    return false;
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
+
+bool OurReader::decodeNumber(Token& token, Value& decoded) {
+  // Attempts to parse the number as an integer. If the number is
+  // larger than the maximum supported value of an integer then
+  // we decode the number as a double.
+  Location current = token.start_;
+  bool isNegative = *current == '-';
+  if (isNegative)
+    ++current;
+
+  // TODO(issue #960): Change to constexpr
+  static const auto positive_threshold = Value::maxLargestUInt / 10;
+  static const auto positive_last_digit = Value::maxLargestUInt % 10;
+  static const auto negative_threshold =
+      Value::LargestUInt(Value::minLargestInt) / 10;
+  static const auto negative_last_digit =
+      Value::LargestUInt(Value::minLargestInt) % 10;
+
+  const auto threshold = isNegative ? negative_threshold : positive_threshold;
+  const auto last_digit =
+      isNegative ? negative_last_digit : positive_last_digit;
+
+  Value::LargestUInt value = 0;
+  while (current < token.end_) {
+    Char c = *current++;
+    if (c < '0' || c > '9')
+      return decodeDouble(token, decoded);
+
+    const auto digit(static_cast<Value::UInt>(c - '0'));
+    if (value >= threshold) {
+      // We've hit or exceeded the max value divided by 10 (rounded down). If
+      // a) we've only just touched the limit, meaning value == threshold,
+      // b) this is the last digit, and
+      // c) it's small enough to fit in that rounding delta, we're okay.
+      // Otherwise treat this number as a double to avoid overflow.
+      if (value > threshold || current != token.end_ || digit > last_digit) {
+        return decodeDouble(token, decoded);
+      }
+    }
+    value = value * 10 + digit;
+  }
+
+  if (isNegative)
+    decoded = -Value::LargestInt(value);
+  else if (value <= Value::LargestUInt(Value::maxLargestInt))
+    decoded = Value::LargestInt(value);
+  else
+    decoded = value;
+
+  return true;
+}
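+
+// Editor's note (illustrative, not part of upstream jsoncpp): with 64-bit
+// integers, maxLargestUInt is 18446744073709551615, so positive_threshold is
+// 1844674407370955161 and positive_last_digit is 5; a digit that would push
+// the accumulated value past that bound falls back to decodeDouble above.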
+
+bool OurReader::decodeDouble(Token& token) {
+  Value decoded;
+  if (!decodeDouble(token, decoded))
+    return false;
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
+
+bool OurReader::decodeDouble(Token& token, Value& decoded) {
+  double value = 0;
+  const int bufferSize = 32;
+  int count;
+  ptrdiff_t const length = token.end_ - token.start_;
+
+  // Sanity check to avoid buffer overflow exploits.
+  if (length < 0) {
+    return addError("Unable to parse token length", token);
+  }
+  auto const ulength = static_cast<size_t>(length);
+
+  // Avoid using a string constant for the format control string given to
+  // sscanf, as this can cause hard to debug crashes on OS X. See here for more
+  // info:
+  //
+  //     http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
+  char format[] = "%lf";
+
+  if (length <= bufferSize) {
+    Char buffer[bufferSize + 1];
+    memcpy(buffer, token.start_, ulength);
+    buffer[length] = 0;
+    fixNumericLocaleInput(buffer, buffer + length);
+    count = sscanf(buffer, format, &value);
+  } else {
+    String buffer(token.start_, token.end_);
+    count = sscanf(buffer.c_str(), format, &value);
+  }
+
+  if (count != 1)
+    return addError(
+        "'" + String(token.start_, token.end_) + "' is not a number.", token);
+  decoded = value;
+  return true;
+}
+
+bool OurReader::decodeString(Token& token) {
+  String decoded_string;
+  if (!decodeString(token, decoded_string))
+    return false;
+  Value decoded(decoded_string);
+  currentValue().swapPayload(decoded);
+  currentValue().setOffsetStart(token.start_ - begin_);
+  currentValue().setOffsetLimit(token.end_ - begin_);
+  return true;
+}
+
+bool OurReader::decodeString(Token& token, String& decoded) {
+  decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
+  Location current = token.start_ + 1; // skip '"'
+  Location end = token.end_ - 1;       // do not include '"'
+  while (current != end) {
+    Char c = *current++;
+    if (c == '"')
+      break;
+    else if (c == '\\') {
+      if (current == end)
+        return addError("Empty escape sequence in string", token, current);
+      Char escape = *current++;
+      switch (escape) {
+      case '"':
+        decoded += '"';
+        break;
+      case '/':
+        decoded += '/';
+        break;
+      case '\\':
+        decoded += '\\';
+        break;
+      case 'b':
+        decoded += '\b';
+        break;
+      case 'f':
+        decoded += '\f';
+        break;
+      case 'n':
+        decoded += '\n';
+        break;
+      case 'r':
+        decoded += '\r';
+        break;
+      case 't':
+        decoded += '\t';
+        break;
+      case 'u': {
+        unsigned int unicode;
+        if (!decodeUnicodeCodePoint(token, current, end, unicode))
+          return false;
+        decoded += codePointToUTF8(unicode);
+      } break;
+      default:
+        return addError("Bad escape sequence in string", token, current);
+      }
+    } else {
+      decoded += c;
+    }
+  }
+  return true;
+}
+
+bool OurReader::decodeUnicodeCodePoint(Token& token,
+                                       Location& current,
+                                       Location end,
+                                       unsigned int& unicode) {
+
+  if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
+    return false;
+  if (unicode >= 0xD800 && unicode <= 0xDBFF) {
+    // surrogate pairs
+    if (end - current < 6)
+      return addError(
+          "additional six characters expected to parse unicode surrogate pair.",
+          token, current);
+    if (*(current++) == '\\' && *(current++) == 'u') {
+      unsigned int surrogatePair;
+      if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
+        unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
+      } else
+        return false;
+    } else
+      return addError("expecting another \\u token to begin the second half of "
+                      "a unicode surrogate pair",
+                      token, current);
+  }
+  return true;
+}
+
+bool OurReader::decodeUnicodeEscapeSequence(Token& token,
+                                            Location& current,
+                                            Location end,
+                                            unsigned int& ret_unicode) {
+  if (end - current < 4)
+    return addError(
+        "Bad unicode escape sequence in string: four digits expected.", token,
+        current);
+  int unicode = 0;
+  for (int index = 0; index < 4; ++index) {
+    Char c = *current++;
+    unicode *= 16;
+    if (c >= '0' && c <= '9')
+      unicode += c - '0';
+    else if (c >= 'a' && c <= 'f')
+      unicode += c - 'a' + 10;
+    else if (c >= 'A' && c <= 'F')
+      unicode += c - 'A' + 10;
+    else
+      return addError(
+          "Bad unicode escape sequence in string: hexadecimal digit expected.",
+          token, current);
+  }
+  ret_unicode = static_cast<unsigned int>(unicode);
+  return true;
+}
+
+bool OurReader::addError(const String& message, Token& token, Location extra) {
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = extra;
+  errors_.push_back(info);
+  return false;
+}
+
+bool OurReader::recoverFromError(TokenType skipUntilToken) {
+  size_t errorCount = errors_.size();
+  Token skip;
+  for (;;) {
+    if (!readToken(skip))
+      errors_.resize(errorCount); // discard errors caused by recovery
+    if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
+      break;
+  }
+  errors_.resize(errorCount);
+  return false;
+}
+
+bool OurReader::addErrorAndRecover(const String& message,
+                                   Token& token,
+                                   TokenType skipUntilToken) {
+  addError(message, token);
+  return recoverFromError(skipUntilToken);
+}
+
+Value& OurReader::currentValue() { return *(nodes_.top()); }
+
+OurReader::Char OurReader::getNextChar() {
+  if (current_ == end_)
+    return 0;
+  return *current_++;
+}
+
+void OurReader::getLocationLineAndColumn(Location location,
+                                         int& line,
+                                         int& column) const {
+  Location current = begin_;
+  Location lastLineStart = current;
+  line = 0;
+  while (current < location && current != end_) {
+    Char c = *current++;
+    if (c == '\r') {
+      if (*current == '\n')
+        ++current;
+      lastLineStart = current;
+      ++line;
+    } else if (c == '\n') {
+      lastLineStart = current;
+      ++line;
+    }
+  }
+  // column & line start at 1
+  column = int(location - lastLineStart) + 1;
+  ++line;
+}
+
+String OurReader::getLocationLineAndColumn(Location location) const {
+  int line, column;
+  getLocationLineAndColumn(location, line, column);
+  char buffer[18 + 16 + 16 + 1];
+  jsoncpp_snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
+  return buffer;
+}
+
+String OurReader::getFormattedErrorMessages() const {
+  String formattedMessage;
+  for (const auto& error : errors_) {
+    formattedMessage +=
+        "* " + getLocationLineAndColumn(error.token_.start_) + "\n";
+    formattedMessage += "  " + error.message_ + "\n";
+    if (error.extra_)
+      formattedMessage +=
+          "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
+  }
+  return formattedMessage;
+}
+
+std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
+  std::vector<OurReader::StructuredError> allErrors;
+  for (const auto& error : errors_) {
+    OurReader::StructuredError structured;
+    structured.offset_start = error.token_.start_ - begin_;
+    structured.offset_limit = error.token_.end_ - begin_;
+    structured.message = error.message_;
+    allErrors.push_back(structured);
+  }
+  return allErrors;
+}
+
+bool OurReader::pushError(const Value& value, const String& message) {
+  ptrdiff_t length = end_ - begin_;
+  if (value.getOffsetStart() > length || value.getOffsetLimit() > length)
+    return false;
+  Token token;
+  token.type_ = tokenError;
+  token.start_ = begin_ + value.getOffsetStart();
+  token.end_ = begin_ + value.getOffsetLimit();
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = nullptr;
+  errors_.push_back(info);
+  return true;
+}
+
+bool OurReader::pushError(const Value& value,
+                          const String& message,
+                          const Value& extra) {
+  ptrdiff_t length = end_ - begin_;
+  if (value.getOffsetStart() > length || value.getOffsetLimit() > length ||
+      extra.getOffsetLimit() > length)
+    return false;
+  Token token;
+  token.type_ = tokenError;
+  token.start_ = begin_ + value.getOffsetStart();
+  token.end_ = begin_ + value.getOffsetLimit();
+  ErrorInfo info;
+  info.token_ = token;
+  info.message_ = message;
+  info.extra_ = begin_ + extra.getOffsetStart();
+  errors_.push_back(info);
+  return true;
+}
+
+bool OurReader::good() const { return errors_.empty(); }
+
+class OurCharReader : public CharReader {
+  bool const collectComments_;
+  OurReader reader_;
+
+public:
+  OurCharReader(bool collectComments, OurFeatures const& features)
+      : collectComments_(collectComments), reader_(features) {}
+  bool parse(char const* beginDoc,
+             char const* endDoc,
+             Value* root,
+             String* errs) override {
+    bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_);
+    if (errs) {
+      *errs = reader_.getFormattedErrorMessages();
+    }
+    return ok;
+  }
+};
+
+CharReaderBuilder::CharReaderBuilder() { setDefaults(&settings_); }
+CharReaderBuilder::~CharReaderBuilder() = default;
+CharReader* CharReaderBuilder::newCharReader() const {
+  bool collectComments = settings_["collectComments"].asBool();
+  OurFeatures features = OurFeatures::all();
+  features.allowComments_ = settings_["allowComments"].asBool();
+  features.strictRoot_ = settings_["strictRoot"].asBool();
+  features.allowDroppedNullPlaceholders_ =
+      settings_["allowDroppedNullPlaceholders"].asBool();
+  features.allowNumericKeys_ = settings_["allowNumericKeys"].asBool();
+  features.allowSingleQuotes_ = settings_["allowSingleQuotes"].asBool();
+
+  // Stack limit is always a size_t, so we get this as an unsigned int
+  // regardless of whether we have 64-bit integer support enabled.
+  features.stackLimit_ = static_cast<size_t>(settings_["stackLimit"].asUInt());
+  features.failIfExtra_ = settings_["failIfExtra"].asBool();
+  features.rejectDupKeys_ = settings_["rejectDupKeys"].asBool();
+  features.allowSpecialFloats_ = settings_["allowSpecialFloats"].asBool();
+  return new OurCharReader(collectComments, features);
+}
+static void getValidReaderKeys(std::set<String>* valid_keys) {
+  valid_keys->clear();
+  valid_keys->insert("collectComments");
+  valid_keys->insert("allowComments");
+  valid_keys->insert("strictRoot");
+  valid_keys->insert("allowDroppedNullPlaceholders");
+  valid_keys->insert("allowNumericKeys");
+  valid_keys->insert("allowSingleQuotes");
+  valid_keys->insert("stackLimit");
+  valid_keys->insert("failIfExtra");
+  valid_keys->insert("rejectDupKeys");
+  valid_keys->insert("allowSpecialFloats");
+}
+bool CharReaderBuilder::validate(Json::Value* invalid) const {
+  Json::Value my_invalid;
+  if (!invalid)
+    invalid = &my_invalid; // so we do not need to test for NULL
+  Json::Value& inv = *invalid;
+  std::set<String> valid_keys;
+  getValidReaderKeys(&valid_keys);
+  Value::Members keys = settings_.getMemberNames();
+  size_t n = keys.size();
+  for (size_t i = 0; i < n; ++i) {
+    String const& key = keys[i];
+    if (valid_keys.find(key) == valid_keys.end()) {
+      inv[key] = settings_[key];
+    }
+  }
+  return inv.empty();
+}
+Value& CharReaderBuilder::operator[](const String& key) {
+  return settings_[key];
+}
+// static
+void CharReaderBuilder::strictMode(Json::Value* settings) {
+  //! [CharReaderBuilderStrictMode]
+  (*settings)["allowComments"] = false;
+  (*settings)["strictRoot"] = true;
+  (*settings)["allowDroppedNullPlaceholders"] = false;
+  (*settings)["allowNumericKeys"] = false;
+  (*settings)["allowSingleQuotes"] = false;
+  (*settings)["stackLimit"] = 1000;
+  (*settings)["failIfExtra"] = true;
+  (*settings)["rejectDupKeys"] = true;
+  (*settings)["allowSpecialFloats"] = false;
+  //! [CharReaderBuilderStrictMode]
+}
+// static
+void CharReaderBuilder::setDefaults(Json::Value* settings) {
+  //! [CharReaderBuilderDefaults]
+  (*settings)["collectComments"] = true;
+  (*settings)["allowComments"] = true;
+  (*settings)["strictRoot"] = false;
+  (*settings)["allowDroppedNullPlaceholders"] = false;
+  (*settings)["allowNumericKeys"] = false;
+  (*settings)["allowSingleQuotes"] = false;
+  (*settings)["stackLimit"] = 1000;
+  (*settings)["failIfExtra"] = false;
+  (*settings)["rejectDupKeys"] = false;
+  (*settings)["allowSpecialFloats"] = false;
+  //! [CharReaderBuilderDefaults]
+}
+
+//////////////////////////////////
+// global functions
+
+bool parseFromStream(CharReader::Factory const& fact,
+                     IStream& sin,
+                     Value* root,
+                     String* errs) {
+  OStringStream ssin;
+  ssin << sin.rdbuf();
+  String doc = ssin.str();
+  char const* begin = doc.data();
+  char const* end = begin + doc.size();
+  // Note that we do not actually need a null-terminator.
+  CharReaderPtr const reader(fact.newCharReader());
+  return reader->parse(begin, end, root, errs);
+}
+
+IStream& operator>>(IStream& sin, Value& root) {
+  CharReaderBuilder b;
+  String errs;
+  bool ok = parseFromStream(b, sin, &root, &errs);
+  if (!ok) {
+    throwRuntimeError(errs);
+  }
+  return sin;
+}
 
 } // namespace Json
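
A minimal usage sketch of the CharReader path added above (CharReaderBuilder, newCharReader(), parseFromStream()); the wrapper function, the stream type, and the settings key used here are illustrative assumptions, not part of this patch:

    #include <json/json.h>
    #include <sstream>
    #include <string>

    bool parseDocument(const std::string& doc, Json::Value& root) {
      Json::CharReaderBuilder builder;        // defaults come from setDefaults()
      builder["collectComments"] = false;     // any key accepted by validate()
      Json::String errs;
      std::istringstream in(doc);
      // parseFromStream() obtains a reader via the builder's newCharReader().
      return Json::parseFromStream(builder, in, &root, &errs);
    }

The operator>>(IStream&, Value&) overload defined above is the throwing variant of the same path: it calls parseFromStream() and raises via throwRuntimeError() on failure.
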
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_tool.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_tool.h
index 658031b..5c13f1f 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_tool.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_tool.h
@@ -1,10 +1,23 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef LIB_JSONCPP_JSON_TOOL_H_INCLUDED
-# define LIB_JSONCPP_JSON_TOOL_H_INCLUDED
+#define LIB_JSONCPP_JSON_TOOL_H_INCLUDED
+
+#if !defined(JSON_IS_AMALGAMATION)
+#include <json/config.h>
+#endif
+
+// Also support old flag NO_LOCALE_SUPPORT
+#ifdef NO_LOCALE_SUPPORT
+#define JSONCPP_NO_LOCALE_SUPPORT
+#endif
+
+#ifndef JSONCPP_NO_LOCALE_SUPPORT
+#include <clocale>
+#endif
 
 /* This header provides common string manipulation support, such as UTF-8,
  * portable conversion from/to string...
@@ -13,81 +26,109 @@
  */
 
 namespace Json {
+static inline char getDecimalPoint() {
+#ifdef JSONCPP_NO_LOCALE_SUPPORT
+  return '\0';
+#else
+  struct lconv* lc = localeconv();
+  return lc ? *(lc->decimal_point) : '\0';
+#endif
+}
 
 /// Converts a unicode code-point to UTF-8.
-static inline std::string 
-codePointToUTF8(unsigned int cp)
-{
-   std::string result;
-   
-   // based on description from http://en.wikipedia.org/wiki/UTF-8
+static inline String codePointToUTF8(unsigned int cp) {
+  String result;
 
-   if (cp <= 0x7f) 
-   {
-      result.resize(1);
-      result[0] = static_cast<char>(cp);
-   } 
-   else if (cp <= 0x7FF) 
-   {
-      result.resize(2);
-      result[1] = static_cast<char>(0x80 | (0x3f & cp));
-      result[0] = static_cast<char>(0xC0 | (0x1f & (cp >> 6)));
-   } 
-   else if (cp <= 0xFFFF) 
-   {
-      result.resize(3);
-      result[2] = static_cast<char>(0x80 | (0x3f & cp));
-      result[1] = 0x80 | static_cast<char>((0x3f & (cp >> 6)));
-      result[0] = 0xE0 | static_cast<char>((0xf & (cp >> 12)));
-   }
-   else if (cp <= 0x10FFFF) 
-   {
-      result.resize(4);
-      result[3] = static_cast<char>(0x80 | (0x3f & cp));
-      result[2] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
-      result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 12)));
-      result[0] = static_cast<char>(0xF0 | (0x7 & (cp >> 18)));
-   }
+  // based on description from http://en.wikipedia.org/wiki/UTF-8
 
-   return result;
+  if (cp <= 0x7f) {
+    result.resize(1);
+    result[0] = static_cast<char>(cp);
+  } else if (cp <= 0x7FF) {
+    result.resize(2);
+    result[1] = static_cast<char>(0x80 | (0x3f & cp));
+    result[0] = static_cast<char>(0xC0 | (0x1f & (cp >> 6)));
+  } else if (cp <= 0xFFFF) {
+    result.resize(3);
+    result[2] = static_cast<char>(0x80 | (0x3f & cp));
+    result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+    result[0] = static_cast<char>(0xE0 | (0xf & (cp >> 12)));
+  } else if (cp <= 0x10FFFF) {
+    result.resize(4);
+    result[3] = static_cast<char>(0x80 | (0x3f & cp));
+    result[2] = static_cast<char>(0x80 | (0x3f & (cp >> 6)));
+    result[1] = static_cast<char>(0x80 | (0x3f & (cp >> 12)));
+    result[0] = static_cast<char>(0xF0 | (0x7 & (cp >> 18)));
+  }
+
+  return result;
 }
 
-
-/// Returns true if ch is a control character (in range [0,32[).
-static inline bool 
-isControlCharacter(char ch)
-{
-   return ch > 0 && ch <= 0x1F;
-}
-
-
-enum { 
-   /// Constant that specify the size of the buffer that must be passed to uintToString.
-   uintToStringBufferSize = 3*sizeof(LargestUInt)+1 
+enum {
+  /// Constant that specifies the size of the buffer that must be passed to
+  /// uintToString.
+  uintToStringBufferSize = 3 * sizeof(LargestUInt) + 1
 };
 
 // Defines a char buffer for use with uintToString().
 typedef char UIntToStringBuffer[uintToStringBufferSize];
 
-
 /** Converts an unsigned integer to string.
- * @param value Unsigned interger to convert to string
- * @param current Input/Output string buffer. 
+ * @param value Unsigned integer to convert to string
+ * @param current Input/Output string buffer.
  *        Must have at least uintToStringBufferSize chars free.
  */
-static inline void 
-uintToString( LargestUInt value, 
-              char *&current )
-{
-   *--current = 0;
-   do
-   {
-      *--current = char(value % 10) + '0';
-      value /= 10;
-   }
-   while ( value != 0 );
+static inline void uintToString(LargestUInt value, char*& current) {
+  *--current = 0;
+  do {
+    *--current = static_cast<char>(value % 10U + static_cast<unsigned>('0'));
+    value /= 10;
+  } while (value != 0);
 }
 
-} // namespace Json {
+/** Change ',' to '.' everywhere in buffer.
+ *
+ * We had a sophisticated way, but it did not work in WinCE.
+ * @see https://github.com/open-source-parsers/jsoncpp/pull/9
+ */
+template <typename Iter> Iter fixNumericLocale(Iter begin, Iter end) {
+  for (; begin != end; ++begin) {
+    if (*begin == ',') {
+      *begin = '.';
+    }
+  }
+  return begin;
+}
+
+template <typename Iter> void fixNumericLocaleInput(Iter begin, Iter end) {
+  char decimalPoint = getDecimalPoint();
+  if (decimalPoint == '\0' || decimalPoint == '.') {
+    return;
+  }
+  for (; begin != end; ++begin) {
+    if (*begin == '.') {
+      *begin = decimalPoint;
+    }
+  }
+}
+
+/**
+ * Return the iterator that would be the new end of the range [begin,end), if
+ * we were to delete trailing zeros, but not the last zero before '.'.
+ */
+template <typename Iter> Iter fixZerosInTheEnd(Iter begin, Iter end) {
+  for (; begin != end; --end) {
+    if (*(end - 1) != '0') {
+      return end;
+    }
+    // Don't delete the last zero before the decimal point.
+    if (begin != (end - 1) && *(end - 2) == '.') {
+      return end;
+    }
+  }
+  return end;
+}
+
+} // namespace Json
 
 #endif // LIB_JSONCPP_JSON_TOOL_H_INCLUDED
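
A short sketch of how writer-side code is expected to drive the uintToString() helper declared above; the wrapper function and its inclusion of the private json_tool.h header are illustrative assumptions:

    #include <string>
    #include "json_tool.h"  // private lib_json header, assumed reachable here

    static Json::String formatUInt(Json::LargestUInt value) {
      Json::UIntToStringBuffer buffer;        // 3 * sizeof(LargestUInt) + 1 chars
      char* current = buffer + sizeof(buffer);
      Json::uintToString(value, current);     // writes '\0' first, digits right-to-left
      return current;                         // now points at the first digit
    }

For the trailing-zero helper, fixZerosInTheEnd() returns the new end of the range: "1.2500" is trimmed to "1.25", while "3.000" keeps one zero after the decimal point and becomes "3.0".
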
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_value.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_value.cpp
index 91f312e..d813c16 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_value.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_value.cpp
@@ -1,76 +1,135 @@
-// Copyright 2011 Baptiste Lepilleur
+// Copyright 2011 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include <json/assertions.h>
-# include <json/value.h>
-# include <json/writer.h>
-# ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
-#  include "json_batchallocator.h"
-# endif // #ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR
+#include <json/assertions.h>
+#include <json/value.h>
+#include <json/writer.h>
 #endif // if !defined(JSON_IS_AMALGAMATION)
-#include <math.h>
+#include <cassert>
+#include <cmath>
+#include <cstring>
 #include <sstream>
 #include <utility>
-#include <stdexcept>
-#include <cstring>
-#include <cassert>
 #ifdef JSON_USE_CPPTL
-# include <cpptl/conststring.h>
+#include <cpptl/conststring.h>
 #endif
-#include <cstddef>    // size_t
+#include <algorithm> // min()
+#include <cstddef>   // size_t
 
-#define JSON_ASSERT_UNREACHABLE assert( false )
+// Provide an implementation equivalent to std::snprintf for older MSVC compilers
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#include <stdarg.h>
+static int msvc_pre1900_c99_vsnprintf(char* outBuf,
+                                      size_t size,
+                                      const char* format,
+                                      va_list ap) {
+  int count = -1;
+  if (size != 0)
+    count = _vsnprintf_s(outBuf, size, _TRUNCATE, format, ap);
+  if (count == -1)
+    count = _vscprintf(format, ap);
+  return count;
+}
+
+int JSON_API msvc_pre1900_c99_snprintf(char* outBuf,
+                                       size_t size,
+                                       const char* format,
+                                       ...) {
+  va_list ap;
+  va_start(ap, format);
+  const int count = msvc_pre1900_c99_vsnprintf(outBuf, size, format, ap);
+  va_end(ap);
+  return count;
+}
+#endif
+
+// Disable warning C4702 : unreachable code
+#if defined(_MSC_VER)
+#pragma warning(disable : 4702)
+#endif
+
+#define JSON_ASSERT_UNREACHABLE assert(false)
 
 namespace Json {
+template <typename T>
+static std::unique_ptr<T> cloneUnique(const std::unique_ptr<T>& p) {
+  std::unique_ptr<T> r;
+  if (p) {
+    r = std::unique_ptr<T>(new T(*p));
+  }
+  return r;
+}
 
-const Value Value::null;
-const Int Value::minInt = Int( ~(UInt(-1)/2) );
-const Int Value::maxInt = Int( UInt(-1)/2 );
+// This is a workaround to avoid the static initialization of Value::null.
+// kNull must be word-aligned to avoid crashing on ARM.  We use an alignment of
+// 8 (instead of 4) as a bit of future-proofing.
+#if defined(__ARMEL__)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#else
+#define ALIGNAS(byte_alignment)
+#endif
+
+// static
+Value const& Value::nullSingleton() {
+  static Value const nullStatic;
+  return nullStatic;
+}
+
+#if JSON_USE_NULLREF
+// for backwards compatibility, we'll leave these global references around, but
+// DO NOT use them in JSONCPP library code any more!
+// static
+Value const& Value::null = Value::nullSingleton();
+
+// static
+Value const& Value::nullRef = Value::nullSingleton();
+#endif
+
+const Int Value::minInt = Int(~(UInt(-1) / 2));
+const Int Value::maxInt = Int(UInt(-1) / 2);
 const UInt Value::maxUInt = UInt(-1);
-# if defined(JSON_HAS_INT64)
-const Int64 Value::minInt64 = Int64( ~(UInt64(-1)/2) );
-const Int64 Value::maxInt64 = Int64( UInt64(-1)/2 );
+#if defined(JSON_HAS_INT64)
+const Int64 Value::minInt64 = Int64(~(UInt64(-1) / 2));
+const Int64 Value::maxInt64 = Int64(UInt64(-1) / 2);
 const UInt64 Value::maxUInt64 = UInt64(-1);
 // The constant is hard-coded because some compiler have trouble
 // converting Value::maxUInt64 to a double correctly (AIX/xlC).
 // Assumes that UInt64 is a 64 bits integer.
 static const double maxUInt64AsDouble = 18446744073709551615.0;
 #endif // defined(JSON_HAS_INT64)
-const LargestInt Value::minLargestInt = LargestInt( ~(LargestUInt(-1)/2) );
-const LargestInt Value::maxLargestInt = LargestInt( LargestUInt(-1)/2 );
+const LargestInt Value::minLargestInt = LargestInt(~(LargestUInt(-1) / 2));
+const LargestInt Value::maxLargestInt = LargestInt(LargestUInt(-1) / 2);
 const LargestUInt Value::maxLargestUInt = LargestUInt(-1);
 
-
-/// Unknown size marker
-static const unsigned int unknown = (unsigned)-1;
+const UInt Value::defaultRealPrecision = 17;
 
 #if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
 template <typename T, typename U>
 static inline bool InRange(double d, T min, U max) {
-   return d >= min && d <= max;
+  // The casts can lose precision, but we are looking only for
+  // an approximate range. Might fail on edge cases though. ~cdunn
+  // return d >= static_cast<double>(min) && d <= static_cast<double>(max);
+  return d >= min && d <= max;
 }
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-static inline double integerToDouble( Json::UInt64 value )
-{
-    return static_cast<double>( Int64(value/2) ) * 2.0 + Int64(value & 1);
+#else  // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+static inline double integerToDouble(Json::UInt64 value) {
+  return static_cast<double>(Int64(value / 2)) * 2.0 +
+         static_cast<double>(Int64(value & 1));
 }
 
-template<typename T>
-static inline double integerToDouble( T value )
-{
-    return static_cast<double>( value );
+template <typename T> static inline double integerToDouble(T value) {
+  return static_cast<double>(value);
 }
 
 template <typename T, typename U>
 static inline bool InRange(double d, T min, U max) {
-   return d >= integerToDouble(min) && d <= integerToDouble(max);
+  return d >= integerToDouble(min) && d <= integerToDouble(max);
 }
 #endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
 
-
 /** Duplicates the specified string value.
  * @param value Pointer to the string to duplicate. Must be zero-terminated if
  *              length is "unknown".
@@ -78,38 +137,81 @@
  *               computed using strlen(value).
  * @return Pointer on the duplicate instance of string.
  */
-static inline char *
-duplicateStringValue( const char *value, 
-                      unsigned int length = unknown )
-{
-   if ( length == unknown )
-      length = (unsigned int)strlen(value);
+static inline char* duplicateStringValue(const char* value, size_t length) {
+  // Avoid an integer overflow in the call to malloc below by limiting length
+  // to a sane value.
+  if (length >= static_cast<size_t>(Value::maxInt))
+    length = Value::maxInt - 1;
 
-   // Avoid an integer overflow in the call to malloc below by limiting length
-   // to a sane value.
-   if (length >= (unsigned)Value::maxInt)
-      length = Value::maxInt - 1;
-
-   char *newString = static_cast<char *>( malloc( length + 1 ) );
-   JSON_ASSERT_MESSAGE( newString != 0, "Failed to allocate string value buffer" );
-   memcpy( newString, value, length );
-   newString[length] = 0;
-   return newString;
+  char* newString = static_cast<char*>(malloc(length + 1));
+  if (newString == nullptr) {
+    throwRuntimeError("in Json::Value::duplicateStringValue(): "
+                      "Failed to allocate string value buffer");
+  }
+  memcpy(newString, value, length);
+  newString[length] = 0;
+  return newString;
 }
 
-
-/** Free the string duplicated by duplicateStringValue().
+/* Record the length as a prefix.
  */
-static inline void 
-releaseStringValue( char *value )
-{
-   if ( value )
-      free( value );
+static inline char* duplicateAndPrefixStringValue(const char* value,
+                                                  unsigned int length) {
+  // Avoid an integer overflow in the call to malloc below by limiting length
+  // to a sane value.
+  JSON_ASSERT_MESSAGE(length <= static_cast<unsigned>(Value::maxInt) -
+                                    sizeof(unsigned) - 1U,
+                      "in Json::Value::duplicateAndPrefixStringValue(): "
+                      "length too big for prefixing");
+  unsigned actualLength = length + static_cast<unsigned>(sizeof(unsigned)) + 1U;
+  char* newString = static_cast<char*>(malloc(actualLength));
+  if (newString == nullptr) {
+    throwRuntimeError("in Json::Value::duplicateAndPrefixStringValue(): "
+                      "Failed to allocate string value buffer");
+  }
+  *reinterpret_cast<unsigned*>(newString) = length;
+  memcpy(newString + sizeof(unsigned), value, length);
+  newString[actualLength - 1U] =
+      0; // to avoid buffer over-run accidents by users later
+  return newString;
 }
+inline static void decodePrefixedString(bool isPrefixed,
+                                        char const* prefixed,
+                                        unsigned* length,
+                                        char const** value) {
+  if (!isPrefixed) {
+    *length = static_cast<unsigned>(strlen(prefixed));
+    *value = prefixed;
+  } else {
+    *length = *reinterpret_cast<unsigned const*>(prefixed);
+    *value = prefixed + sizeof(unsigned);
+  }
+}
+/** Free the string duplicated by
+ * duplicateStringValue()/duplicateAndPrefixStringValue().
+ */
+#if JSONCPP_USING_SECURE_MEMORY
+static inline void releasePrefixedStringValue(char* value) {
+  unsigned length = 0;
+  char const* valueDecoded;
+  decodePrefixedString(true, value, &length, &valueDecoded);
+  size_t const size = sizeof(unsigned) + length + 1U;
+  memset(value, 0, size);
+  free(value);
+}
+static inline void releaseStringValue(char* value, unsigned length) {
+  // length==0 => we allocated the string's memory
+  size_t size = (length == 0) ? strlen(value) : length;
+  memset(value, 0, size);
+  free(value);
+}
+#else  // !JSONCPP_USING_SECURE_MEMORY
+static inline void releasePrefixedStringValue(char* value) { free(value); }
+static inline void releaseStringValue(char* value, unsigned) { free(value); }
+#endif // JSONCPP_USING_SECURE_MEMORY
 
 } // namespace Json
 
-
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
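
For reference, a sketch of the block layout produced by the prefixed-string helpers in the hunk above; since these helpers are file-local statics, the round-trip is shown as a comment rather than as callable code:

    // char* p = duplicateAndPrefixStringValue("abc", 3);
    // //   layout: [unsigned length = 3]['a']['b']['c']['\0']
    // //   size  : sizeof(unsigned) + 3 + 1 bytes
    // unsigned len;
    // char const* s;
    // decodePrefixedString(/*isPrefixed=*/true, p, &len, &s);
    // //   len == 3, s == p + sizeof(unsigned), s is NUL-terminated
    // releasePrefixedStringValue(p);
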
@@ -118,48 +220,28 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 #if !defined(JSON_IS_AMALGAMATION)
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-#  include "json_internalarray.inl"
-#  include "json_internalmap.inl"
-# endif // JSON_VALUE_USE_INTERNAL_MAP
 
-# include "json_valueiterator.inl"
+#include "json_valueiterator.inl"
 #endif // if !defined(JSON_IS_AMALGAMATION)
 
 namespace Json {
 
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// class Value::CommentInfo
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-// //////////////////////////////////////////////////////////////////
-
-
-Value::CommentInfo::CommentInfo()
-   : comment_( 0 )
-{
+#if JSON_USE_EXCEPTION
+Exception::Exception(String msg) : msg_(std::move(msg)) {}
+Exception::~Exception() JSONCPP_NOEXCEPT {}
+char const* Exception::what() const JSONCPP_NOEXCEPT { return msg_.c_str(); }
+RuntimeError::RuntimeError(String const& msg) : Exception(msg) {}
+LogicError::LogicError(String const& msg) : Exception(msg) {}
+[[noreturn]] void throwRuntimeError(String const& msg) {
+  throw RuntimeError(msg);
 }
-
-Value::CommentInfo::~CommentInfo()
-{
-   if ( comment_ )
-      releaseStringValue( comment_ );
+[[noreturn]] void throwLogicError(String const& msg) {
+  throw LogicError(msg);
 }
-
-
-void 
-Value::CommentInfo::setComment( const char *text )
-{
-   if ( comment_ )
-      releaseStringValue( comment_ );
-   JSON_ASSERT( text != 0 );
-   JSON_ASSERT_MESSAGE( text[0]=='\0' || text[0]=='/', "Comments must start with /");
-   // It seems that /**/ style comments are acceptable as well.
-   comment_ = duplicateStringValue( text );
-}
-
+#else // !JSON_USE_EXCEPTION
+[[noreturn]] void throwRuntimeError(String const& msg) { abort(); }
+[[noreturn]] void throwLogicError(String const& msg) { abort(); }
+#endif
 
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
@@ -168,93 +250,110 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
-# ifndef JSON_VALUE_USE_INTERNAL_MAP
 
-// Notes: index_ indicates if the string was allocated when
+// Notes: policy_ indicates if the string was allocated when
 // a string is stored.
 
-Value::CZString::CZString( ArrayIndex index )
-   : cstr_( 0 )
-   , index_( index )
-{
+Value::CZString::CZString(ArrayIndex index) : cstr_(nullptr), index_(index) {}
+
+Value::CZString::CZString(char const* str,
+                          unsigned length,
+                          DuplicationPolicy allocate)
+    : cstr_(str) {
+  // allocate != duplicate
+  storage_.policy_ = allocate & 0x3;
+  storage_.length_ = length & 0x3FFFFFFF;
 }
 
-Value::CZString::CZString( const char *cstr, DuplicationPolicy allocate )
-   : cstr_( allocate == duplicate ? duplicateStringValue(cstr) 
-                                  : cstr )
-   , index_( allocate )
-{
+Value::CZString::CZString(const CZString& other) {
+  cstr_ = (other.storage_.policy_ != noDuplication && other.cstr_ != nullptr
+               ? duplicateStringValue(other.cstr_, other.storage_.length_)
+               : other.cstr_);
+  storage_.policy_ =
+      static_cast<unsigned>(
+          other.cstr_
+              ? (static_cast<DuplicationPolicy>(other.storage_.policy_) ==
+                         noDuplication
+                     ? noDuplication
+                     : duplicate)
+              : static_cast<DuplicationPolicy>(other.storage_.policy_)) &
+      3U;
+  storage_.length_ = other.storage_.length_;
 }
 
-Value::CZString::CZString( const CZString &other )
-: cstr_( other.index_ != noDuplication &&  other.cstr_ != 0
-                ?  duplicateStringValue( other.cstr_ )
-                : other.cstr_ )
-   , index_( other.cstr_ ? (other.index_ == noDuplication ? noDuplication : duplicate)
-                         : other.index_ )
-{
+Value::CZString::CZString(CZString&& other)
+    : cstr_(other.cstr_), index_(other.index_) {
+  other.cstr_ = nullptr;
 }
 
-Value::CZString::~CZString()
-{
-   if ( cstr_  &&  index_ == duplicate )
-      releaseStringValue( const_cast<char *>( cstr_ ) );
+Value::CZString::~CZString() {
+  if (cstr_ && storage_.policy_ == duplicate) {
+    releaseStringValue(const_cast<char*>(cstr_),
+                       storage_.length_ + 1u); // +1 for null terminating
+                                               // character for sake of
+                                               // completeness but not actually
+                                               // necessary
+  }
 }
 
-void 
-Value::CZString::swap( CZString &other )
-{
-   std::swap( cstr_, other.cstr_ );
-   std::swap( index_, other.index_ );
+void Value::CZString::swap(CZString& other) {
+  std::swap(cstr_, other.cstr_);
+  std::swap(index_, other.index_);
 }
 
-Value::CZString &
-Value::CZString::operator =( const CZString &other )
-{
-   CZString temp( other );
-   swap( temp );
-   return *this;
+Value::CZString& Value::CZString::operator=(const CZString& other) {
+  cstr_ = other.cstr_;
+  index_ = other.index_;
+  return *this;
 }
 
-bool 
-Value::CZString::operator<( const CZString &other ) const 
-{
-   if ( cstr_ )
-      return strcmp( cstr_, other.cstr_ ) < 0;
-   return index_ < other.index_;
+Value::CZString& Value::CZString::operator=(CZString&& other) {
+  cstr_ = other.cstr_;
+  index_ = other.index_;
+  other.cstr_ = nullptr;
+  return *this;
 }
 
-bool 
-Value::CZString::operator==( const CZString &other ) const 
-{
-   if ( cstr_ )
-      return strcmp( cstr_, other.cstr_ ) == 0;
-   return index_ == other.index_;
+bool Value::CZString::operator<(const CZString& other) const {
+  if (!cstr_)
+    return index_ < other.index_;
+  // return strcmp(cstr_, other.cstr_) < 0;
+  // Assume both are strings.
+  unsigned this_len = this->storage_.length_;
+  unsigned other_len = other.storage_.length_;
+  unsigned min_len = std::min<unsigned>(this_len, other_len);
+  JSON_ASSERT(this->cstr_ && other.cstr_);
+  int comp = memcmp(this->cstr_, other.cstr_, min_len);
+  if (comp < 0)
+    return true;
+  if (comp > 0)
+    return false;
+  return (this_len < other_len);
 }
 
-
-ArrayIndex 
-Value::CZString::index() const
-{
-   return index_;
+bool Value::CZString::operator==(const CZString& other) const {
+  if (!cstr_)
+    return index_ == other.index_;
+  // return strcmp(cstr_, other.cstr_) == 0;
+  // Assume both are strings.
+  unsigned this_len = this->storage_.length_;
+  unsigned other_len = other.storage_.length_;
+  if (this_len != other_len)
+    return false;
+  JSON_ASSERT(this->cstr_ && other.cstr_);
+  int comp = memcmp(this->cstr_, other.cstr_, this_len);
+  return comp == 0;
 }
 
+ArrayIndex Value::CZString::index() const { return index_; }
 
-const char *
-Value::CZString::c_str() const
-{
-   return cstr_;
+// const char* Value::CZString::c_str() const { return cstr_; }
+const char* Value::CZString::data() const { return cstr_; }
+unsigned Value::CZString::length() const { return storage_.length_; }
+bool Value::CZString::isStaticString() const {
+  return storage_.policy_ == noDuplication;
 }
 
-bool 
-Value::CZString::isStaticString() const
-{
-   return index_ == noDuplication;
-}
-
-#endif // ifndef JSON_VALUE_USE_INTERNAL_MAP
-
-
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
@@ -267,1085 +366,935 @@
  * memset( this, 0, sizeof(Value) )
  * This optimization is used in ValueInternalMap fast allocator.
  */
-Value::Value( ValueType type )
-   : type_( type )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   switch ( type )
-   {
-   case nullValue:
-      break;
-   case intValue:
-   case uintValue:
-      value_.int_ = 0;
-      break;
-   case realValue:
-      value_.real_ = 0.0;
-      break;
-   case stringValue:
-      value_.string_ = 0;
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_ = new ObjectValues();
-      break;
-#else
-   case arrayValue:
-      value_.array_ = arrayAllocator()->newArray();
-      break;
-   case objectValue:
-      value_.map_ = mapAllocator()->newMap();
-      break;
-#endif
-   case booleanValue:
-      value_.bool_ = false;
-      break;
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
+Value::Value(ValueType type) {
+  static char const emptyString[] = "";
+  initBasic(type);
+  switch (type) {
+  case nullValue:
+    break;
+  case intValue:
+  case uintValue:
+    value_.int_ = 0;
+    break;
+  case realValue:
+    value_.real_ = 0.0;
+    break;
+  case stringValue:
+    // allocated_ == false, so this is safe.
+    value_.string_ = const_cast<char*>(static_cast<char const*>(emptyString));
+    break;
+  case arrayValue:
+  case objectValue:
+    value_.map_ = new ObjectValues();
+    break;
+  case booleanValue:
+    value_.bool_ = false;
+    break;
+  default:
+    JSON_ASSERT_UNREACHABLE;
+  }
 }
 
-
-Value::Value( UInt value )
-   : type_( uintValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.uint_ = value;
+Value::Value(Int value) {
+  initBasic(intValue);
+  value_.int_ = value;
 }
 
-Value::Value( Int value )
-   : type_( intValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.int_ = value;
+Value::Value(UInt value) {
+  initBasic(uintValue);
+  value_.uint_ = value;
 }
-
-
-# if defined(JSON_HAS_INT64)
-Value::Value( Int64 value )
-   : type_( intValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.int_ = value;
+#if defined(JSON_HAS_INT64)
+Value::Value(Int64 value) {
+  initBasic(intValue);
+  value_.int_ = value;
 }
-
-
-Value::Value( UInt64 value )
-   : type_( uintValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.uint_ = value;
+Value::Value(UInt64 value) {
+  initBasic(uintValue);
+  value_.uint_ = value;
 }
 #endif // defined(JSON_HAS_INT64)
 
-Value::Value( double value )
-   : type_( realValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.real_ = value;
+Value::Value(double value) {
+  initBasic(realValue);
+  value_.real_ = value;
 }
 
-Value::Value( const char *value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value );
+Value::Value(const char* value) {
+  initBasic(stringValue, true);
+  JSON_ASSERT_MESSAGE(value != nullptr,
+                      "Null Value Passed to Value Constructor");
+  value_.string_ = duplicateAndPrefixStringValue(
+      value, static_cast<unsigned>(strlen(value)));
 }
 
-
-Value::Value( const char *beginValue, 
-              const char *endValue )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( beginValue, 
-                                          (unsigned int)(endValue - beginValue) );
+Value::Value(const char* begin, const char* end) {
+  initBasic(stringValue, true);
+  value_.string_ =
+      duplicateAndPrefixStringValue(begin, static_cast<unsigned>(end - begin));
 }
 
-
-Value::Value( const std::string &value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value.c_str(), 
-                                          (unsigned int)value.length() );
-
+Value::Value(const String& value) {
+  initBasic(stringValue, true);
+  value_.string_ = duplicateAndPrefixStringValue(
+      value.data(), static_cast<unsigned>(value.length()));
 }
 
-Value::Value( const StaticString &value )
-   : type_( stringValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = const_cast<char *>( value.c_str() );
+Value::Value(const StaticString& value) {
+  initBasic(stringValue);
+  value_.string_ = const_cast<char*>(value.c_str());
 }
 
-
-# ifdef JSON_USE_CPPTL
-Value::Value( const CppTL::ConstString &value )
-   : type_( stringValue )
-   , allocated_( true )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   value_.string_ = duplicateStringValue( value, value.length() );
+#ifdef JSON_USE_CPPTL
+Value::Value(const CppTL::ConstString& value) {
+  initBasic(stringValue, true);
+  value_.string_ = duplicateAndPrefixStringValue(
+      value, static_cast<unsigned>(value.length()));
 }
-# endif
-
-Value::Value( bool value )
-   : type_( booleanValue )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
 #endif
-   , comments_( 0 )
-{
-   value_.bool_ = value;
+
+Value::Value(bool value) {
+  initBasic(booleanValue);
+  value_.bool_ = value;
 }
 
+Value::Value(const Value& other) {
+  dupPayload(other);
+  dupMeta(other);
+}
 
-Value::Value( const Value &other )
-   : type_( other.type_ )
-   , allocated_( false )
-# ifdef JSON_VALUE_USE_INTERNAL_MAP
-   , itemIsUsed_( 0 )
-#endif
-   , comments_( 0 )
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-      value_ = other.value_;
-      break;
-   case stringValue:
-      if ( other.value_.string_ )
-      {
-         value_.string_ = duplicateStringValue( other.value_.string_ );
-         allocated_ = true;
-      }
+Value::Value(Value&& other) {
+  initBasic(nullValue);
+  swap(other);
+}
+
+Value::~Value() {
+  releasePayload();
+  value_.uint_ = 0;
+}
+
+Value& Value::operator=(const Value& other) {
+  Value(other).swap(*this);
+  return *this;
+}
+
+Value& Value::operator=(Value&& other) {
+  other.swap(*this);
+  return *this;
+}
+
+void Value::swapPayload(Value& other) {
+  std::swap(bits_, other.bits_);
+  std::swap(value_, other.value_);
+}
+
+void Value::copyPayload(const Value& other) {
+  releasePayload();
+  dupPayload(other);
+}
+
+void Value::swap(Value& other) {
+  swapPayload(other);
+  std::swap(comments_, other.comments_);
+  std::swap(start_, other.start_);
+  std::swap(limit_, other.limit_);
+}
+
+void Value::copy(const Value& other) {
+  copyPayload(other);
+  dupMeta(other);
+}
+
+ValueType Value::type() const {
+  return static_cast<ValueType>(bits_.value_type_);
+}
+
+int Value::compare(const Value& other) const {
+  if (*this < other)
+    return -1;
+  if (*this > other)
+    return 1;
+  return 0;
+}
+
+bool Value::operator<(const Value& other) const {
+  int typeDelta = type() - other.type();
+  if (typeDelta)
+    return typeDelta < 0 ? true : false;
+  switch (type()) {
+  case nullValue:
+    return false;
+  case intValue:
+    return value_.int_ < other.value_.int_;
+  case uintValue:
+    return value_.uint_ < other.value_.uint_;
+  case realValue:
+    return value_.real_ < other.value_.real_;
+  case booleanValue:
+    return value_.bool_ < other.value_.bool_;
+  case stringValue: {
+    if ((value_.string_ == nullptr) || (other.value_.string_ == nullptr)) {
+      if (other.value_.string_)
+        return true;
       else
-         value_.string_ = 0;
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_ = new ObjectValues( *other.value_.map_ );
-      break;
-#else
-   case arrayValue:
-      value_.array_ = arrayAllocator()->newArrayCopy( *other.value_.array_ );
-      break;
-   case objectValue:
-      value_.map_ = mapAllocator()->newMapCopy( *other.value_.map_ );
-      break;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   if ( other.comments_ )
-   {
-      comments_ = new CommentInfo[numberOfCommentPlacement];
-      for ( int comment =0; comment < numberOfCommentPlacement; ++comment )
-      {
-         const CommentInfo &otherComment = other.comments_[comment];
-         if ( otherComment.comment_ )
-            comments_[comment].setComment( otherComment.comment_ );
-      }
-   }
-}
-
-
-Value::~Value()
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-      break;
-   case stringValue:
-      if ( allocated_ )
-         releaseStringValue( value_.string_ );
-      break;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      delete value_.map_;
-      break;
-#else
-   case arrayValue:
-      arrayAllocator()->destructArray( value_.array_ );
-      break;
-   case objectValue:
-      mapAllocator()->destructMap( value_.map_ );
-      break;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-
-   if ( comments_ )
-      delete[] comments_;
-}
-
-Value &
-Value::operator=( const Value &other )
-{
-   Value temp( other );
-   swap( temp );
-   return *this;
-}
-
-void 
-Value::swap( Value &other )
-{
-   ValueType temp = type_;
-   type_ = other.type_;
-   other.type_ = temp;
-   std::swap( value_, other.value_ );
-   int temp2 = allocated_;
-   allocated_ = other.allocated_;
-   other.allocated_ = temp2;
-}
-
-ValueType 
-Value::type() const
-{
-   return type_;
-}
-
-
-int 
-Value::compare( const Value &other ) const
-{
-   if ( *this < other )
-      return -1;
-   if ( *this > other )
-      return 1;
-   return 0;
-}
-
-
-bool 
-Value::operator <( const Value &other ) const
-{
-   int typeDelta = type_ - other.type_;
-   if ( typeDelta )
-      return typeDelta < 0 ? true : false;
-   switch ( type_ )
-   {
-   case nullValue:
-      return false;
-   case intValue:
-      return value_.int_ < other.value_.int_;
-   case uintValue:
-      return value_.uint_ < other.value_.uint_;
-   case realValue:
-      return value_.real_ < other.value_.real_;
-   case booleanValue:
-      return value_.bool_ < other.value_.bool_;
-   case stringValue:
-      return ( value_.string_ == 0  &&  other.value_.string_ )
-             || ( other.value_.string_  
-                  &&  value_.string_  
-                  && strcmp( value_.string_, other.value_.string_ ) < 0 );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      {
-         int delta = int( value_.map_->size() - other.value_.map_->size() );
-         if ( delta )
-            return delta < 0;
-         return (*value_.map_) < (*other.value_.map_);
-      }
-#else
-   case arrayValue:
-      return value_.array_->compare( *(other.value_.array_) ) < 0;
-   case objectValue:
-      return value_.map_->compare( *(other.value_.map_) ) < 0;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   return false;  // unreachable
-}
-
-bool 
-Value::operator <=( const Value &other ) const
-{
-   return !(other < *this);
-}
-
-bool 
-Value::operator >=( const Value &other ) const
-{
-   return !(*this < other);
-}
-
-bool 
-Value::operator >( const Value &other ) const
-{
-   return other < *this;
-}
-
-bool 
-Value::operator ==( const Value &other ) const
-{
-   //if ( type_ != other.type_ )
-   // GCC 2.95.3 says:
-   // attempt to take address of bit-field structure member `Json::Value::type_'
-   // Beats me, but a temp solves the problem.
-   int temp = other.type_;
-   if ( type_ != temp )
-      return false;
-   switch ( type_ )
-   {
-   case nullValue:
+        return false;
+    }
+    unsigned this_len;
+    unsigned other_len;
+    char const* this_str;
+    char const* other_str;
+    decodePrefixedString(this->isAllocated(), this->value_.string_, &this_len,
+                         &this_str);
+    decodePrefixedString(other.isAllocated(), other.value_.string_, &other_len,
+                         &other_str);
+    unsigned min_len = std::min<unsigned>(this_len, other_len);
+    JSON_ASSERT(this_str && other_str);
+    int comp = memcmp(this_str, other_str, min_len);
+    if (comp < 0)
       return true;
-   case intValue:
-      return value_.int_ == other.value_.int_;
-   case uintValue:
-      return value_.uint_ == other.value_.uint_;
-   case realValue:
-      return value_.real_ == other.value_.real_;
-   case booleanValue:
-      return value_.bool_ == other.value_.bool_;
-   case stringValue:
-      return ( value_.string_ == other.value_.string_ )
-             || ( other.value_.string_  
-                  &&  value_.string_  
-                  && strcmp( value_.string_, other.value_.string_ ) == 0 );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      return value_.map_->size() == other.value_.map_->size()
-             && (*value_.map_) == (*other.value_.map_);
-#else
-   case arrayValue:
-      return value_.array_->compare( *(other.value_.array_) ) == 0;
-   case objectValue:
-      return value_.map_->compare( *(other.value_.map_) ) == 0;
-#endif
-   default:
-      JSON_ASSERT_UNREACHABLE;
-   }
-   return false;  // unreachable
-}
-
-bool 
-Value::operator !=( const Value &other ) const
-{
-   return !( *this == other );
-}
-
-const char *
-Value::asCString() const
-{
-   JSON_ASSERT( type_ == stringValue );
-   return value_.string_;
-}
-
-
-std::string 
-Value::asString() const
-{
-   switch ( type_ )
-   {
-   case nullValue:
-      return "";
-   case stringValue:
-      return value_.string_ ? value_.string_ : "";
-   case booleanValue:
-      return value_.bool_ ? "true" : "false";
-   case intValue:
-      return valueToString( value_.int_ );
-   case uintValue:
-      return valueToString( value_.uint_ );
-   case realValue:
-      return valueToString( value_.real_ );
-   default:
-      JSON_FAIL_MESSAGE( "Type is not convertible to string" );
-   }
-}
-
-# ifdef JSON_USE_CPPTL
-CppTL::ConstString 
-Value::asConstString() const
-{
-   return CppTL::ConstString( asString().c_str() );
-}
-# endif
-
-
-Value::Int 
-Value::asInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isInt(), "LargestInt out of Int range");
-      return Int(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isInt(), "LargestUInt out of Int range");
-      return Int(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt, maxInt), "double out of Int range");
-      return Int(value_.real_);
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to Int.");
-}
-
-
-Value::UInt 
-Value::asUInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isUInt(), "LargestInt out of UInt range");
-      return UInt(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isUInt(), "LargestUInt out of UInt range");
-      return UInt(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt), "double out of UInt range");
-      return UInt( value_.real_ );
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to UInt.");
-}
-
-
-# if defined(JSON_HAS_INT64)
-
-Value::Int64
-Value::asInt64() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return Int64(value_.int_);
-   case uintValue:
-      JSON_ASSERT_MESSAGE(isInt64(), "LargestUInt out of Int64 range");
-      return Int64(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt64, maxInt64), "double out of Int64 range");
-      return Int64(value_.real_);
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to Int64.");
-}
-
-
-Value::UInt64
-Value::asUInt64() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      JSON_ASSERT_MESSAGE(isUInt64(), "LargestInt out of UInt64 range");
-      return UInt64(value_.int_);
-   case uintValue:
-      return UInt64(value_.uint_);
-   case realValue:
-      JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt64), "double out of UInt64 range");
-      return UInt64( value_.real_ );
-   case nullValue:
-      return 0;
-   case booleanValue:
-      return value_.bool_ ? 1 : 0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to UInt64.");
-}
-# endif // if defined(JSON_HAS_INT64)
-
-
-LargestInt 
-Value::asLargestInt() const
-{
-#if defined(JSON_NO_INT64)
-    return asInt();
-#else
-    return asInt64();
-#endif
-}
-
-
-LargestUInt 
-Value::asLargestUInt() const
-{
-#if defined(JSON_NO_INT64)
-    return asUInt();
-#else
-    return asUInt64();
-#endif
-}
-
-
-double 
-Value::asDouble() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return static_cast<double>( value_.int_ );
-   case uintValue:
-#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return static_cast<double>( value_.uint_ );
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return integerToDouble( value_.uint_ );
-#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-   case realValue:
-      return value_.real_;
-   case nullValue:
-      return 0.0;
-   case booleanValue:
-      return value_.bool_ ? 1.0 : 0.0;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to double.");
-}
-
-float
-Value::asFloat() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return static_cast<float>( value_.int_ );
-   case uintValue:
-#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return static_cast<float>( value_.uint_ );
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-      return integerToDouble( value_.uint_ );
-#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-   case realValue:
-      return static_cast<float>( value_.real_ );
-   case nullValue:
-      return 0.0;
-   case booleanValue:
-      return value_.bool_ ? 1.0f : 0.0f;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to float.");
-}
-
-bool 
-Value::asBool() const
-{
-   switch ( type_ )
-   {
-   case booleanValue:
-      return value_.bool_;
-   case nullValue:
+    if (comp > 0)
       return false;
-   case intValue:
-      return value_.int_ ? true : false;
-   case uintValue:
-      return value_.uint_ ? true : false;
-   case realValue:
-      return value_.real_ ? true : false;
-   default:
-      break;
-   }
-   JSON_FAIL_MESSAGE("Value is not convertible to bool.");
+    return (this_len < other_len);
+  }
+  case arrayValue:
+  case objectValue: {
+    int delta = int(value_.map_->size() - other.value_.map_->size());
+    if (delta)
+      return delta < 0;
+    return (*value_.map_) < (*other.value_.map_);
+  }
+  default:
+    JSON_ASSERT_UNREACHABLE;
+  }
+  return false; // unreachable
 }
 
+bool Value::operator<=(const Value& other) const { return !(other < *this); }
 
-bool 
-Value::isConvertibleTo( ValueType other ) const
-{
-   switch ( other )
-   {
-   case nullValue:
-      return ( isNumeric() && asDouble() == 0.0 )
-             || ( type_ == booleanValue && value_.bool_ == false )
-             || ( type_ == stringValue && asString() == "" )
-             || ( type_ == arrayValue && value_.map_->size() == 0 )
-             || ( type_ == objectValue && value_.map_->size() == 0 )
-             || type_ == nullValue;
-   case intValue:
-      return isInt()
-             || (type_ == realValue && InRange(value_.real_, minInt, maxInt))
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case uintValue:
-      return isUInt()
-             || (type_ == realValue && InRange(value_.real_, 0, maxUInt))
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case realValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case booleanValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == nullValue;
-   case stringValue:
-      return isNumeric()
-             || type_ == booleanValue
-             || type_ == stringValue
-             || type_ == nullValue;
-   case arrayValue:
-      return type_ == arrayValue
-             || type_ == nullValue;
-   case objectValue:
-      return type_ == objectValue
-             || type_ == nullValue;
-   }
-   JSON_ASSERT_UNREACHABLE;
-   return false;
+bool Value::operator>=(const Value& other) const { return !(*this < other); }
+
+bool Value::operator>(const Value& other) const { return other < *this; }
+
+bool Value::operator==(const Value& other) const {
+  if (type() != other.type())
+    return false;
+  switch (type()) {
+  case nullValue:
+    return true;
+  case intValue:
+    return value_.int_ == other.value_.int_;
+  case uintValue:
+    return value_.uint_ == other.value_.uint_;
+  case realValue:
+    return value_.real_ == other.value_.real_;
+  case booleanValue:
+    return value_.bool_ == other.value_.bool_;
+  case stringValue: {
+    if ((value_.string_ == nullptr) || (other.value_.string_ == nullptr)) {
+      return (value_.string_ == other.value_.string_);
+    }
+    unsigned this_len;
+    unsigned other_len;
+    char const* this_str;
+    char const* other_str;
+    decodePrefixedString(this->isAllocated(), this->value_.string_, &this_len,
+                         &this_str);
+    decodePrefixedString(other.isAllocated(), other.value_.string_, &other_len,
+                         &other_str);
+    if (this_len != other_len)
+      return false;
+    JSON_ASSERT(this_str && other_str);
+    int comp = memcmp(this_str, other_str, this_len);
+    return comp == 0;
+  }
+  case arrayValue:
+  case objectValue:
+    return value_.map_->size() == other.value_.map_->size() &&
+           (*value_.map_) == (*other.value_.map_);
+  default:
+    JSON_ASSERT_UNREACHABLE;
+  }
+  return false; // unreachable
 }
 
+bool Value::operator!=(const Value& other) const { return !(*this == other); }
+
+const char* Value::asCString() const {
+  JSON_ASSERT_MESSAGE(type() == stringValue,
+                      "in Json::Value::asCString(): requires stringValue");
+  if (value_.string_ == nullptr)
+    return nullptr;
+  unsigned this_len;
+  char const* this_str;
+  decodePrefixedString(this->isAllocated(), this->value_.string_, &this_len,
+                       &this_str);
+  return this_str;
+}
+
+#if JSONCPP_USING_SECURE_MEMORY
+unsigned Value::getCStringLength() const {
+  JSON_ASSERT_MESSAGE(type() == stringValue,
+                      "in Json::Value::asCString(): requires stringValue");
+  if (value_.string_ == 0)
+    return 0;
+  unsigned this_len;
+  char const* this_str;
+  decodePrefixedString(this->isAllocated(), this->value_.string_, &this_len,
+                       &this_str);
+  return this_len;
+}
+#endif
+
+bool Value::getString(char const** begin, char const** end) const {
+  if (type() != stringValue)
+    return false;
+  if (value_.string_ == nullptr)
+    return false;
+  unsigned length;
+  decodePrefixedString(this->isAllocated(), this->value_.string_, &length,
+                       begin);
+  *end = *begin + length;
+  return true;
+}
+
+String Value::asString() const {
+  switch (type()) {
+  case nullValue:
+    return "";
+  case stringValue: {
+    if (value_.string_ == nullptr)
+      return "";
+    unsigned this_len;
+    char const* this_str;
+    decodePrefixedString(this->isAllocated(), this->value_.string_, &this_len,
+                         &this_str);
+    return String(this_str, this_len);
+  }
+  case booleanValue:
+    return value_.bool_ ? "true" : "false";
+  case intValue:
+    return valueToString(value_.int_);
+  case uintValue:
+    return valueToString(value_.uint_);
+  case realValue:
+    return valueToString(value_.real_);
+  default:
+    JSON_FAIL_MESSAGE("Type is not convertible to string");
+  }
+}
+
+#ifdef JSON_USE_CPPTL
+CppTL::ConstString Value::asConstString() const {
+  unsigned len;
+  char const* str;
+  decodePrefixedString(isAllocated(), value_.string_, &len, &str);
+  return CppTL::ConstString(str, len);
+}
+#endif
+
+Value::Int Value::asInt() const {
+  switch (type()) {
+  case intValue:
+    JSON_ASSERT_MESSAGE(isInt(), "LargestInt out of Int range");
+    return Int(value_.int_);
+  case uintValue:
+    JSON_ASSERT_MESSAGE(isInt(), "LargestUInt out of Int range");
+    return Int(value_.uint_);
+  case realValue:
+    JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt, maxInt),
+                        "double out of Int range");
+    return Int(value_.real_);
+  case nullValue:
+    return 0;
+  case booleanValue:
+    return value_.bool_ ? 1 : 0;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to Int.");
+}
+
+Value::UInt Value::asUInt() const {
+  switch (type()) {
+  case intValue:
+    JSON_ASSERT_MESSAGE(isUInt(), "LargestInt out of UInt range");
+    return UInt(value_.int_);
+  case uintValue:
+    JSON_ASSERT_MESSAGE(isUInt(), "LargestUInt out of UInt range");
+    return UInt(value_.uint_);
+  case realValue:
+    JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt),
+                        "double out of UInt range");
+    return UInt(value_.real_);
+  case nullValue:
+    return 0;
+  case booleanValue:
+    return value_.bool_ ? 1 : 0;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to UInt.");
+}
+
+#if defined(JSON_HAS_INT64)
+
+Value::Int64 Value::asInt64() const {
+  switch (type()) {
+  case intValue:
+    return Int64(value_.int_);
+  case uintValue:
+    JSON_ASSERT_MESSAGE(isInt64(), "LargestUInt out of Int64 range");
+    return Int64(value_.uint_);
+  case realValue:
+    JSON_ASSERT_MESSAGE(InRange(value_.real_, minInt64, maxInt64),
+                        "double out of Int64 range");
+    return Int64(value_.real_);
+  case nullValue:
+    return 0;
+  case booleanValue:
+    return value_.bool_ ? 1 : 0;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to Int64.");
+}
+
+Value::UInt64 Value::asUInt64() const {
+  switch (type()) {
+  case intValue:
+    JSON_ASSERT_MESSAGE(isUInt64(), "LargestInt out of UInt64 range");
+    return UInt64(value_.int_);
+  case uintValue:
+    return UInt64(value_.uint_);
+  case realValue:
+    JSON_ASSERT_MESSAGE(InRange(value_.real_, 0, maxUInt64),
+                        "double out of UInt64 range");
+    return UInt64(value_.real_);
+  case nullValue:
+    return 0;
+  case booleanValue:
+    return value_.bool_ ? 1 : 0;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to UInt64.");
+}
+#endif // if defined(JSON_HAS_INT64)
+
+LargestInt Value::asLargestInt() const {
+#if defined(JSON_NO_INT64)
+  return asInt();
+#else
+  return asInt64();
+#endif
+}
+
+LargestUInt Value::asLargestUInt() const {
+#if defined(JSON_NO_INT64)
+  return asUInt();
+#else
+  return asUInt64();
+#endif
+}
+
+double Value::asDouble() const {
+  switch (type()) {
+  case intValue:
+    return static_cast<double>(value_.int_);
+  case uintValue:
+#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+    return static_cast<double>(value_.uint_);
+#else  // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+    return integerToDouble(value_.uint_);
+#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+  case realValue:
+    return value_.real_;
+  case nullValue:
+    return 0.0;
+  case booleanValue:
+    return value_.bool_ ? 1.0 : 0.0;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to double.");
+}
+
+float Value::asFloat() const {
+  switch (type()) {
+  case intValue:
+    return static_cast<float>(value_.int_);
+  case uintValue:
+#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+    return static_cast<float>(value_.uint_);
+#else  // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+    // This can fail (silently?) if the value is bigger than MAX_FLOAT.
+    return static_cast<float>(integerToDouble(value_.uint_));
+#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+  case realValue:
+    return static_cast<float>(value_.real_);
+  case nullValue:
+    return 0.0;
+  case booleanValue:
+    return value_.bool_ ? 1.0f : 0.0f;
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to float.");
+}
+
+bool Value::asBool() const {
+  switch (type()) {
+  case booleanValue:
+    return value_.bool_;
+  case nullValue:
+    return false;
+  case intValue:
+    return value_.int_ ? true : false;
+  case uintValue:
+    return value_.uint_ ? true : false;
+  case realValue: {
+    // According to the JavaScript language, zero or NaN is regarded as false.
+    const auto value_classification = std::fpclassify(value_.real_);
+    return value_classification != FP_ZERO && value_classification != FP_NAN;
+  }
+  default:
+    break;
+  }
+  JSON_FAIL_MESSAGE("Value is not convertible to bool.");
+}
+
+bool Value::isConvertibleTo(ValueType other) const {
+  switch (other) {
+  case nullValue:
+    return (isNumeric() && asDouble() == 0.0) ||
+           (type() == booleanValue && value_.bool_ == false) ||
+           (type() == stringValue && asString().empty()) ||
+           (type() == arrayValue && value_.map_->empty()) ||
+           (type() == objectValue && value_.map_->empty()) ||
+           type() == nullValue;
+  case intValue:
+    return isInt() ||
+           (type() == realValue && InRange(value_.real_, minInt, maxInt)) ||
+           type() == booleanValue || type() == nullValue;
+  case uintValue:
+    return isUInt() ||
+           (type() == realValue && InRange(value_.real_, 0, maxUInt)) ||
+           type() == booleanValue || type() == nullValue;
+  case realValue:
+    return isNumeric() || type() == booleanValue || type() == nullValue;
+  case booleanValue:
+    return isNumeric() || type() == booleanValue || type() == nullValue;
+  case stringValue:
+    return isNumeric() || type() == booleanValue || type() == stringValue ||
+           type() == nullValue;
+  case arrayValue:
+    return type() == arrayValue || type() == nullValue;
+  case objectValue:
+    return type() == objectValue || type() == nullValue;
+  }
+  JSON_ASSERT_UNREACHABLE;
+  return false;
+}
 
 /// Number of values in array or object
-ArrayIndex 
-Value::size() const
-{
-   switch ( type_ )
-   {
-   case nullValue:
-   case intValue:
-   case uintValue:
-   case realValue:
-   case booleanValue:
-   case stringValue:
-      return 0;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:  // size of the array is highest index + 1
-      if ( !value_.map_->empty() )
-      {
-         ObjectValues::const_iterator itLast = value_.map_->end();
-         --itLast;
-         return (*itLast).first.index()+1;
-      }
-      return 0;
-   case objectValue:
-      return ArrayIndex( value_.map_->size() );
-#else
-   case arrayValue:
-      return Int( value_.array_->size() );
-   case objectValue:
-      return Int( value_.map_->size() );
-#endif
-   }
-   JSON_ASSERT_UNREACHABLE;
-   return 0; // unreachable;
+ArrayIndex Value::size() const {
+  switch (type()) {
+  case nullValue:
+  case intValue:
+  case uintValue:
+  case realValue:
+  case booleanValue:
+  case stringValue:
+    return 0;
+  case arrayValue: // size of the array is highest index + 1
+    if (!value_.map_->empty()) {
+      ObjectValues::const_iterator itLast = value_.map_->end();
+      --itLast;
+      return (*itLast).first.index() + 1;
+    }
+    return 0;
+  case objectValue:
+    return ArrayIndex(value_.map_->size());
+  }
+  JSON_ASSERT_UNREACHABLE;
+  return 0; // unreachable;
 }
 
-
-bool 
-Value::empty() const
-{
-   if ( isNull() || isArray() || isObject() )
-      return size() == 0u;
-   else
-      return false;
+bool Value::empty() const {
+  if (isNull() || isArray() || isObject())
+    return size() == 0u;
+  else
+    return false;
 }
 
+Value::operator bool() const { return !isNull(); }
 
-bool
-Value::operator!() const
-{
-   return isNull();
+void Value::clear() {
+  JSON_ASSERT_MESSAGE(type() == nullValue || type() == arrayValue ||
+                          type() == objectValue,
+                      "in Json::Value::clear(): requires complex value");
+  start_ = 0;
+  limit_ = 0;
+  switch (type()) {
+  case arrayValue:
+  case objectValue:
+    value_.map_->clear();
+    break;
+  default:
+    break;
+  }
 }
 
-
-void 
-Value::clear()
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue  || type_ == objectValue );
-
-   switch ( type_ )
-   {
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-   case objectValue:
-      value_.map_->clear();
-      break;
-#else
-   case arrayValue:
-      value_.array_->clear();
-      break;
-   case objectValue:
-      value_.map_->clear();
-      break;
-#endif
-   default:
-      break;
-   }
+void Value::resize(ArrayIndex newSize) {
+  JSON_ASSERT_MESSAGE(type() == nullValue || type() == arrayValue,
+                      "in Json::Value::resize(): requires arrayValue");
+  if (type() == nullValue)
+    *this = Value(arrayValue);
+  ArrayIndex oldSize = size();
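+  // Array values share the ObjectValues map used for objects, keyed by index:
+  // growing simply touches the highest new index via operator[], while
+  // shrinking erases the trailing indices one by one.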
+  if (newSize == 0)
+    clear();
+  else if (newSize > oldSize)
+    this->operator[](newSize - 1);
+  else {
+    for (ArrayIndex index = newSize; index < oldSize; ++index) {
+      value_.map_->erase(index);
+    }
+    JSON_ASSERT(size() == newSize);
+  }
 }
 
-void 
-Value::resize( ArrayIndex newSize )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      *this = Value( arrayValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   ArrayIndex oldSize = size();
-   if ( newSize == 0 )
-      clear();
-   else if ( newSize > oldSize )
-      (*this)[ newSize - 1 ];
-   else
-   {
-      for ( ArrayIndex index = newSize; index < oldSize; ++index )
-      {
-         value_.map_->erase( index );
-      }
-      assert( size() == newSize );
-   }
-#else
-   value_.array_->resize( newSize );
-#endif
+Value& Value::operator[](ArrayIndex index) {
+  JSON_ASSERT_MESSAGE(
+      type() == nullValue || type() == arrayValue,
+      "in Json::Value::operator[](ArrayIndex): requires arrayValue");
+  if (type() == nullValue)
+    *this = Value(arrayValue);
+  CZString key(index);
+  auto it = value_.map_->lower_bound(key);
+  if (it != value_.map_->end() && (*it).first == key)
+    return (*it).second;
+
+  ObjectValues::value_type defaultValue(key, nullSingleton());
+  it = value_.map_->insert(it, defaultValue);
+  return (*it).second;
 }
 
-
-Value &
-Value::operator[]( ArrayIndex index )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      *this = Value( arrayValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString key( index );
-   ObjectValues::iterator it = value_.map_->lower_bound( key );
-   if ( it != value_.map_->end()  &&  (*it).first == key )
-      return (*it).second;
-
-   ObjectValues::value_type defaultValue( key, null );
-   it = value_.map_->insert( it, defaultValue );
-   return (*it).second;
-#else
-   return value_.array_->resolveReference( index );
-#endif
+Value& Value::operator[](int index) {
+  JSON_ASSERT_MESSAGE(
+      index >= 0,
+      "in Json::Value::operator[](int index): index cannot be negative");
+  return (*this)[ArrayIndex(index)];
 }
 
-
-Value &
-Value::operator[]( int index )
-{
-   JSON_ASSERT( index >= 0 );
-   return (*this)[ ArrayIndex(index) ];
+const Value& Value::operator[](ArrayIndex index) const {
+  JSON_ASSERT_MESSAGE(
+      type() == nullValue || type() == arrayValue,
+      "in Json::Value::operator[](ArrayIndex)const: requires arrayValue");
+  if (type() == nullValue)
+    return nullSingleton();
+  CZString key(index);
+  ObjectValues::const_iterator it = value_.map_->find(key);
+  if (it == value_.map_->end())
+    return nullSingleton();
+  return (*it).second;
 }
 
-
-const Value &
-Value::operator[]( ArrayIndex index ) const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == arrayValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString key( index );
-   ObjectValues::const_iterator it = value_.map_->find( key );
-   if ( it == value_.map_->end() )
-      return null;
-   return (*it).second;
-#else
-   Value *value = value_.array_->find( index );
-   return value ? *value : null;
-#endif
+const Value& Value::operator[](int index) const {
+  JSON_ASSERT_MESSAGE(
+      index >= 0,
+      "in Json::Value::operator[](int index) const: index cannot be negative");
+  return (*this)[ArrayIndex(index)];
 }
 
-
-const Value &
-Value::operator[]( int index ) const
-{
-   JSON_ASSERT( index >= 0 );
-   return (*this)[ ArrayIndex(index) ];
+void Value::initBasic(ValueType type, bool allocated) {
+  setType(type);
+  setIsAllocated(allocated);
+  comments_ = Comments{};
+  start_ = 0;
+  limit_ = 0;
 }
 
-
-Value &
-Value::operator[]( const char *key )
-{
-   return resolveReference( key, false );
+void Value::dupPayload(const Value& other) {
+  setType(other.type());
+  setIsAllocated(false);
+  switch (type()) {
+  case nullValue:
+  case intValue:
+  case uintValue:
+  case realValue:
+  case booleanValue:
+    value_ = other.value_;
+    break;
+  case stringValue:
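+    // Deep-copy only when the source owns its buffer; static strings are
+    // shared by pointer and isAllocated() stays false for the copy.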
+    if (other.value_.string_ && other.isAllocated()) {
+      unsigned len;
+      char const* str;
+      decodePrefixedString(other.isAllocated(), other.value_.string_, &len,
+                           &str);
+      value_.string_ = duplicateAndPrefixStringValue(str, len);
+      setIsAllocated(true);
+    } else {
+      value_.string_ = other.value_.string_;
+    }
+    break;
+  case arrayValue:
+  case objectValue:
+    value_.map_ = new ObjectValues(*other.value_.map_);
+    break;
+  default:
+    JSON_ASSERT_UNREACHABLE;
+  }
 }
 
-
-Value &
-Value::resolveReference( const char *key, 
-                         bool isStatic )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      *this = Value( objectValue );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, isStatic ? CZString::noDuplication 
-                                     : CZString::duplicateOnCopy );
-   ObjectValues::iterator it = value_.map_->lower_bound( actualKey );
-   if ( it != value_.map_->end()  &&  (*it).first == actualKey )
-      return (*it).second;
-
-   ObjectValues::value_type defaultValue( actualKey, null );
-   it = value_.map_->insert( it, defaultValue );
-   Value &value = (*it).second;
-   return value;
-#else
-   return value_.map_->resolveReference( key, isStatic );
-#endif
+void Value::releasePayload() {
+  switch (type()) {
+  case nullValue:
+  case intValue:
+  case uintValue:
+  case realValue:
+  case booleanValue:
+    break;
+  case stringValue:
+    if (isAllocated())
+      releasePrefixedStringValue(value_.string_);
+    break;
+  case arrayValue:
+  case objectValue:
+    delete value_.map_;
+    break;
+  default:
+    JSON_ASSERT_UNREACHABLE;
+  }
 }
 
-
-Value 
-Value::get( ArrayIndex index, 
-            const Value &defaultValue ) const
-{
-   const Value *value = &((*this)[index]);
-   return value == &null ? defaultValue : *value;
+void Value::dupMeta(const Value& other) {
+  comments_ = other.comments_;
+  start_ = other.start_;
+  limit_ = other.limit_;
 }
 
+// Access an object value by name, create a null member if it does not exist.
+// @pre Type of '*this' is object or null.
+// @param key is null-terminated.
+Value& Value::resolveReference(const char* key) {
+  JSON_ASSERT_MESSAGE(
+      type() == nullValue || type() == objectValue,
+      "in Json::Value::resolveReference(): requires objectValue");
+  if (type() == nullValue)
+    *this = Value(objectValue);
+  CZString actualKey(key, static_cast<unsigned>(strlen(key)),
+                     CZString::noDuplication); // NOTE!
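+  // (noDuplication: the key pointer is stored as-is; this overload is only
+  // reached through StaticString keys, whose storage the caller guarantees
+  // will outlive the Value.)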
+  auto it = value_.map_->lower_bound(actualKey);
+  if (it != value_.map_->end() && (*it).first == actualKey)
+    return (*it).second;
 
-bool 
-Value::isValidIndex( ArrayIndex index ) const
-{
-   return index < size();
+  ObjectValues::value_type defaultValue(actualKey, nullSingleton());
+  it = value_.map_->insert(it, defaultValue);
+  Value& value = (*it).second;
+  return value;
 }
 
+// @param key is not null-terminated.
+Value& Value::resolveReference(char const* key, char const* end) {
+  JSON_ASSERT_MESSAGE(
+      type() == nullValue || type() == objectValue,
+      "in Json::Value::resolveReference(key, end): requires objectValue");
+  if (type() == nullValue)
+    *this = Value(objectValue);
+  CZString actualKey(key, static_cast<unsigned>(end - key),
+                     CZString::duplicateOnCopy);
+  auto it = value_.map_->lower_bound(actualKey);
+  if (it != value_.map_->end() && (*it).first == actualKey)
+    return (*it).second;
 
-
-const Value &
-Value::operator[]( const char *key ) const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, CZString::noDuplication );
-   ObjectValues::const_iterator it = value_.map_->find( actualKey );
-   if ( it == value_.map_->end() )
-      return null;
-   return (*it).second;
-#else
-   const Value *value = value_.map_->find( key );
-   return value ? *value : null;
-#endif
+  ObjectValues::value_type defaultValue(actualKey, nullSingleton());
+  it = value_.map_->insert(it, defaultValue);
+  Value& value = (*it).second;
+  return value;
 }
 
-
-Value &
-Value::operator[]( const std::string &key )
-{
-   return (*this)[ key.c_str() ];
+Value Value::get(ArrayIndex index, const Value& defaultValue) const {
+  const Value* value = &((*this)[index]);
+  return value == &nullSingleton() ? defaultValue : *value;
 }
 
+bool Value::isValidIndex(ArrayIndex index) const { return index < size(); }
 
-const Value &
-Value::operator[]( const std::string &key ) const
-{
-   return (*this)[ key.c_str() ];
+Value const* Value::find(char const* begin, char const* end) const {
+  JSON_ASSERT_MESSAGE(type() == nullValue || type() == objectValue,
+                      "in Json::Value::find(begin, end): requires "
+                      "objectValue or nullValue");
+  if (type() == nullValue)
+    return nullptr;
+  CZString actualKey(begin, static_cast<unsigned>(end - begin),
+                     CZString::noDuplication);
+  ObjectValues::const_iterator it = value_.map_->find(actualKey);
+  if (it == value_.map_->end())
+    return nullptr;
+  return &(*it).second;
+}
+Value* Value::demand(char const* begin, char const* end) {
+  JSON_ASSERT_MESSAGE(type() == nullValue || type() == objectValue,
+                      "in Json::Value::demand(begin, end): requires "
+                      "objectValue or nullValue");
+  return &resolveReference(begin, end);
+}
+const Value& Value::operator[](const char* key) const {
+  Value const* found = find(key, key + strlen(key));
+  if (!found)
+    return nullSingleton();
+  return *found;
+}
+Value const& Value::operator[](const String& key) const {
+  Value const* found = find(key.data(), key.data() + key.length());
+  if (!found)
+    return nullSingleton();
+  return *found;
 }
 
-Value &
-Value::operator[]( const StaticString &key )
-{
-   return resolveReference( key, true );
+Value& Value::operator[](const char* key) {
+  return resolveReference(key, key + strlen(key));
 }
 
-
-# ifdef JSON_USE_CPPTL
-Value &
-Value::operator[]( const CppTL::ConstString &key )
-{
-   return (*this)[ key.c_str() ];
+Value& Value::operator[](const String& key) {
+  return resolveReference(key.data(), key.data() + key.length());
 }
 
-
-const Value &
-Value::operator[]( const CppTL::ConstString &key ) const
-{
-   return (*this)[ key.c_str() ];
-}
-# endif
-
-
-Value &
-Value::append( const Value &value )
-{
-   return (*this)[size()] = value;
+Value& Value::operator[](const StaticString& key) {
+  return resolveReference(key.c_str());
 }
 
-
-Value 
-Value::get( const char *key, 
-            const Value &defaultValue ) const
-{
-   const Value *value = &((*this)[key]);
-   return value == &null ? defaultValue : *value;
+#ifdef JSON_USE_CPPTL
+Value& Value::operator[](const CppTL::ConstString& key) {
+  return resolveReference(key.c_str(), key.end_c_str());
 }
-
-
-Value 
-Value::get( const std::string &key,
-            const Value &defaultValue ) const
-{
-   return get( key.c_str(), defaultValue );
-}
-
-Value
-Value::removeMember( const char* key )
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-      return null;
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   CZString actualKey( key, CZString::noDuplication );
-   ObjectValues::iterator it = value_.map_->find( actualKey );
-   if ( it == value_.map_->end() )
-      return null;
-   Value old(it->second);
-   value_.map_->erase(it);
-   return old;
-#else
-   Value *value = value_.map_->find( key );
-   if (value){
-      Value old(*value);
-      value_.map_.remove( key );
-      return old;
-   } else {
-      return null;
-   }
-#endif
-}
-
-Value
-Value::removeMember( const std::string &key )
-{
-   return removeMember( key.c_str() );
-}
-
-# ifdef JSON_USE_CPPTL
-Value 
-Value::get( const CppTL::ConstString &key,
-            const Value &defaultValue ) const
-{
-   return get( key.c_str(), defaultValue );
-}
-# endif
-
-bool 
-Value::isMember( const char *key ) const
-{
-   const Value *value = &((*this)[key]);
-   return value != &null;
-}
-
-
-bool 
-Value::isMember( const std::string &key ) const
-{
-   return isMember( key.c_str() );
-}
-
-
-# ifdef JSON_USE_CPPTL
-bool 
-Value::isMember( const CppTL::ConstString &key ) const
-{
-   return isMember( key.c_str() );
+Value const& Value::operator[](CppTL::ConstString const& key) const {
+  Value const* found = find(key.c_str(), key.end_c_str());
+  if (!found)
+    return nullSingleton();
+  return *found;
 }
 #endif
 
-Value::Members 
-Value::getMemberNames() const
-{
-   JSON_ASSERT( type_ == nullValue  ||  type_ == objectValue );
-   if ( type_ == nullValue )
-       return Value::Members();
-   Members members;
-   members.reserve( value_.map_->size() );
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   ObjectValues::const_iterator it = value_.map_->begin();
-   ObjectValues::const_iterator itEnd = value_.map_->end();
-   for ( ; it != itEnd; ++it )
-      members.push_back( std::string( (*it).first.c_str() ) );
-#else
-   ValueInternalMap::IteratorState it;
-   ValueInternalMap::IteratorState itEnd;
-   value_.map_->makeBeginIterator( it );
-   value_.map_->makeEndIterator( itEnd );
-   for ( ; !ValueInternalMap::equals( it, itEnd ); ValueInternalMap::increment(it) )
-      members.push_back( std::string( ValueInternalMap::key( it ) ) );
+Value& Value::append(const Value& value) { return (*this)[size()] = value; }
+
+Value& Value::append(Value&& value) {
+  return (*this)[size()] = std::move(value);
+}
+
+Value Value::get(char const* begin,
+                 char const* end,
+                 Value const& defaultValue) const {
+  Value const* found = find(begin, end);
+  return !found ? defaultValue : *found;
+}
+Value Value::get(char const* key, Value const& defaultValue) const {
+  return get(key, key + strlen(key), defaultValue);
+}
+Value Value::get(String const& key, Value const& defaultValue) const {
+  return get(key.data(), key.data() + key.length(), defaultValue);
+}
+
+bool Value::removeMember(const char* begin, const char* end, Value* removed) {
+  if (type() != objectValue) {
+    return false;
+  }
+  CZString actualKey(begin, static_cast<unsigned>(end - begin),
+                     CZString::noDuplication);
+  auto it = value_.map_->find(actualKey);
+  if (it == value_.map_->end())
+    return false;
+  if (removed)
+    *removed = std::move(it->second);
+  value_.map_->erase(it);
+  return true;
+}
+bool Value::removeMember(const char* key, Value* removed) {
+  return removeMember(key, key + strlen(key), removed);
+}
+bool Value::removeMember(String const& key, Value* removed) {
+  return removeMember(key.data(), key.data() + key.length(), removed);
+}
+void Value::removeMember(const char* key) {
+  JSON_ASSERT_MESSAGE(type() == nullValue || type() == objectValue,
+                      "in Json::Value::removeMember(): requires objectValue");
+  if (type() == nullValue)
+    return;
+
+  CZString actualKey(key, unsigned(strlen(key)), CZString::noDuplication);
+  value_.map_->erase(actualKey);
+}
+void Value::removeMember(const String& key) { removeMember(key.c_str()); }
+
+bool Value::removeIndex(ArrayIndex index, Value* removed) {
+  if (type() != arrayValue) {
+    return false;
+  }
+  CZString key(index);
+  auto it = value_.map_->find(key);
+  if (it == value_.map_->end()) {
+    return false;
+  }
+  if (removed)
+    *removed = it->second;
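+  // Array storage is a map keyed by index, so removal is O(n): every element
+  // after 'index' is copied one slot down and the last slot is erased.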
+  ArrayIndex oldSize = size();
+  // shift each following item one slot left, into the place of the "removed"
+  for (ArrayIndex i = index; i < (oldSize - 1); ++i) {
+    CZString keey(i);
+    (*value_.map_)[keey] = (*this)[i + 1];
+  }
+  // erase the last one ("leftover")
+  CZString keyLast(oldSize - 1);
+  auto itLast = value_.map_->find(keyLast);
+  value_.map_->erase(itLast);
+  return true;
+}
+
+#ifdef JSON_USE_CPPTL
+Value Value::get(const CppTL::ConstString& key,
+                 const Value& defaultValue) const {
+  return get(key.c_str(), key.end_c_str(), defaultValue);
+}
 #endif
-   return members;
+
+bool Value::isMember(char const* begin, char const* end) const {
+  Value const* value = find(begin, end);
+  return nullptr != value;
+}
+bool Value::isMember(char const* key) const {
+  return isMember(key, key + strlen(key));
+}
+bool Value::isMember(String const& key) const {
+  return isMember(key.data(), key.data() + key.length());
+}
+
+#ifdef JSON_USE_CPPTL
+bool Value::isMember(const CppTL::ConstString& key) const {
+  return isMember(key.c_str(), key.end_c_str());
+}
+#endif
+
+Value::Members Value::getMemberNames() const {
+  JSON_ASSERT_MESSAGE(
+      type() == nullValue || type() == objectValue,
+      "in Json::Value::getMemberNames(), value must be objectValue");
+  if (type() == nullValue)
+    return Value::Members();
+  Members members;
+  members.reserve(value_.map_->size());
+  ObjectValues::const_iterator it = value_.map_->begin();
+  ObjectValues::const_iterator itEnd = value_.map_->end();
+  for (; it != itEnd; ++it) {
+    members.push_back(String((*it).first.data(), (*it).first.length()));
+  }
+  return members;
 }
 //
 //# ifdef JSON_USE_CPPTL
-//EnumMemberNames
-//Value::enumMemberNames() const
+// EnumMemberNames
+// Value::enumMemberNames() const
 //{
-//   if ( type_ == objectValue )
+//   if ( type() == objectValue )
 //   {
 //      return CppTL::Enum::any(  CppTL::Enum::transform(
 //         CppTL::Enum::keys( *(value_.map_), CppTL::Type<const CZString &>() ),
@@ -1355,11 +1304,11 @@
 //}
 //
 //
-//EnumValues 
-//Value::enumValues() const
+// EnumValues
+// Value::enumValues() const
 //{
-//   if ( type_ == objectValue  ||  type_ == arrayValue )
-//      return CppTL::Enum::anyValues( *(value_.map_), 
+//   if ( type() == objectValue  ||  type() == arrayValue )
+//      return CppTL::Enum::anyValues( *(value_.map_),
 //                                     CppTL::Type<const Value &>() );
 //   return EnumValues();
 //}
@@ -1371,550 +1320,388 @@
   return modf(d, &integral_part) == 0.0;
 }
 
+bool Value::isNull() const { return type() == nullValue; }
 
-bool
-Value::isNull() const
-{
-   return type_ == nullValue;
-}
+bool Value::isBool() const { return type() == booleanValue; }
 
-
-bool 
-Value::isBool() const
-{
-   return type_ == booleanValue;
-}
-
-
-bool 
-Value::isInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return value_.int_ >= minInt  &&  value_.int_ <= maxInt;
-   case uintValue:
-      return value_.uint_ <= UInt(maxInt);
-   case realValue:
-      return value_.real_ >= minInt &&
-             value_.real_ <= maxInt &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-   return false;
-}
-
-
-bool 
-Value::isUInt() const
-{
-   switch ( type_ )
-   {
-   case intValue:
-      return value_.int_ >= 0 && LargestUInt(value_.int_) <= LargestUInt(maxUInt);
-   case uintValue:
-      return value_.uint_ <= maxUInt;
-   case realValue:
-      return value_.real_ >= 0 &&
-             value_.real_ <= maxUInt &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-   return false;
-}
-
-bool 
-Value::isInt64() const
-{
-# if defined(JSON_HAS_INT64)
-   switch ( type_ )
-   {
-   case intValue:
-     return true;
-   case uintValue:
-      return value_.uint_ <= UInt64(maxInt64);
-   case realValue:
-      // Note that maxInt64 (= 2^63 - 1) is not exactly representable as a
-      // double, so double(maxInt64) will be rounded up to 2^63. Therefore we
-      // require the value to be strictly less than the limit.
-      return value_.real_ >= double(minInt64) &&
-             value_.real_ < double(maxInt64) &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-# endif  // JSON_HAS_INT64
-   return false;
-}
-
-bool 
-Value::isUInt64() const
-{
-# if defined(JSON_HAS_INT64)
-   switch ( type_ )
-   {
-   case intValue:
-     return value_.int_ >= 0;
-   case uintValue:
-      return true;
-   case realValue:
-      // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a
-      // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we
-      // require the value to be strictly less than the limit.
-      return value_.real_ >= 0 &&
-             value_.real_ < maxUInt64AsDouble &&
-             IsIntegral(value_.real_);
-   default:
-      break;
-   }
-# endif  // JSON_HAS_INT64
-   return false;
-}
-
-
-bool 
-Value::isIntegral() const
-{
+bool Value::isInt() const {
+  switch (type()) {
+  case intValue:
 #if defined(JSON_HAS_INT64)
-  return isInt64() || isUInt64();
+    return value_.int_ >= minInt && value_.int_ <= maxInt;
 #else
-  return isInt() || isUInt();
+    return true;
 #endif
+  case uintValue:
+    return value_.uint_ <= UInt(maxInt);
+  case realValue:
+    return value_.real_ >= minInt && value_.real_ <= maxInt &&
+           IsIntegral(value_.real_);
+  default:
+    break;
+  }
+  return false;
 }
 
-
-bool 
-Value::isDouble() const
-{
-   return type_ == realValue || isIntegral();
-}
-
-
-bool 
-Value::isNumeric() const
-{
-   return isIntegral() || isDouble();
-}
-
-
-bool 
-Value::isString() const
-{
-   return type_ == stringValue;
-}
-
-
-bool 
-Value::isArray() const
-{
-   return type_ == arrayValue;
-}
-
-
-bool 
-Value::isObject() const
-{
-   return type_ == objectValue;
-}
-
-
-void 
-Value::setComment( const char *comment,
-                   CommentPlacement placement )
-{
-   if ( !comments_ )
-      comments_ = new CommentInfo[numberOfCommentPlacement];
-   comments_[placement].setComment( comment );
-}
-
-
-void 
-Value::setComment( const std::string &comment,
-                   CommentPlacement placement )
-{
-   setComment( comment.c_str(), placement );
-}
-
-
-bool 
-Value::hasComment( CommentPlacement placement ) const
-{
-   return comments_ != 0  &&  comments_[placement].comment_ != 0;
-}
-
-std::string 
-Value::getComment( CommentPlacement placement ) const
-{
-   if ( hasComment(placement) )
-      return comments_[placement].comment_;
-   return "";
-}
-
-
-std::string 
-Value::toStyledString() const
-{
-   StyledWriter writer;
-   return writer.write( *this );
-}
-
-
-Value::const_iterator 
-Value::begin() const
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeBeginIterator( it );
-         return const_iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeBeginIterator( it );
-         return const_iterator( it );
-      }
-      break;
+bool Value::isUInt() const {
+  switch (type()) {
+  case intValue:
+#if defined(JSON_HAS_INT64)
+    return value_.int_ >= 0 && LargestUInt(value_.int_) <= LargestUInt(maxUInt);
 #else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return const_iterator( value_.map_->begin() );
-      break;
+    return value_.int_ >= 0;
 #endif
-   default:
-      break;
-   }
-   return const_iterator();
-}
-
-Value::const_iterator 
-Value::end() const
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeEndIterator( it );
-         return const_iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeEndIterator( it );
-         return const_iterator( it );
-      }
-      break;
+  case uintValue:
+#if defined(JSON_HAS_INT64)
+    return value_.uint_ <= maxUInt;
 #else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return const_iterator( value_.map_->end() );
-      break;
+    return true;
 #endif
-   default:
-      break;
-   }
-   return const_iterator();
+  case realValue:
+    return value_.real_ >= 0 && value_.real_ <= maxUInt &&
+           IsIntegral(value_.real_);
+  default:
+    break;
+  }
+  return false;
 }
 
+bool Value::isInt64() const {
+#if defined(JSON_HAS_INT64)
+  switch (type()) {
+  case intValue:
+    return true;
+  case uintValue:
+    return value_.uint_ <= UInt64(maxInt64);
+  case realValue:
+    // Note that maxInt64 (= 2^63 - 1) is not exactly representable as a
+    // double, so double(maxInt64) will be rounded up to 2^63. Therefore we
+    // require the value to be strictly less than the limit.
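+    // (With IEEE-754 doubles, double(maxInt64) rounds to exactly 2^63, one
+    // past maxInt64, which is why '<' rather than '<=' is used below.)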
+    return value_.real_ >= double(minInt64) &&
+           value_.real_ < double(maxInt64) && IsIntegral(value_.real_);
+  default:
+    break;
+  }
+#endif // JSON_HAS_INT64
+  return false;
+}
 
-Value::iterator 
-Value::begin()
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeBeginIterator( it );
-         return iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeBeginIterator( it );
-         return iterator( it );
-      }
-      break;
+bool Value::isUInt64() const {
+#if defined(JSON_HAS_INT64)
+  switch (type()) {
+  case intValue:
+    return value_.int_ >= 0;
+  case uintValue:
+    return true;
+  case realValue:
+    // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a
+    // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we
+    // require the value to be strictly less than the limit.
+    return value_.real_ >= 0 && value_.real_ < maxUInt64AsDouble &&
+           IsIntegral(value_.real_);
+  default:
+    break;
+  }
+#endif // JSON_HAS_INT64
+  return false;
+}
+
+bool Value::isIntegral() const {
+  switch (type()) {
+  case intValue:
+  case uintValue:
+    return true;
+  case realValue:
+#if defined(JSON_HAS_INT64)
+    // Note that maxUInt64 (= 2^64 - 1) is not exactly representable as a
+    // double, so double(maxUInt64) will be rounded up to 2^64. Therefore we
+    // require the value to be strictly less than the limit.
+    return value_.real_ >= double(minInt64) &&
+           value_.real_ < maxUInt64AsDouble && IsIntegral(value_.real_);
 #else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return iterator( value_.map_->begin() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return iterator();
+    return value_.real_ >= minInt && value_.real_ <= maxUInt &&
+           IsIntegral(value_.real_);
+#endif // JSON_HAS_INT64
+  default:
+    break;
+  }
+  return false;
 }
 
-Value::iterator 
-Value::end()
-{
-   switch ( type_ )
-   {
-#ifdef JSON_VALUE_USE_INTERNAL_MAP
-   case arrayValue:
-      if ( value_.array_ )
-      {
-         ValueInternalArray::IteratorState it;
-         value_.array_->makeEndIterator( it );
-         return iterator( it );
-      }
-      break;
-   case objectValue:
-      if ( value_.map_ )
-      {
-         ValueInternalMap::IteratorState it;
-         value_.map_->makeEndIterator( it );
-         return iterator( it );
-      }
-      break;
-#else
-   case arrayValue:
-   case objectValue:
-      if ( value_.map_ )
-         return iterator( value_.map_->end() );
-      break;
-#endif
-   default:
-      break;
-   }
-   return iterator();
+bool Value::isDouble() const {
+  return type() == intValue || type() == uintValue || type() == realValue;
 }
 
+bool Value::isNumeric() const { return isDouble(); }
+
+bool Value::isString() const { return type() == stringValue; }
+
+bool Value::isArray() const { return type() == arrayValue; }
+
+bool Value::isObject() const { return type() == objectValue; }
+
+Value::Comments::Comments(const Comments& that)
+    : ptr_{cloneUnique(that.ptr_)} {}
+
+Value::Comments::Comments(Comments&& that) : ptr_{std::move(that.ptr_)} {}
+
+Value::Comments& Value::Comments::operator=(const Comments& that) {
+  ptr_ = cloneUnique(that.ptr_);
+  return *this;
+}
+
+Value::Comments& Value::Comments::operator=(Comments&& that) {
+  ptr_ = std::move(that.ptr_);
+  return *this;
+}
+
+bool Value::Comments::has(CommentPlacement slot) const {
+  return ptr_ && !(*ptr_)[slot].empty();
+}
+
+String Value::Comments::get(CommentPlacement slot) const {
+  if (!ptr_)
+    return {};
+  return (*ptr_)[slot];
+}
+
+void Value::Comments::set(CommentPlacement slot, String comment) {
+  if (!ptr_) {
+    ptr_ = std::unique_ptr<Array>(new Array());
+  }
+  (*ptr_)[slot] = std::move(comment);
+}
+
+void Value::setComment(String comment, CommentPlacement placement) {
+  if (!comment.empty() && (comment.back() == '\n')) {
+    // Always discard trailing newline, to aid indentation.
+    comment.pop_back();
+  }
+  JSON_ASSERT(!comment.empty());
+  JSON_ASSERT_MESSAGE(
+      comment[0] == '\0' || comment[0] == '/',
+      "in Json::Value::setComment(): Comments must start with /");
+  comments_.set(placement, std::move(comment));
+}
+
+bool Value::hasComment(CommentPlacement placement) const {
+  return comments_.has(placement);
+}
+
+String Value::getComment(CommentPlacement placement) const {
+  return comments_.get(placement);
+}
+
+void Value::setOffsetStart(ptrdiff_t start) { start_ = start; }
+
+void Value::setOffsetLimit(ptrdiff_t limit) { limit_ = limit; }
+
+ptrdiff_t Value::getOffsetStart() const { return start_; }
+
+ptrdiff_t Value::getOffsetLimit() const { return limit_; }
+
+String Value::toStyledString() const {
+  StreamWriterBuilder builder;
+
+  String out = this->hasComment(commentBefore) ? "\n" : "";
+  out += Json::writeString(builder, *this);
+  out += '\n';
+
+  return out;
+}
+
+Value::const_iterator Value::begin() const {
+  switch (type()) {
+  case arrayValue:
+  case objectValue:
+    if (value_.map_)
+      return const_iterator(value_.map_->begin());
+    break;
+  default:
+    break;
+  }
+  return {};
+}
+
+Value::const_iterator Value::end() const {
+  switch (type()) {
+  case arrayValue:
+  case objectValue:
+    if (value_.map_)
+      return const_iterator(value_.map_->end());
+    break;
+  default:
+    break;
+  }
+  return {};
+}
+
+Value::iterator Value::begin() {
+  switch (type()) {
+  case arrayValue:
+  case objectValue:
+    if (value_.map_)
+      return iterator(value_.map_->begin());
+    break;
+  default:
+    break;
+  }
+  return iterator();
+}
+
+Value::iterator Value::end() {
+  switch (type()) {
+  case arrayValue:
+  case objectValue:
+    if (value_.map_)
+      return iterator(value_.map_->end());
+    break;
+  default:
+    break;
+  }
+  return iterator();
+}
 
 // class PathArgument
 // //////////////////////////////////////////////////////////////////
 
-PathArgument::PathArgument()
-   : key_()
-   , index_()
-   , kind_( kindNone )
-{
-}
+PathArgument::PathArgument() : key_() {}
 
+PathArgument::PathArgument(ArrayIndex index)
+    : key_(), index_(index), kind_(kindIndex) {}
 
-PathArgument::PathArgument( ArrayIndex index )
-   : key_()
-   , index_( index )
-   , kind_( kindIndex )
-{
-}
+PathArgument::PathArgument(const char* key)
+    : key_(key), index_(), kind_(kindKey) {}
 
-
-PathArgument::PathArgument( const char *key )
-   : key_( key )
-   , index_()
-   , kind_( kindKey )
-{
-}
-
-
-PathArgument::PathArgument( const std::string &key )
-   : key_( key.c_str() )
-   , index_()
-   , kind_( kindKey )
-{
-}
+PathArgument::PathArgument(const String& key)
+    : key_(key.c_str()), index_(), kind_(kindKey) {}
 
 // class Path
 // //////////////////////////////////////////////////////////////////
 
-Path::Path( const std::string &path,
-            const PathArgument &a1,
-            const PathArgument &a2,
-            const PathArgument &a3,
-            const PathArgument &a4,
-            const PathArgument &a5 )
-{
-   InArgs in;
-   in.push_back( &a1 );
-   in.push_back( &a2 );
-   in.push_back( &a3 );
-   in.push_back( &a4 );
-   in.push_back( &a5 );
-   makePath( path, in );
+Path::Path(const String& path,
+           const PathArgument& a1,
+           const PathArgument& a2,
+           const PathArgument& a3,
+           const PathArgument& a4,
+           const PathArgument& a5) {
+  InArgs in;
+  in.reserve(5);
+  in.push_back(&a1);
+  in.push_back(&a2);
+  in.push_back(&a3);
+  in.push_back(&a4);
+  in.push_back(&a5);
+  makePath(path, in);
 }
 
-
-void 
-Path::makePath( const std::string &path,
-                const InArgs &in )
-{
-   const char *current = path.c_str();
-   const char *end = current + path.length();
-   InArgs::const_iterator itInArg = in.begin();
-   while ( current != end )
-   {
-      if ( *current == '[' )
-      {
-         ++current;
-         if ( *current == '%' )
-            addPathInArg( path, in, itInArg, PathArgument::kindIndex );
-         else
-         {
-            ArrayIndex index = 0;
-            for ( ; current != end && *current >= '0'  &&  *current <= '9'; ++current )
-               index = index * 10 + ArrayIndex(*current - '0');
-            args_.push_back( index );
-         }
-         if ( current == end  ||  *current++ != ']' )
-            invalidPath( path, int(current - path.c_str()) );
+void Path::makePath(const String& path, const InArgs& in) {
+  const char* current = path.c_str();
+  const char* end = current + path.length();
+  auto itInArg = in.begin();
+  while (current != end) {
+    if (*current == '[') {
+      ++current;
+      if (*current == '%')
+        addPathInArg(path, in, itInArg, PathArgument::kindIndex);
+      else {
+        ArrayIndex index = 0;
+        for (; current != end && *current >= '0' && *current <= '9'; ++current)
+          index = index * 10 + ArrayIndex(*current - '0');
+        args_.push_back(index);
       }
-      else if ( *current == '%' )
-      {
-         addPathInArg( path, in, itInArg, PathArgument::kindKey );
-         ++current;
-      }
-      else if ( *current == '.' )
-      {
-         ++current;
-      }
-      else
-      {
-         const char *beginName = current;
-         while ( current != end  &&  !strchr( "[.", *current ) )
-            ++current;
-         args_.push_back( std::string( beginName, current ) );
-      }
-   }
+      if (current == end || *++current != ']')
+        invalidPath(path, int(current - path.c_str()));
+    } else if (*current == '%') {
+      addPathInArg(path, in, itInArg, PathArgument::kindKey);
+      ++current;
+    } else if (*current == '.' || *current == ']') {
+      ++current;
+    } else {
+      const char* beginName = current;
+      while (current != end && !strchr("[.", *current))
+        ++current;
+      args_.push_back(String(beginName, current));
+    }
+  }
 }
 
-
-void 
-Path::addPathInArg( const std::string &path, 
-                    const InArgs &in, 
-                    InArgs::const_iterator &itInArg, 
-                    PathArgument::Kind kind )
-{
-   if ( itInArg == in.end() )
-   {
-      // Error: missing argument %d
-   }
-   else if ( (*itInArg)->kind_ != kind )
-   {
-      // Error: bad argument type
-   }
-   else
-   {
-      args_.push_back( **itInArg );
-   }
+void Path::addPathInArg(const String& /*path*/,
+                        const InArgs& in,
+                        InArgs::const_iterator& itInArg,
+                        PathArgument::Kind kind) {
+  if (itInArg == in.end()) {
+    // Error: missing argument %d
+  } else if ((*itInArg)->kind_ != kind) {
+    // Error: bad argument type
+  } else {
+    args_.push_back(**itInArg++);
+  }
 }
 
-
-void 
-Path::invalidPath( const std::string &path, 
-                   int location )
-{
-   // Error: invalid path.
+void Path::invalidPath(const String& /*path*/, int /*location*/) {
+  // Error: invalid path.
 }
 
-
-const Value &
-Path::resolve( const Value &root ) const
-{
-   const Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray()  ||  !node->isValidIndex( arg.index_ ) )
-         {
-            // Error: unable to resolve path (array value expected at position...
-         }
-         node = &((*node)[arg.index_]);
+const Value& Path::resolve(const Value& root) const {
+  const Value* node = &root;
+  for (const auto& arg : args_) {
+    if (arg.kind_ == PathArgument::kindIndex) {
+      if (!node->isArray() || !node->isValidIndex(arg.index_)) {
+        // Error: unable to resolve path (array value expected at position... )
+        return Value::nullSingleton();
       }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-         {
-            // Error: unable to resolve path (object value expected at position...)
-         }
-         node = &((*node)[arg.key_]);
-         if ( node == &Value::null )
-         {
-            // Error: unable to resolve path (object has no member named '' at position...)
-         }
+      node = &((*node)[arg.index_]);
+    } else if (arg.kind_ == PathArgument::kindKey) {
+      if (!node->isObject()) {
+        // Error: unable to resolve path (object value expected at position...)
+        return Value::nullSingleton();
       }
-   }
-   return *node;
+      node = &((*node)[arg.key_]);
+      if (node == &Value::nullSingleton()) {
+        // Error: unable to resolve path (object has no member named '' at
+        // position...)
+        return Value::nullSingleton();
+      }
+    }
+  }
+  return *node;
 }
 
-
-Value 
-Path::resolve( const Value &root, 
-               const Value &defaultValue ) const
-{
-   const Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray()  ||  !node->isValidIndex( arg.index_ ) )
-            return defaultValue;
-         node = &((*node)[arg.index_]);
-      }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-            return defaultValue;
-         node = &((*node)[arg.key_]);
-         if ( node == &Value::null )
-            return defaultValue;
-      }
-   }
-   return *node;
+Value Path::resolve(const Value& root, const Value& defaultValue) const {
+  const Value* node = &root;
+  for (const auto& arg : args_) {
+    if (arg.kind_ == PathArgument::kindIndex) {
+      if (!node->isArray() || !node->isValidIndex(arg.index_))
+        return defaultValue;
+      node = &((*node)[arg.index_]);
+    } else if (arg.kind_ == PathArgument::kindKey) {
+      if (!node->isObject())
+        return defaultValue;
+      node = &((*node)[arg.key_]);
+      if (node == &Value::nullSingleton())
+        return defaultValue;
+    }
+  }
+  return *node;
 }
 
-
-Value &
-Path::make( Value &root ) const
-{
-   Value *node = &root;
-   for ( Args::const_iterator it = args_.begin(); it != args_.end(); ++it )
-   {
-      const PathArgument &arg = *it;
-      if ( arg.kind_ == PathArgument::kindIndex )
-      {
-         if ( !node->isArray() )
-         {
-            // Error: node is not an array at position ...
-         }
-         node = &((*node)[arg.index_]);
+Value& Path::make(Value& root) const {
+  Value* node = &root;
+  for (const auto& arg : args_) {
+    if (arg.kind_ == PathArgument::kindIndex) {
+      if (!node->isArray()) {
+        // Error: node is not an array at position ...
       }
-      else if ( arg.kind_ == PathArgument::kindKey )
-      {
-         if ( !node->isObject() )
-         {
-            // Error: node is not an object at position...
-         }
-         node = &((*node)[arg.key_]);
+      node = &((*node)[arg.index_]);
+    } else if (arg.kind_ == PathArgument::kindKey) {
+      if (!node->isObject()) {
+        // Error: node is not an object at position...
       }
-   }
-   return *node;
+      node = &((*node)[arg.key_]);
+    }
+  }
+  return *node;
 }
 
-
 } // namespace Json
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_valueiterator.inl b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_valueiterator.inl
index 7457ca3..4a3d210 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_valueiterator.inl
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_valueiterator.inl
@@ -1,4 +1,4 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
@@ -15,202 +15,99 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 
-ValueIteratorBase::ValueIteratorBase()
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   : current_()
-   , isNull_( true )
-{
-}
+ValueIteratorBase::ValueIteratorBase() : current_() {}
+
+ValueIteratorBase::ValueIteratorBase(
+    const Value::ObjectValues::iterator& current)
+    : current_(current), isNull_(false) {}
+
+Value& ValueIteratorBase::deref() const { return current_->second; }
+
+void ValueIteratorBase::increment() { ++current_; }
+
+void ValueIteratorBase::decrement() { --current_; }
+
+ValueIteratorBase::difference_type
+ValueIteratorBase::computeDistance(const SelfType& other) const {
+#ifdef JSON_USE_CPPTL_SMALLMAP
+  return other.current_ - current_;
 #else
-   : isArray_( true )
-   , isNull_( true )
-{
-   iterator_.array_ = ValueInternalArray::IteratorState();
-}
-#endif
+  // Iterators for null values are initialized using the default
+  // constructor, which initializes current_ to the default
+  // std::map::iterator. As begin() and end() are then two instances
+  // of the default std::map::iterator, they cannot be compared.
+  // To allow that comparison, we handle it specifically here.
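+  // (Default-constructed std::map iterators are singular, and comparing
+  // singular iterators is undefined behaviour, hence the isNull_ flag.)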
+  if (isNull_ && other.isNull_) {
+    return 0;
+  }
 
-
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-ValueIteratorBase::ValueIteratorBase( const Value::ObjectValues::iterator &current )
-   : current_( current )
-   , isNull_( false )
-{
-}
-#else
-ValueIteratorBase::ValueIteratorBase( const ValueInternalArray::IteratorState &state )
-   : isArray_( true )
-{
-   iterator_.array_ = state;
-}
-
-
-ValueIteratorBase::ValueIteratorBase( const ValueInternalMap::IteratorState &state )
-   : isArray_( false )
-{
-   iterator_.map_ = state;
-}
-#endif
-
-Value &
-ValueIteratorBase::deref() const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   return current_->second;
-#else
-   if ( isArray_ )
-      return ValueInternalArray::dereference( iterator_.array_ );
-   return ValueInternalMap::value( iterator_.map_ );
+  // Usage of std::distance is not portable (it does not compile with the
+  // Sun Studio 12 RogueWave STL, which is the one used by default).
+  // Using a portable hand-made version for non-random iterators instead:
+  //   return difference_type( std::distance( current_, other.current_ ) );
+  difference_type myDistance = 0;
+  for (Value::ObjectValues::iterator it = current_; it != other.current_;
+       ++it) {
+    ++myDistance;
+  }
+  return myDistance;
 #endif
 }
 
-
-void 
-ValueIteratorBase::increment()
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   ++current_;
-#else
-   if ( isArray_ )
-      ValueInternalArray::increment( iterator_.array_ );
-   ValueInternalMap::increment( iterator_.map_ );
-#endif
+bool ValueIteratorBase::isEqual(const SelfType& other) const {
+  if (isNull_) {
+    return other.isNull_;
+  }
+  return current_ == other.current_;
 }
 
-
-void 
-ValueIteratorBase::decrement()
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   --current_;
-#else
-   if ( isArray_ )
-      ValueInternalArray::decrement( iterator_.array_ );
-   ValueInternalMap::decrement( iterator_.map_ );
-#endif
+void ValueIteratorBase::copy(const SelfType& other) {
+  current_ = other.current_;
+  isNull_ = other.isNull_;
 }
 
-
-ValueIteratorBase::difference_type 
-ValueIteratorBase::computeDistance( const SelfType &other ) const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-# ifdef JSON_USE_CPPTL_SMALLMAP
-   return current_ - other.current_;
-# else
-   // Iterator for null value are initialized using the default
-   // constructor, which initialize current_ to the default
-   // std::map::iterator. As begin() and end() are two instance 
-   // of the default std::map::iterator, they can not be compared.
-   // To allow this, we handle this comparison specifically.
-   if ( isNull_  &&  other.isNull_ )
-   {
-      return 0;
-   }
-
-
-   // Usage of std::distance is not portable (does not compile with Sun Studio 12 RogueWave STL,
-   // which is the one used by default).
-   // Using a portable hand-made version for non random iterator instead:
-   //   return difference_type( std::distance( current_, other.current_ ) );
-   difference_type myDistance = 0;
-   for ( Value::ObjectValues::iterator it = current_; it != other.current_; ++it )
-   {
-      ++myDistance;
-   }
-   return myDistance;
-# endif
-#else
-   if ( isArray_ )
-      return ValueInternalArray::distance( iterator_.array_, other.iterator_.array_ );
-   return ValueInternalMap::distance( iterator_.map_, other.iterator_.map_ );
-#endif
+Value ValueIteratorBase::key() const {
+  const Value::CZString czstring = (*current_).first;
+  if (czstring.data()) {
+    if (czstring.isStaticString())
+      return Value(StaticString(czstring.data()));
+    return Value(czstring.data(), czstring.data() + czstring.length());
+  }
+  return Value(czstring.index());
 }
 
-
-bool 
-ValueIteratorBase::isEqual( const SelfType &other ) const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   if ( isNull_ )
-   {
-      return other.isNull_;
-   }
-   return current_ == other.current_;
-#else
-   if ( isArray_ )
-      return ValueInternalArray::equals( iterator_.array_, other.iterator_.array_ );
-   return ValueInternalMap::equals( iterator_.map_, other.iterator_.map_ );
-#endif
+UInt ValueIteratorBase::index() const {
+  const Value::CZString czstring = (*current_).first;
+  if (!czstring.data())
+    return czstring.index();
+  return Value::UInt(-1);
 }
 
-
-void 
-ValueIteratorBase::copy( const SelfType &other )
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   current_ = other.current_;
-#else
-   if ( isArray_ )
-      iterator_.array_ = other.iterator_.array_;
-   iterator_.map_ = other.iterator_.map_;
-#endif
+String ValueIteratorBase::name() const {
+  char const* keey;
+  char const* end;
+  keey = memberName(&end);
+  if (!keey)
+    return String();
+  return String(keey, end);
 }
 
-
-Value 
-ValueIteratorBase::key() const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   const Value::CZString czstring = (*current_).first;
-   if ( czstring.c_str() )
-   {
-      if ( czstring.isStaticString() )
-         return Value( StaticString( czstring.c_str() ) );
-      return Value( czstring.c_str() );
-   }
-   return Value( czstring.index() );
-#else
-   if ( isArray_ )
-      return Value( ValueInternalArray::indexOf( iterator_.array_ ) );
-   bool isStatic;
-   const char *memberName = ValueInternalMap::key( iterator_.map_, isStatic );
-   if ( isStatic )
-      return Value( StaticString( memberName ) );
-   return Value( memberName );
-#endif
+char const* ValueIteratorBase::memberName() const {
+  const char* cname = (*current_).first.data();
+  return cname ? cname : "";
 }
 
-
-UInt 
-ValueIteratorBase::index() const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   const Value::CZString czstring = (*current_).first;
-   if ( !czstring.c_str() )
-      return czstring.index();
-   return Value::UInt( -1 );
-#else
-   if ( isArray_ )
-      return Value::UInt( ValueInternalArray::indexOf( iterator_.array_ ) );
-   return Value::UInt( -1 );
-#endif
+char const* ValueIteratorBase::memberName(char const** end) const {
+  const char* cname = (*current_).first.data();
+  if (!cname) {
+    *end = nullptr;
+    return nullptr;
+  }
+  *end = cname + (*current_).first.length();
+  return cname;
 }
 
-
-const char *
-ValueIteratorBase::memberName() const
-{
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-   const char *name = (*current_).first.c_str();
-   return name ? name : "";
-#else
-   if ( !isArray_ )
-      return ValueInternalMap::key( iterator_.map_ );
-   return "";
-#endif
-}
-
-
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
@@ -219,35 +116,20 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 
-ValueConstIterator::ValueConstIterator()
-{
-}
+ValueConstIterator::ValueConstIterator() = default;
 
+ValueConstIterator::ValueConstIterator(
+    const Value::ObjectValues::iterator& current)
+    : ValueIteratorBase(current) {}
 
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-ValueConstIterator::ValueConstIterator( const Value::ObjectValues::iterator &current )
-   : ValueIteratorBase( current )
-{
-}
-#else
-ValueConstIterator::ValueConstIterator( const ValueInternalArray::IteratorState &state )
-   : ValueIteratorBase( state )
-{
-}
+ValueConstIterator::ValueConstIterator(ValueIterator const& other)
+    : ValueIteratorBase(other) {}
 
-ValueConstIterator::ValueConstIterator( const ValueInternalMap::IteratorState &state )
-   : ValueIteratorBase( state )
-{
+ValueConstIterator& ValueConstIterator::
+operator=(const ValueIteratorBase& other) {
+  copy(other);
+  return *this;
 }
-#endif
-
-ValueConstIterator &
-ValueConstIterator::operator =( const ValueIteratorBase &other )
-{
-   copy( other );
-   return *this;
-}
-
 
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
@@ -257,43 +139,21 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 
-ValueIterator::ValueIterator()
-{
+ValueIterator::ValueIterator() = default;
+
+ValueIterator::ValueIterator(const Value::ObjectValues::iterator& current)
+    : ValueIteratorBase(current) {}
+
+ValueIterator::ValueIterator(const ValueConstIterator& other)
+    : ValueIteratorBase(other) {
+  throwRuntimeError("ConstIterator to Iterator should never be allowed.");
 }
 
+ValueIterator::ValueIterator(const ValueIterator& other) = default;
 
-#ifndef JSON_VALUE_USE_INTERNAL_MAP
-ValueIterator::ValueIterator( const Value::ObjectValues::iterator &current )
-   : ValueIteratorBase( current )
-{
-}
-#else
-ValueIterator::ValueIterator( const ValueInternalArray::IteratorState &state )
-   : ValueIteratorBase( state )
-{
-}
-
-ValueIterator::ValueIterator( const ValueInternalMap::IteratorState &state )
-   : ValueIteratorBase( state )
-{
-}
-#endif
-
-ValueIterator::ValueIterator( const ValueConstIterator &other )
-   : ValueIteratorBase( other )
-{
-}
-
-ValueIterator::ValueIterator( const ValueIterator &other )
-   : ValueIteratorBase( other )
-{
-}
-
-ValueIterator &
-ValueIterator::operator =( const SelfType &other )
-{
-   copy( other );
-   return *this;
+ValueIterator& ValueIterator::operator=(const SelfType& other) {
+  copy(other);
+  return *this;
 }
 
 } // namespace Json
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_writer.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_writer.cpp
index b44def3..41dfd8c 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_writer.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/json_writer.cpp
@@ -1,841 +1,1253 @@
-// Copyright 2011 Baptiste Lepilleur
+// Copyright 2011 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #if !defined(JSON_IS_AMALGAMATION)
-# include <json/writer.h>
-# include "json_tool.h"
+#include "json_tool.h"
+#include <json/writer.h>
 #endif // if !defined(JSON_IS_AMALGAMATION)
-#include <utility>
-#include <assert.h>
-#include <stdio.h>
-#include <string.h>
-#include <sstream>
+#include <cassert>
+#include <cstring>
 #include <iomanip>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <utility>
 
-#if _MSC_VER >= 1400 // VC++ 8.0
-#pragma warning( disable : 4996 )   // disable warning about strdup being deprecated.
+#if __cplusplus >= 201103L
+#include <cmath>
+#include <cstdio>
+
+#if !defined(isnan)
+#define isnan std::isnan
+#endif
+
+#if !defined(isfinite)
+#define isfinite std::isfinite
+#endif
+
+#else
+#include <cmath>
+#include <cstdio>
+
+#if defined(_MSC_VER)
+#if !defined(isnan)
+#include <float.h>
+#define isnan _isnan
+#endif
+
+#if !defined(isfinite)
+#include <float.h>
+#define isfinite _finite
+#endif
+
+#if !defined(_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES)
+#define _CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES 1
+#endif //_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES
+
+#endif //_MSC_VER
+
+#if defined(__sun) && defined(__SVR4) // Solaris
+#if !defined(isfinite)
+#include <ieeefp.h>
+#define isfinite finite
+#endif
+#endif
+
+#if defined(__hpux)
+#if !defined(isfinite)
+#if defined(__ia64) && !defined(finite)
+#define isfinite(x)                                                            \
+  ((sizeof(x) == sizeof(float) ? _Isfinitef(x) : _IsFinite(x)))
+#endif
+#endif
+#endif
+
+#if !defined(isnan)
+// IEEE standard states that NaN values will not compare to themselves
+#define isnan(x) (x != x)
+#endif
+
+#if !defined(__APPLE__)
+#if !defined(isfinite)
+#define isfinite finite
+#endif
+#endif
+#endif
+
+#if defined(_MSC_VER)
+// Disable warning about strdup being deprecated.
+#pragma warning(disable : 4996)
 #endif
 
 namespace Json {
 
-static bool containsControlCharacter( const char* str )
-{
-   while ( *str ) 
-   {
-      if ( isControlCharacter( *(str++) ) )
-         return true;
-   }
-   return false;
+#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520)
+typedef std::unique_ptr<StreamWriter> StreamWriterPtr;
+#else
+typedef std::auto_ptr<StreamWriter> StreamWriterPtr;
+#endif
+
+String valueToString(LargestInt value) {
+  UIntToStringBuffer buffer;
+  char* current = buffer + sizeof(buffer);
+  if (value == Value::minLargestInt) {
+    uintToString(LargestUInt(Value::maxLargestInt) + 1, current);
+    *--current = '-';
+  } else if (value < 0) {
+    uintToString(LargestUInt(-value), current);
+    *--current = '-';
+  } else {
+    uintToString(LargestUInt(value), current);
+  }
+  assert(current >= buffer);
+  return current;
 }
 
-
-std::string valueToString( LargestInt value )
-{
-   UIntToStringBuffer buffer;
-   char *current = buffer + sizeof(buffer);
-   bool isNegative = value < 0;
-   if ( isNegative )
-      value = -value;
-   uintToString( LargestUInt(value), current );
-   if ( isNegative )
-      *--current = '-';
-   assert( current >= buffer );
-   return current;
-}
-
-
-std::string valueToString( LargestUInt value )
-{
-   UIntToStringBuffer buffer;
-   char *current = buffer + sizeof(buffer);
-   uintToString( value, current );
-   assert( current >= buffer );
-   return current;
+String valueToString(LargestUInt value) {
+  UIntToStringBuffer buffer;
+  char* current = buffer + sizeof(buffer);
+  uintToString(value, current);
+  assert(current >= buffer);
+  return current;
 }
 
 #if defined(JSON_HAS_INT64)
 
-std::string valueToString( Int value )
-{
-   return valueToString( LargestInt(value) );
-}
+String valueToString(Int value) { return valueToString(LargestInt(value)); }
 
-
-std::string valueToString( UInt value )
-{
-   return valueToString( LargestUInt(value) );
-}
+String valueToString(UInt value) { return valueToString(LargestUInt(value)); }
 
 #endif // # if defined(JSON_HAS_INT64)
 
+namespace {
+String valueToString(double value,
+                     bool useSpecialFloats,
+                     unsigned int precision,
+                     PrecisionType precisionType) {
+  // Print into the buffer. We need not request the alternative representation
+  // that always has a decimal point because JSON doesn't distinguish the
+  // concepts of reals and integers.
+  if (!isfinite(value)) {
+    static const char* const reps[2][3] = {{"NaN", "-Infinity", "Infinity"},
+                                           {"null", "-1e+9999", "1e+9999"}};
+    return reps[useSpecialFloats ? 0 : 1]
+               [isnan(value) ? 0 : (value < 0) ? 1 : 2];
+  }
 
-std::string valueToString( double value )
-{
-   char buffer[32];
-#if defined(_MSC_VER) && defined(__STDC_SECURE_LIB__) // Use secure version with visual studio 2005 to avoid warning. 
-   sprintf_s(buffer, sizeof(buffer), "%#.16g", value); 
-#else	
-   sprintf(buffer, "%#.16g", value); 
-#endif
-   char* ch = buffer + strlen(buffer) - 1;
-   if (*ch != '0') return buffer; // nothing to truncate, so save time
-   while(ch > buffer && *ch == '0'){
-     --ch;
-   }
-   char* last_nonzero = ch;
-   while(ch >= buffer){
-     switch(*ch){
-     case '0':
-     case '1':
-     case '2':
-     case '3':
-     case '4':
-     case '5':
-     case '6':
-     case '7':
-     case '8':
-     case '9':
-       --ch;
-       continue;
-     case '.':
-       // Truncate zeroes to save bytes in output, but keep one.
-       *(last_nonzero+2) = '\0';
-       return buffer;
-     default:
-       return buffer;
-     }
-   }
-   return buffer;
+  String buffer(size_t(36), '\0');
+  while (true) {
+    int len = jsoncpp_snprintf(
+        &*buffer.begin(), buffer.size(),
+        (precisionType == PrecisionType::significantDigits) ? "%.*g" : "%.*f",
+        precision, value);
+    assert(len >= 0);
+    auto wouldPrint = static_cast<size_t>(len);
+    if (wouldPrint >= buffer.size()) {
+      buffer.resize(wouldPrint + 1);
+      continue;
+    }
+    buffer.resize(wouldPrint);
+    break;
+  }
+
+  buffer.erase(fixNumericLocale(buffer.begin(), buffer.end()), buffer.end());
+
+  // strip the zero padding from the right
+  if (precisionType == PrecisionType::decimalPlaces) {
+    buffer.erase(fixZerosInTheEnd(buffer.begin(), buffer.end()), buffer.end());
+  }
+
+  // try to ensure we preserve the fact that this was given to us as a double on
+  // input
+  if (buffer.find('.') == buffer.npos && buffer.find('e') == buffer.npos) {
+    buffer += ".0";
+  }
+  return buffer;
+}
+} // namespace
+
+String valueToString(double value,
+                     unsigned int precision,
+                     PrecisionType precisionType) {
+  return valueToString(value, false, precision, precisionType);
 }
 
+String valueToString(bool value) { return value ? "true" : "false"; }
 
-std::string valueToString( bool value )
-{
-   return value ? "true" : "false";
+static bool isAnyCharRequiredQuoting(char const* s, size_t n) {
+  assert(s || !n);
+
+  char const* const end = s + n;
+  for (char const* cur = s; cur < end; ++cur) {
+    if (*cur == '\\' || *cur == '\"' || *cur < ' ' ||
+        static_cast<unsigned char>(*cur) < 0x80)
+      return true;
+  }
+  return false;
 }
 
-std::string valueToQuotedString( const char *value )
-{
-   if (value == NULL)
-      return "";
-   // Not sure how to handle unicode...
-   if (strpbrk(value, "\"\\\b\f\n\r\t") == NULL && !containsControlCharacter( value ))
-      return std::string("\"") + value + "\"";
-   // We have to walk value and escape any special characters.
-   // Appending to std::string is not efficient, but this should be rare.
-   // (Note: forward slashes are *not* rare, but I am not escaping them.)
-   std::string::size_type maxsize = strlen(value)*2 + 3; // allescaped+quotes+NULL
-   std::string result;
-   result.reserve(maxsize); // to avoid lots of mallocs
-   result += "\"";
-   for (const char* c=value; *c != 0; ++c)
-   {
-      switch(*c)
-      {
-         case '\"':
-            result += "\\\"";
-            break;
-         case '\\':
-            result += "\\\\";
-            break;
-         case '\b':
-            result += "\\b";
-            break;
-         case '\f':
-            result += "\\f";
-            break;
-         case '\n':
-            result += "\\n";
-            break;
-         case '\r':
-            result += "\\r";
-            break;
-         case '\t':
-            result += "\\t";
-            break;
-         //case '/':
-            // Even though \/ is considered a legal escape in JSON, a bare
-            // slash is also legal, so I see no reason to escape it.
-            // (I hope I am not misunderstanding something.
-            // blep notes: actually escaping \/ may be useful in javascript to avoid </ 
-            // sequence.
-            // Should add a flag to allow this compatibility mode and prevent this 
-            // sequence from occurring.
-         default:
-            if ( isControlCharacter( *c ) )
-            {
-               std::ostringstream oss;
-               oss << "\\u" << std::hex << std::uppercase << std::setfill('0') << std::setw(4) << static_cast<int>(*c);
-               result += oss.str();
-            }
-            else
-            {
-               result += *c;
-            }
-            break;
+static unsigned int utf8ToCodepoint(const char*& s, const char* e) {
+  const unsigned int REPLACEMENT_CHARACTER = 0xFFFD;
+
+  unsigned int firstByte = static_cast<unsigned char>(*s);
+
+  if (firstByte < 0x80)
+    return firstByte;
+
+  if (firstByte < 0xE0) {
+    if (e - s < 2)
+      return REPLACEMENT_CHARACTER;
+
+    unsigned int calculated =
+        ((firstByte & 0x1F) << 6) | (static_cast<unsigned int>(s[1]) & 0x3F);
+    s += 1;
+    // oversized encoded characters are invalid
+    return calculated < 0x80 ? REPLACEMENT_CHARACTER : calculated;
+  }
+
+  if (firstByte < 0xF0) {
+    if (e - s < 3)
+      return REPLACEMENT_CHARACTER;
+
+    unsigned int calculated = ((firstByte & 0x0F) << 12) |
+                              ((static_cast<unsigned int>(s[1]) & 0x3F) << 6) |
+                              (static_cast<unsigned int>(s[2]) & 0x3F);
+    s += 2;
+    // surrogates aren't valid codepoints themselves
+    // and shouldn't be UTF-8 encoded
+    if (calculated >= 0xD800 && calculated <= 0xDFFF)
+      return REPLACEMENT_CHARACTER;
+    // oversized encoded characters are invalid
+    return calculated < 0x800 ? REPLACEMENT_CHARACTER : calculated;
+  }
+
+  if (firstByte < 0xF8) {
+    if (e - s < 4)
+      return REPLACEMENT_CHARACTER;
+
+    unsigned int calculated = ((firstByte & 0x07) << 18) |
+                              ((static_cast<unsigned int>(s[1]) & 0x3F) << 12) |
+                              ((static_cast<unsigned int>(s[2]) & 0x3F) << 6) |
+                              (static_cast<unsigned int>(s[3]) & 0x3F);
+    s += 3;
+    // oversized encoded characters are invalid
+    return calculated < 0x10000 ? REPLACEMENT_CHARACTER : calculated;
+  }
+
+  return REPLACEMENT_CHARACTER;
+}
+
+static const char hex2[] = "000102030405060708090a0b0c0d0e0f"
+                           "101112131415161718191a1b1c1d1e1f"
+                           "202122232425262728292a2b2c2d2e2f"
+                           "303132333435363738393a3b3c3d3e3f"
+                           "404142434445464748494a4b4c4d4e4f"
+                           "505152535455565758595a5b5c5d5e5f"
+                           "606162636465666768696a6b6c6d6e6f"
+                           "707172737475767778797a7b7c7d7e7f"
+                           "808182838485868788898a8b8c8d8e8f"
+                           "909192939495969798999a9b9c9d9e9f"
+                           "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+                           "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+                           "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+                           "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf"
+                           "e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
+                           "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
+
+static String toHex16Bit(unsigned int x) {
+  const unsigned int hi = (x >> 8) & 0xff;
+  const unsigned int lo = x & 0xff;
+  String result(4, ' ');
+  result[0] = hex2[2 * hi];
+  result[1] = hex2[2 * hi + 1];
+  result[2] = hex2[2 * lo];
+  result[3] = hex2[2 * lo + 1];
+  return result;
+}
+
+static String valueToQuotedStringN(const char* value, unsigned length) {
+  if (value == nullptr)
+    return "";
+
+  if (!isAnyCharRequiredQuoting(value, length))
+    return String("\"") + value + "\"";
+  // We have to walk value and escape any special characters.
+  // Appending to String is not efficient, but this should be rare.
+  // (Note: forward slashes are *not* rare, but I am not escaping them.)
+  String::size_type maxsize = length * 2 + 3; // allescaped+quotes+NULL
+  String result;
+  result.reserve(maxsize); // to avoid lots of mallocs
+  result += "\"";
+  char const* end = value + length;
+  for (const char* c = value; c != end; ++c) {
+    switch (*c) {
+    case '\"':
+      result += "\\\"";
+      break;
+    case '\\':
+      result += "\\\\";
+      break;
+    case '\b':
+      result += "\\b";
+      break;
+    case '\f':
+      result += "\\f";
+      break;
+    case '\n':
+      result += "\\n";
+      break;
+    case '\r':
+      result += "\\r";
+      break;
+    case '\t':
+      result += "\\t";
+      break;
+    // case '/':
+    // Even though \/ is considered a legal escape in JSON, a bare
+    // slash is also legal, so I see no reason to escape it.
+    // (I hope I am not misunderstanding something.)
+    // blep notes: actually escaping \/ may be useful in javascript to avoid </
+    // sequence.
+    // Should add a flag to allow this compatibility mode and prevent this
+    // sequence from occurring.
+    default: {
+      unsigned int cp = utf8ToCodepoint(c, end);
+      // don't escape non-control characters
+    // (short escape sequences are applied above)
+      if (cp < 0x80 && cp >= 0x20)
+        result += static_cast<char>(cp);
+      else if (cp < 0x10000) { // codepoint is in Basic Multilingual Plane
+        result += "\\u";
+        result += toHex16Bit(cp);
+      } else { // codepoint is not in Basic Multilingual Plane
+               // convert to surrogate pair first
+        cp -= 0x10000;
+        result += "\\u";
+        result += toHex16Bit((cp >> 10) + 0xD800);
+        result += "\\u";
+        result += toHex16Bit((cp & 0x3FF) + 0xDC00);
       }
-   }
-   result += "\"";
-   return result;
+    } break;
+    }
+  }
+  result += "\"";
+  return result;
+}
+
+String valueToQuotedString(const char* value) {
+  return valueToQuotedStringN(value, static_cast<unsigned int>(strlen(value)));
 }
 
 // Class Writer
 // //////////////////////////////////////////////////////////////////
-Writer::~Writer()
-{
-}
-
+Writer::~Writer() = default;
 
 // Class FastWriter
 // //////////////////////////////////////////////////////////////////
 
 FastWriter::FastWriter()
-   : yamlCompatiblityEnabled_( false )
-{
+
+    = default;
+
+void FastWriter::enableYAMLCompatibility() { yamlCompatibilityEnabled_ = true; }
+
+void FastWriter::dropNullPlaceholders() { dropNullPlaceholders_ = true; }
+
+void FastWriter::omitEndingLineFeed() { omitEndingLineFeed_ = true; }
+
+String FastWriter::write(const Value& root) {
+  document_.clear();
+  writeValue(root);
+  if (!omitEndingLineFeed_)
+    document_ += '\n';
+  return document_;
 }
 
-
-void 
-FastWriter::enableYAMLCompatibility()
-{
-   yamlCompatiblityEnabled_ = true;
-}
-
-
-std::string 
-FastWriter::write( const Value &root )
-{
-   document_ = "";
-   writeValue( root );
-   document_ += "\n";
-   return document_;
-}
-
-
-void 
-FastWriter::writeValue( const Value &value )
-{
-   switch ( value.type() )
-   {
-   case nullValue:
+void FastWriter::writeValue(const Value& value) {
+  switch (value.type()) {
+  case nullValue:
+    if (!dropNullPlaceholders_)
       document_ += "null";
-      break;
-   case intValue:
-      document_ += valueToString( value.asLargestInt() );
-      break;
-   case uintValue:
-      document_ += valueToString( value.asLargestUInt() );
-      break;
-   case realValue:
-      document_ += valueToString( value.asDouble() );
-      break;
-   case stringValue:
-      document_ += valueToQuotedString( value.asCString() );
-      break;
-   case booleanValue:
-      document_ += valueToString( value.asBool() );
-      break;
-   case arrayValue:
-      {
-         document_ += "[";
-         int size = value.size();
-         for ( int index =0; index < size; ++index )
-         {
-            if ( index > 0 )
-               document_ += ",";
-            writeValue( value[index] );
-         }
-         document_ += "]";
-      }
-      break;
-   case objectValue:
-      {
-         Value::Members members( value.getMemberNames() );
-         document_ += "{";
-         for ( Value::Members::iterator it = members.begin(); 
-               it != members.end(); 
-               ++it )
-         {
-            const std::string &name = *it;
-            if ( it != members.begin() )
-               document_ += ",";
-            document_ += valueToQuotedString( name.c_str() );
-            document_ += yamlCompatiblityEnabled_ ? ": " 
-                                                  : ":";
-            writeValue( value[name] );
-         }
-         document_ += "}";
-      }
-      break;
-   }
+    break;
+  case intValue:
+    document_ += valueToString(value.asLargestInt());
+    break;
+  case uintValue:
+    document_ += valueToString(value.asLargestUInt());
+    break;
+  case realValue:
+    document_ += valueToString(value.asDouble());
+    break;
+  case stringValue: {
+    // Is NULL possible for value.string_? No.
+    char const* str;
+    char const* end;
+    bool ok = value.getString(&str, &end);
+    if (ok)
+      document_ += valueToQuotedStringN(str, static_cast<unsigned>(end - str));
+    break;
+  }
+  case booleanValue:
+    document_ += valueToString(value.asBool());
+    break;
+  case arrayValue: {
+    document_ += '[';
+    ArrayIndex size = value.size();
+    for (ArrayIndex index = 0; index < size; ++index) {
+      if (index > 0)
+        document_ += ',';
+      writeValue(value[index]);
+    }
+    document_ += ']';
+  } break;
+  case objectValue: {
+    Value::Members members(value.getMemberNames());
+    document_ += '{';
+    for (auto it = members.begin(); it != members.end(); ++it) {
+      const String& name = *it;
+      if (it != members.begin())
+        document_ += ',';
+      document_ += valueToQuotedStringN(name.data(),
+                                        static_cast<unsigned>(name.length()));
+      document_ += yamlCompatibilityEnabled_ ? ": " : ":";
+      writeValue(value[name]);
+    }
+    document_ += '}';
+  } break;
+  }
 }
 
-
 // Class StyledWriter
 // //////////////////////////////////////////////////////////////////
 
-StyledWriter::StyledWriter()
-   : rightMargin_( 74 )
-   , indentSize_( 3 )
-   , addChildValues_()
-{
+StyledWriter::StyledWriter() = default;
+
+String StyledWriter::write(const Value& root) {
+  document_.clear();
+  addChildValues_ = false;
+  indentString_.clear();
+  writeCommentBeforeValue(root);
+  writeValue(root);
+  writeCommentAfterValueOnSameLine(root);
+  document_ += '\n';
+  return document_;
 }
 
-
-std::string 
-StyledWriter::write( const Value &root )
-{
-   document_ = "";
-   addChildValues_ = false;
-   indentString_ = "";
-   writeCommentBeforeValue( root );
-   writeValue( root );
-   writeCommentAfterValueOnSameLine( root );
-   document_ += "\n";
-   return document_;
-}
-
-
-void 
-StyledWriter::writeValue( const Value &value )
-{
-   switch ( value.type() )
-   {
-   case nullValue:
-      pushValue( "null" );
-      break;
-   case intValue:
-      pushValue( valueToString( value.asLargestInt() ) );
-      break;
-   case uintValue:
-      pushValue( valueToString( value.asLargestUInt() ) );
-      break;
-   case realValue:
-      pushValue( valueToString( value.asDouble() ) );
-      break;
-   case stringValue:
-      pushValue( valueToQuotedString( value.asCString() ) );
-      break;
-   case booleanValue:
-      pushValue( valueToString( value.asBool() ) );
-      break;
-   case arrayValue:
-      writeArrayValue( value);
-      break;
-   case objectValue:
-      {
-         Value::Members members( value.getMemberNames() );
-         if ( members.empty() )
-            pushValue( "{}" );
-         else
-         {
-            writeWithIndent( "{" );
-            indent();
-            Value::Members::iterator it = members.begin();
-            for (;;)
-            {
-               const std::string &name = *it;
-               const Value &childValue = value[name];
-               writeCommentBeforeValue( childValue );
-               writeWithIndent( valueToQuotedString( name.c_str() ) );
-               document_ += " : ";
-               writeValue( childValue );
-               if ( ++it == members.end() )
-               {
-                  writeCommentAfterValueOnSameLine( childValue );
-                  break;
-               }
-               document_ += ",";
-               writeCommentAfterValueOnSameLine( childValue );
-            }
-            unindent();
-            writeWithIndent( "}" );
-         }
+void StyledWriter::writeValue(const Value& value) {
+  switch (value.type()) {
+  case nullValue:
+    pushValue("null");
+    break;
+  case intValue:
+    pushValue(valueToString(value.asLargestInt()));
+    break;
+  case uintValue:
+    pushValue(valueToString(value.asLargestUInt()));
+    break;
+  case realValue:
+    pushValue(valueToString(value.asDouble()));
+    break;
+  case stringValue: {
+    // Is NULL possible for value.string_? No.
+    char const* str;
+    char const* end;
+    bool ok = value.getString(&str, &end);
+    if (ok)
+      pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str)));
+    else
+      pushValue("");
+    break;
+  }
+  case booleanValue:
+    pushValue(valueToString(value.asBool()));
+    break;
+  case arrayValue:
+    writeArrayValue(value);
+    break;
+  case objectValue: {
+    Value::Members members(value.getMemberNames());
+    if (members.empty())
+      pushValue("{}");
+    else {
+      writeWithIndent("{");
+      indent();
+      auto it = members.begin();
+      for (;;) {
+        const String& name = *it;
+        const Value& childValue = value[name];
+        writeCommentBeforeValue(childValue);
+        writeWithIndent(valueToQuotedString(name.c_str()));
+        document_ += " : ";
+        writeValue(childValue);
+        if (++it == members.end()) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        document_ += ',';
+        writeCommentAfterValueOnSameLine(childValue);
       }
-      break;
-   }
+      unindent();
+      writeWithIndent("}");
+    }
+  } break;
+  }
 }
 
-
-void 
-StyledWriter::writeArrayValue( const Value &value )
-{
-   unsigned size = value.size();
-   if ( size == 0 )
-      pushValue( "[]" );
-   else
-   {
-      bool isArrayMultiLine = isMultineArray( value );
-      if ( isArrayMultiLine )
-      {
-         writeWithIndent( "[" );
-         indent();
-         bool hasChildValue = !childValues_.empty();
-         unsigned index =0;
-         for (;;)
-         {
-            const Value &childValue = value[index];
-            writeCommentBeforeValue( childValue );
-            if ( hasChildValue )
-               writeWithIndent( childValues_[index] );
-            else
-            {
-               writeIndent();
-               writeValue( childValue );
-            }
-            if ( ++index == size )
-            {
-               writeCommentAfterValueOnSameLine( childValue );
-               break;
-            }
-            document_ += ",";
-            writeCommentAfterValueOnSameLine( childValue );
-         }
-         unindent();
-         writeWithIndent( "]" );
+void StyledWriter::writeArrayValue(const Value& value) {
+  unsigned size = value.size();
+  if (size == 0)
+    pushValue("[]");
+  else {
+    bool isArrayMultiLine = isMultilineArray(value);
+    if (isArrayMultiLine) {
+      writeWithIndent("[");
+      indent();
+      bool hasChildValue = !childValues_.empty();
+      unsigned index = 0;
+      for (;;) {
+        const Value& childValue = value[index];
+        writeCommentBeforeValue(childValue);
+        if (hasChildValue)
+          writeWithIndent(childValues_[index]);
+        else {
+          writeIndent();
+          writeValue(childValue);
+        }
+        if (++index == size) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        document_ += ',';
+        writeCommentAfterValueOnSameLine(childValue);
       }
-      else // output on a single line
-      {
-         assert( childValues_.size() == size );
-         document_ += "[ ";
-         for ( unsigned index =0; index < size; ++index )
-         {
-            if ( index > 0 )
-               document_ += ", ";
-            document_ += childValues_[index];
-         }
-         document_ += " ]";
+      unindent();
+      writeWithIndent("]");
+    } else // output on a single line
+    {
+      assert(childValues_.size() == size);
+      document_ += "[ ";
+      for (unsigned index = 0; index < size; ++index) {
+        if (index > 0)
+          document_ += ", ";
+        document_ += childValues_[index];
       }
-   }
+      document_ += " ]";
+    }
+  }
 }
 
-
-bool 
-StyledWriter::isMultineArray( const Value &value )
-{
-   int size = value.size();
-   bool isMultiLine = size*3 >= rightMargin_ ;
-   childValues_.clear();
-   for ( int index =0; index < size  &&  !isMultiLine; ++index )
-   {
-      const Value &childValue = value[index];
-      isMultiLine = isMultiLine  ||
-                     ( (childValue.isArray()  ||  childValue.isObject())  &&  
-                        childValue.size() > 0 );
-   }
-   if ( !isMultiLine ) // check if line length > max line length
-   {
-      childValues_.reserve( size );
-      addChildValues_ = true;
-      int lineLength = 4 + (size-1)*2; // '[ ' + ', '*n + ' ]'
-      for ( int index =0; index < size  &&  !isMultiLine; ++index )
-      {
-         writeValue( value[index] );
-         lineLength += int( childValues_[index].length() );
-         isMultiLine = isMultiLine  &&  hasCommentForValue( value[index] );
+bool StyledWriter::isMultilineArray(const Value& value) {
+  ArrayIndex const size = value.size();
+  bool isMultiLine = size * 3 >= rightMargin_;
+  childValues_.clear();
+  for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+    const Value& childValue = value[index];
+    isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+                   !childValue.empty());
+  }
+  if (!isMultiLine) // check if line length > max line length
+  {
+    childValues_.reserve(size);
+    addChildValues_ = true;
+    ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+    for (ArrayIndex index = 0; index < size; ++index) {
+      if (hasCommentForValue(value[index])) {
+        isMultiLine = true;
       }
-      addChildValues_ = false;
-      isMultiLine = isMultiLine  ||  lineLength >= rightMargin_;
-   }
-   return isMultiLine;
+      writeValue(value[index]);
+      lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+    }
+    addChildValues_ = false;
+    isMultiLine = isMultiLine || lineLength >= rightMargin_;
+  }
+  return isMultiLine;
 }
 
-
-void 
-StyledWriter::pushValue( const std::string &value )
-{
-   if ( addChildValues_ )
-      childValues_.push_back( value );
-   else
-      document_ += value;
+void StyledWriter::pushValue(const String& value) {
+  if (addChildValues_)
+    childValues_.push_back(value);
+  else
+    document_ += value;
 }
 
-
-void 
-StyledWriter::writeIndent()
-{
-   if ( !document_.empty() )
-   {
-      char last = document_[document_.length()-1];
-      if ( last == ' ' )     // already indented
-         return;
-      if ( last != '\n' )    // Comments may add new-line
-         document_ += '\n';
-   }
-   document_ += indentString_;
-}
-
-
-void 
-StyledWriter::writeWithIndent( const std::string &value )
-{
-   writeIndent();
-   document_ += value;
-}
-
-
-void 
-StyledWriter::indent()
-{
-   indentString_ += std::string( indentSize_, ' ' );
-}
-
-
-void 
-StyledWriter::unindent()
-{
-   assert( int(indentString_.size()) >= indentSize_ );
-   indentString_.resize( indentString_.size() - indentSize_ );
-}
-
-
-void 
-StyledWriter::writeCommentBeforeValue( const Value &root )
-{
-   if ( !root.hasComment( commentBefore ) )
+void StyledWriter::writeIndent() {
+  if (!document_.empty()) {
+    char last = document_[document_.length() - 1];
+    if (last == ' ') // already indented
       return;
-   document_ += normalizeEOL( root.getComment( commentBefore ) );
-   document_ += "\n";
+    if (last != '\n') // Comments may add new-line
+      document_ += '\n';
+  }
+  document_ += indentString_;
 }
 
-
-void 
-StyledWriter::writeCommentAfterValueOnSameLine( const Value &root )
-{
-   if ( root.hasComment( commentAfterOnSameLine ) )
-      document_ += " " + normalizeEOL( root.getComment( commentAfterOnSameLine ) );
-
-   if ( root.hasComment( commentAfter ) )
-   {
-      document_ += "\n";
-      document_ += normalizeEOL( root.getComment( commentAfter ) );
-      document_ += "\n";
-   }
+void StyledWriter::writeWithIndent(const String& value) {
+  writeIndent();
+  document_ += value;
 }
 
+void StyledWriter::indent() { indentString_ += String(indentSize_, ' '); }
 
-bool 
-StyledWriter::hasCommentForValue( const Value &value )
-{
-   return value.hasComment( commentBefore )
-          ||  value.hasComment( commentAfterOnSameLine )
-          ||  value.hasComment( commentAfter );
+void StyledWriter::unindent() {
+  assert(indentString_.size() >= indentSize_);
+  indentString_.resize(indentString_.size() - indentSize_);
 }
 
+void StyledWriter::writeCommentBeforeValue(const Value& root) {
+  if (!root.hasComment(commentBefore))
+    return;
 
-std::string 
-StyledWriter::normalizeEOL( const std::string &text )
-{
-   std::string normalized;
-   normalized.reserve( text.length() );
-   const char *begin = text.c_str();
-   const char *end = begin + text.length();
-   const char *current = begin;
-   while ( current != end )
-   {
-      char c = *current++;
-      if ( c == '\r' ) // mac or dos EOL
-      {
-         if ( *current == '\n' ) // convert dos EOL
-            ++current;
-         normalized += '\n';
-      }
-      else // handle unix EOL & other char
-         normalized += c;
-   }
-   return normalized;
+  document_ += '\n';
+  writeIndent();
+  const String& comment = root.getComment(commentBefore);
+  String::const_iterator iter = comment.begin();
+  while (iter != comment.end()) {
+    document_ += *iter;
+    if (*iter == '\n' && ((iter + 1) != comment.end() && *(iter + 1) == '/'))
+      writeIndent();
+    ++iter;
+  }
+
+  // Comments are stripped of trailing newlines, so add one here
+  document_ += '\n';
 }
 
+void StyledWriter::writeCommentAfterValueOnSameLine(const Value& root) {
+  if (root.hasComment(commentAfterOnSameLine))
+    document_ += " " + root.getComment(commentAfterOnSameLine);
+
+  if (root.hasComment(commentAfter)) {
+    document_ += '\n';
+    document_ += root.getComment(commentAfter);
+    document_ += '\n';
+  }
+}
+
+bool StyledWriter::hasCommentForValue(const Value& value) {
+  return value.hasComment(commentBefore) ||
+         value.hasComment(commentAfterOnSameLine) ||
+         value.hasComment(commentAfter);
+}
 
 // Class StyledStreamWriter
 // //////////////////////////////////////////////////////////////////
 
-StyledStreamWriter::StyledStreamWriter( std::string indentation )
-   : document_(NULL)
-   , rightMargin_( 74 )
-   , indentation_( indentation )
-   , addChildValues_()
-{
+StyledStreamWriter::StyledStreamWriter(String indentation)
+    : document_(nullptr), indentation_(std::move(indentation)),
+      addChildValues_(), indented_(false) {}
+
+void StyledStreamWriter::write(OStream& out, const Value& root) {
+  document_ = &out;
+  addChildValues_ = false;
+  indentString_.clear();
+  indented_ = true;
+  writeCommentBeforeValue(root);
+  if (!indented_)
+    writeIndent();
+  indented_ = true;
+  writeValue(root);
+  writeCommentAfterValueOnSameLine(root);
+  *document_ << "\n";
+  document_ = nullptr; // Forget the stream, for safety.
 }
 
-
-void
-StyledStreamWriter::write( std::ostream &out, const Value &root )
-{
-   document_ = &out;
-   addChildValues_ = false;
-   indentString_ = "";
-   writeCommentBeforeValue( root );
-   writeValue( root );
-   writeCommentAfterValueOnSameLine( root );
-   *document_ << "\n";
-   document_ = NULL; // Forget the stream, for safety.
-}
-
-
-void 
-StyledStreamWriter::writeValue( const Value &value )
-{
-   switch ( value.type() )
-   {
-   case nullValue:
-      pushValue( "null" );
-      break;
-   case intValue:
-      pushValue( valueToString( value.asLargestInt() ) );
-      break;
-   case uintValue:
-      pushValue( valueToString( value.asLargestUInt() ) );
-      break;
-   case realValue:
-      pushValue( valueToString( value.asDouble() ) );
-      break;
-   case stringValue:
-      pushValue( valueToQuotedString( value.asCString() ) );
-      break;
-   case booleanValue:
-      pushValue( valueToString( value.asBool() ) );
-      break;
-   case arrayValue:
-      writeArrayValue( value);
-      break;
-   case objectValue:
-      {
-         Value::Members members( value.getMemberNames() );
-         if ( members.empty() )
-            pushValue( "{}" );
-         else
-         {
-            writeWithIndent( "{" );
-            indent();
-            Value::Members::iterator it = members.begin();
-            for (;;)
-            {
-               const std::string &name = *it;
-               const Value &childValue = value[name];
-               writeCommentBeforeValue( childValue );
-               writeWithIndent( valueToQuotedString( name.c_str() ) );
-               *document_ << " : ";
-               writeValue( childValue );
-               if ( ++it == members.end() )
-               {
-                  writeCommentAfterValueOnSameLine( childValue );
-                  break;
-               }
-               *document_ << ",";
-               writeCommentAfterValueOnSameLine( childValue );
-            }
-            unindent();
-            writeWithIndent( "}" );
-         }
+void StyledStreamWriter::writeValue(const Value& value) {
+  switch (value.type()) {
+  case nullValue:
+    pushValue("null");
+    break;
+  case intValue:
+    pushValue(valueToString(value.asLargestInt()));
+    break;
+  case uintValue:
+    pushValue(valueToString(value.asLargestUInt()));
+    break;
+  case realValue:
+    pushValue(valueToString(value.asDouble()));
+    break;
+  case stringValue: {
+    // Is NULL possible for value.string_? No.
+    char const* str;
+    char const* end;
+    bool ok = value.getString(&str, &end);
+    if (ok)
+      pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str)));
+    else
+      pushValue("");
+    break;
+  }
+  case booleanValue:
+    pushValue(valueToString(value.asBool()));
+    break;
+  case arrayValue:
+    writeArrayValue(value);
+    break;
+  case objectValue: {
+    Value::Members members(value.getMemberNames());
+    if (members.empty())
+      pushValue("{}");
+    else {
+      writeWithIndent("{");
+      indent();
+      auto it = members.begin();
+      for (;;) {
+        const String& name = *it;
+        const Value& childValue = value[name];
+        writeCommentBeforeValue(childValue);
+        writeWithIndent(valueToQuotedString(name.c_str()));
+        *document_ << " : ";
+        writeValue(childValue);
+        if (++it == members.end()) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        *document_ << ",";
+        writeCommentAfterValueOnSameLine(childValue);
       }
-      break;
-   }
+      unindent();
+      writeWithIndent("}");
+    }
+  } break;
+  }
 }
 
-
-void 
-StyledStreamWriter::writeArrayValue( const Value &value )
-{
-   unsigned size = value.size();
-   if ( size == 0 )
-      pushValue( "[]" );
-   else
-   {
-      bool isArrayMultiLine = isMultineArray( value );
-      if ( isArrayMultiLine )
-      {
-         writeWithIndent( "[" );
-         indent();
-         bool hasChildValue = !childValues_.empty();
-         unsigned index =0;
-         for (;;)
-         {
-            const Value &childValue = value[index];
-            writeCommentBeforeValue( childValue );
-            if ( hasChildValue )
-               writeWithIndent( childValues_[index] );
-            else
-            {
-               writeIndent();
-               writeValue( childValue );
-            }
-            if ( ++index == size )
-            {
-               writeCommentAfterValueOnSameLine( childValue );
-               break;
-            }
-            *document_ << ",";
-            writeCommentAfterValueOnSameLine( childValue );
-         }
-         unindent();
-         writeWithIndent( "]" );
+void StyledStreamWriter::writeArrayValue(const Value& value) {
+  unsigned size = value.size();
+  if (size == 0)
+    pushValue("[]");
+  else {
+    bool isArrayMultiLine = isMultilineArray(value);
+    if (isArrayMultiLine) {
+      writeWithIndent("[");
+      indent();
+      bool hasChildValue = !childValues_.empty();
+      unsigned index = 0;
+      for (;;) {
+        const Value& childValue = value[index];
+        writeCommentBeforeValue(childValue);
+        if (hasChildValue)
+          writeWithIndent(childValues_[index]);
+        else {
+          if (!indented_)
+            writeIndent();
+          indented_ = true;
+          writeValue(childValue);
+          indented_ = false;
+        }
+        if (++index == size) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        *document_ << ",";
+        writeCommentAfterValueOnSameLine(childValue);
       }
-      else // output on a single line
-      {
-         assert( childValues_.size() == size );
-         *document_ << "[ ";
-         for ( unsigned index =0; index < size; ++index )
-         {
-            if ( index > 0 )
-               *document_ << ", ";
-            *document_ << childValues_[index];
-         }
-         *document_ << " ]";
+      unindent();
+      writeWithIndent("]");
+    } else // output on a single line
+    {
+      assert(childValues_.size() == size);
+      *document_ << "[ ";
+      for (unsigned index = 0; index < size; ++index) {
+        if (index > 0)
+          *document_ << ", ";
+        *document_ << childValues_[index];
       }
-   }
+      *document_ << " ]";
+    }
+  }
 }
 
-
-bool 
-StyledStreamWriter::isMultineArray( const Value &value )
-{
-   int size = value.size();
-   bool isMultiLine = size*3 >= rightMargin_ ;
-   childValues_.clear();
-   for ( int index =0; index < size  &&  !isMultiLine; ++index )
-   {
-      const Value &childValue = value[index];
-      isMultiLine = isMultiLine  ||
-                     ( (childValue.isArray()  ||  childValue.isObject())  &&  
-                        childValue.size() > 0 );
-   }
-   if ( !isMultiLine ) // check if line length > max line length
-   {
-      childValues_.reserve( size );
-      addChildValues_ = true;
-      int lineLength = 4 + (size-1)*2; // '[ ' + ', '*n + ' ]'
-      for ( int index =0; index < size  &&  !isMultiLine; ++index )
-      {
-         writeValue( value[index] );
-         lineLength += int( childValues_[index].length() );
-         isMultiLine = isMultiLine  &&  hasCommentForValue( value[index] );
+bool StyledStreamWriter::isMultilineArray(const Value& value) {
+  ArrayIndex const size = value.size();
+  bool isMultiLine = size * 3 >= rightMargin_;
+  childValues_.clear();
+  for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+    const Value& childValue = value[index];
+    isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+                   !childValue.empty());
+  }
+  if (!isMultiLine) // check if line length > max line length
+  {
+    childValues_.reserve(size);
+    addChildValues_ = true;
+    ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+    for (ArrayIndex index = 0; index < size; ++index) {
+      if (hasCommentForValue(value[index])) {
+        isMultiLine = true;
       }
-      addChildValues_ = false;
-      isMultiLine = isMultiLine  ||  lineLength >= rightMargin_;
-   }
-   return isMultiLine;
+      writeValue(value[index]);
+      lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+    }
+    addChildValues_ = false;
+    isMultiLine = isMultiLine || lineLength >= rightMargin_;
+  }
+  return isMultiLine;
 }
 
-
-void 
-StyledStreamWriter::pushValue( const std::string &value )
-{
-   if ( addChildValues_ )
-      childValues_.push_back( value );
-   else
-      *document_ << value;
+void StyledStreamWriter::pushValue(const String& value) {
+  if (addChildValues_)
+    childValues_.push_back(value);
+  else
+    *document_ << value;
 }
 
-
-void 
-StyledStreamWriter::writeIndent()
-{
-  /*
-    Some comments in this method would have been nice. ;-)
-
-   if ( !document_.empty() )
-   {
-      char last = document_[document_.length()-1];
-      if ( last == ' ' )     // already indented
-         return;
-      if ( last != '\n' )    // Comments may add new-line
-         *document_ << '\n';
-   }
-  */
-   *document_ << '\n' << indentString_;
+void StyledStreamWriter::writeIndent() {
+  // blep intended this to look at the so-far-written string
+  // to determine whether we are already indented, but
+  // with a stream we cannot do that. So we rely on some saved state.
+  // The caller checks indented_.
+  *document_ << '\n' << indentString_;
 }
 
-
-void 
-StyledStreamWriter::writeWithIndent( const std::string &value )
-{
-   writeIndent();
-   *document_ << value;
+void StyledStreamWriter::writeWithIndent(const String& value) {
+  if (!indented_)
+    writeIndent();
+  *document_ << value;
+  indented_ = false;
 }
 
+void StyledStreamWriter::indent() { indentString_ += indentation_; }
 
-void 
-StyledStreamWriter::indent()
-{
-   indentString_ += indentation_;
+void StyledStreamWriter::unindent() {
+  assert(indentString_.size() >= indentation_.size());
+  indentString_.resize(indentString_.size() - indentation_.size());
 }
 
+void StyledStreamWriter::writeCommentBeforeValue(const Value& root) {
+  if (!root.hasComment(commentBefore))
+    return;
 
-void 
-StyledStreamWriter::unindent()
-{
-   assert( indentString_.size() >= indentation_.size() );
-   indentString_.resize( indentString_.size() - indentation_.size() );
+  if (!indented_)
+    writeIndent();
+  const String& comment = root.getComment(commentBefore);
+  String::const_iterator iter = comment.begin();
+  while (iter != comment.end()) {
+    *document_ << *iter;
+    if (*iter == '\n' && ((iter + 1) != comment.end() && *(iter + 1) == '/'))
+      // writeIndent();  // would include newline
+      *document_ << indentString_;
+    ++iter;
+  }
+  indented_ = false;
 }
 
+void StyledStreamWriter::writeCommentAfterValueOnSameLine(const Value& root) {
+  if (root.hasComment(commentAfterOnSameLine))
+    *document_ << ' ' << root.getComment(commentAfterOnSameLine);
 
-void 
-StyledStreamWriter::writeCommentBeforeValue( const Value &root )
-{
-   if ( !root.hasComment( commentBefore ) )
-      return;
-   *document_ << normalizeEOL( root.getComment( commentBefore ) );
-   *document_ << "\n";
+  if (root.hasComment(commentAfter)) {
+    writeIndent();
+    *document_ << root.getComment(commentAfter);
+  }
+  indented_ = false;
 }
 
-
-void 
-StyledStreamWriter::writeCommentAfterValueOnSameLine( const Value &root )
-{
-   if ( root.hasComment( commentAfterOnSameLine ) )
-      *document_ << " " + normalizeEOL( root.getComment( commentAfterOnSameLine ) );
-
-   if ( root.hasComment( commentAfter ) )
-   {
-      *document_ << "\n";
-      *document_ << normalizeEOL( root.getComment( commentAfter ) );
-      *document_ << "\n";
-   }
+bool StyledStreamWriter::hasCommentForValue(const Value& value) {
+  return value.hasComment(commentBefore) ||
+         value.hasComment(commentAfterOnSameLine) ||
+         value.hasComment(commentAfter);
 }
 
+//////////////////////////
+// BuiltStyledStreamWriter
 
-bool 
-StyledStreamWriter::hasCommentForValue( const Value &value )
-{
-   return value.hasComment( commentBefore )
-          ||  value.hasComment( commentAfterOnSameLine )
-          ||  value.hasComment( commentAfter );
+/// Scoped enums are not available until C++11.
+struct CommentStyle {
+  /// Decide whether to write comments.
+  enum Enum {
+    None, ///< Drop all comments.
+    Most, ///< Recover odd behavior of previous versions (not implemented yet).
+    All   ///< Keep all comments.
+  };
+};
+
+struct BuiltStyledStreamWriter : public StreamWriter {
+  BuiltStyledStreamWriter(String indentation,
+                          CommentStyle::Enum cs,
+                          String colonSymbol,
+                          String nullSymbol,
+                          String endingLineFeedSymbol,
+                          bool useSpecialFloats,
+                          unsigned int precision,
+                          PrecisionType precisionType);
+  int write(Value const& root, OStream* sout) override;
+
+private:
+  void writeValue(Value const& value);
+  void writeArrayValue(Value const& value);
+  bool isMultilineArray(Value const& value);
+  void pushValue(String const& value);
+  void writeIndent();
+  void writeWithIndent(String const& value);
+  void indent();
+  void unindent();
+  void writeCommentBeforeValue(Value const& root);
+  void writeCommentAfterValueOnSameLine(Value const& root);
+  static bool hasCommentForValue(const Value& value);
+
+  typedef std::vector<String> ChildValues;
+
+  ChildValues childValues_;
+  String indentString_;
+  unsigned int rightMargin_;
+  String indentation_;
+  CommentStyle::Enum cs_;
+  String colonSymbol_;
+  String nullSymbol_;
+  String endingLineFeedSymbol_;
+  bool addChildValues_ : 1;
+  bool indented_ : 1;
+  bool useSpecialFloats_ : 1;
+  unsigned int precision_;
+  PrecisionType precisionType_;
+};
+BuiltStyledStreamWriter::BuiltStyledStreamWriter(String indentation,
+                                                 CommentStyle::Enum cs,
+                                                 String colonSymbol,
+                                                 String nullSymbol,
+                                                 String endingLineFeedSymbol,
+                                                 bool useSpecialFloats,
+                                                 unsigned int precision,
+                                                 PrecisionType precisionType)
+    : rightMargin_(74), indentation_(std::move(indentation)), cs_(cs),
+      colonSymbol_(std::move(colonSymbol)), nullSymbol_(std::move(nullSymbol)),
+      endingLineFeedSymbol_(std::move(endingLineFeedSymbol)),
+      addChildValues_(false), indented_(false),
+      useSpecialFloats_(useSpecialFloats), precision_(precision),
+      precisionType_(precisionType) {}
+int BuiltStyledStreamWriter::write(Value const& root, OStream* sout) {
+  sout_ = sout;
+  addChildValues_ = false;
+  indented_ = true;
+  indentString_.clear();
+  writeCommentBeforeValue(root);
+  if (!indented_)
+    writeIndent();
+  indented_ = true;
+  writeValue(root);
+  writeCommentAfterValueOnSameLine(root);
+  *sout_ << endingLineFeedSymbol_;
+  sout_ = nullptr;
+  return 0;
 }
-
-
-std::string 
-StyledStreamWriter::normalizeEOL( const std::string &text )
-{
-   std::string normalized;
-   normalized.reserve( text.length() );
-   const char *begin = text.c_str();
-   const char *end = begin + text.length();
-   const char *current = begin;
-   while ( current != end )
-   {
-      char c = *current++;
-      if ( c == '\r' ) // mac or dos EOL
-      {
-         if ( *current == '\n' ) // convert dos EOL
-            ++current;
-         normalized += '\n';
+void BuiltStyledStreamWriter::writeValue(Value const& value) {
+  switch (value.type()) {
+  case nullValue:
+    pushValue(nullSymbol_);
+    break;
+  case intValue:
+    pushValue(valueToString(value.asLargestInt()));
+    break;
+  case uintValue:
+    pushValue(valueToString(value.asLargestUInt()));
+    break;
+  case realValue:
+    pushValue(valueToString(value.asDouble(), useSpecialFloats_, precision_,
+                            precisionType_));
+    break;
+  case stringValue: {
+    // Is NULL is possible for value.string_? No.
+    char const* str;
+    char const* end;
+    bool ok = value.getString(&str, &end);
+    if (ok)
+      pushValue(valueToQuotedStringN(str, static_cast<unsigned>(end - str)));
+    else
+      pushValue("");
+    break;
+  }
+  case booleanValue:
+    pushValue(valueToString(value.asBool()));
+    break;
+  case arrayValue:
+    writeArrayValue(value);
+    break;
+  case objectValue: {
+    Value::Members members(value.getMemberNames());
+    if (members.empty())
+      pushValue("{}");
+    else {
+      writeWithIndent("{");
+      indent();
+      auto it = members.begin();
+      for (;;) {
+        String const& name = *it;
+        Value const& childValue = value[name];
+        writeCommentBeforeValue(childValue);
+        writeWithIndent(valueToQuotedStringN(
+            name.data(), static_cast<unsigned>(name.length())));
+        *sout_ << colonSymbol_;
+        writeValue(childValue);
+        if (++it == members.end()) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        *sout_ << ",";
+        writeCommentAfterValueOnSameLine(childValue);
       }
-      else // handle unix EOL & other char
-         normalized += c;
-   }
-   return normalized;
+      unindent();
+      writeWithIndent("}");
+    }
+  } break;
+  }
 }
 
-
-std::ostream& operator<<( std::ostream &sout, const Value &root )
-{
-   Json::StyledStreamWriter writer;
-   writer.write(sout, root);
-   return sout;
+void BuiltStyledStreamWriter::writeArrayValue(Value const& value) {
+  unsigned size = value.size();
+  if (size == 0)
+    pushValue("[]");
+  else {
+    bool isMultiLine = (cs_ == CommentStyle::All) || isMultilineArray(value);
+    if (isMultiLine) {
+      writeWithIndent("[");
+      indent();
+      bool hasChildValue = !childValues_.empty();
+      unsigned index = 0;
+      for (;;) {
+        Value const& childValue = value[index];
+        writeCommentBeforeValue(childValue);
+        if (hasChildValue)
+          writeWithIndent(childValues_[index]);
+        else {
+          if (!indented_)
+            writeIndent();
+          indented_ = true;
+          writeValue(childValue);
+          indented_ = false;
+        }
+        if (++index == size) {
+          writeCommentAfterValueOnSameLine(childValue);
+          break;
+        }
+        *sout_ << ",";
+        writeCommentAfterValueOnSameLine(childValue);
+      }
+      unindent();
+      writeWithIndent("]");
+    } else // output on a single line
+    {
+      assert(childValues_.size() == size);
+      *sout_ << "[";
+      if (!indentation_.empty())
+        *sout_ << " ";
+      for (unsigned index = 0; index < size; ++index) {
+        if (index > 0)
+          *sout_ << ((!indentation_.empty()) ? ", " : ",");
+        *sout_ << childValues_[index];
+      }
+      if (!indentation_.empty())
+        *sout_ << " ";
+      *sout_ << "]";
+    }
+  }
 }
 
+bool BuiltStyledStreamWriter::isMultilineArray(Value const& value) {
+  ArrayIndex const size = value.size();
+  bool isMultiLine = size * 3 >= rightMargin_;
+  childValues_.clear();
+  for (ArrayIndex index = 0; index < size && !isMultiLine; ++index) {
+    Value const& childValue = value[index];
+    isMultiLine = ((childValue.isArray() || childValue.isObject()) &&
+                   !childValue.empty());
+  }
+  if (!isMultiLine) // check if line length > max line length
+  {
+    childValues_.reserve(size);
+    addChildValues_ = true;
+    ArrayIndex lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]'
+    for (ArrayIndex index = 0; index < size; ++index) {
+      if (hasCommentForValue(value[index])) {
+        isMultiLine = true;
+      }
+      writeValue(value[index]);
+      lineLength += static_cast<ArrayIndex>(childValues_[index].length());
+    }
+    addChildValues_ = false;
+    isMultiLine = isMultiLine || lineLength >= rightMargin_;
+  }
+  return isMultiLine;
+}
+
+void BuiltStyledStreamWriter::pushValue(String const& value) {
+  if (addChildValues_)
+    childValues_.push_back(value);
+  else
+    *sout_ << value;
+}
+
+void BuiltStyledStreamWriter::writeIndent() {
+  // blep intended this to look at the so-far-written string
+  // to determine whether we are already indented, but
+  // with a stream we cannot do that. So we rely on some saved state.
+  // The caller checks indented_.
+
+  if (!indentation_.empty()) {
+    // In this case, drop newlines too.
+    *sout_ << '\n' << indentString_;
+  }
+}
+
+void BuiltStyledStreamWriter::writeWithIndent(String const& value) {
+  if (!indented_)
+    writeIndent();
+  *sout_ << value;
+  indented_ = false;
+}
+
+void BuiltStyledStreamWriter::indent() { indentString_ += indentation_; }
+
+void BuiltStyledStreamWriter::unindent() {
+  assert(indentString_.size() >= indentation_.size());
+  indentString_.resize(indentString_.size() - indentation_.size());
+}
+
+void BuiltStyledStreamWriter::writeCommentBeforeValue(Value const& root) {
+  if (cs_ == CommentStyle::None)
+    return;
+  if (!root.hasComment(commentBefore))
+    return;
+
+  if (!indented_)
+    writeIndent();
+  const String& comment = root.getComment(commentBefore);
+  String::const_iterator iter = comment.begin();
+  while (iter != comment.end()) {
+    *sout_ << *iter;
+    if (*iter == '\n' && ((iter + 1) != comment.end() && *(iter + 1) == '/'))
+      // writeIndent();  // would write extra newline
+      *sout_ << indentString_;
+    ++iter;
+  }
+  indented_ = false;
+}
+
+void BuiltStyledStreamWriter::writeCommentAfterValueOnSameLine(
+    Value const& root) {
+  if (cs_ == CommentStyle::None)
+    return;
+  if (root.hasComment(commentAfterOnSameLine))
+    *sout_ << " " + root.getComment(commentAfterOnSameLine);
+
+  if (root.hasComment(commentAfter)) {
+    writeIndent();
+    *sout_ << root.getComment(commentAfter);
+  }
+}
+
+// static
+bool BuiltStyledStreamWriter::hasCommentForValue(const Value& value) {
+  return value.hasComment(commentBefore) ||
+         value.hasComment(commentAfterOnSameLine) ||
+         value.hasComment(commentAfter);
+}
+
+///////////////
+// StreamWriter
+
+StreamWriter::StreamWriter() : sout_(nullptr) {}
+StreamWriter::~StreamWriter() = default;
+StreamWriter::Factory::~Factory() = default;
+StreamWriterBuilder::StreamWriterBuilder() { setDefaults(&settings_); }
+StreamWriterBuilder::~StreamWriterBuilder() = default;
+StreamWriter* StreamWriterBuilder::newStreamWriter() const {
+  String indentation = settings_["indentation"].asString();
+  String cs_str = settings_["commentStyle"].asString();
+  String pt_str = settings_["precisionType"].asString();
+  bool eyc = settings_["enableYAMLCompatibility"].asBool();
+  bool dnp = settings_["dropNullPlaceholders"].asBool();
+  bool usf = settings_["useSpecialFloats"].asBool();
+  unsigned int pre = settings_["precision"].asUInt();
+  CommentStyle::Enum cs = CommentStyle::All;
+  if (cs_str == "All") {
+    cs = CommentStyle::All;
+  } else if (cs_str == "None") {
+    cs = CommentStyle::None;
+  } else {
+    throwRuntimeError("commentStyle must be 'All' or 'None'");
+  }
+  PrecisionType precisionType(significantDigits);
+  if (pt_str == "significant") {
+    precisionType = PrecisionType::significantDigits;
+  } else if (pt_str == "decimal") {
+    precisionType = PrecisionType::decimalPlaces;
+  } else {
+    throwRuntimeError("precisionType must be 'significant' or 'decimal'");
+  }
+  String colonSymbol = " : ";
+  if (eyc) {
+    colonSymbol = ": ";
+  } else if (indentation.empty()) {
+    colonSymbol = ":";
+  }
+  String nullSymbol = "null";
+  if (dnp) {
+    nullSymbol.clear();
+  }
+  if (pre > 17)
+    pre = 17;
+  String endingLineFeedSymbol;
+  return new BuiltStyledStreamWriter(indentation, cs, colonSymbol, nullSymbol,
+                                     endingLineFeedSymbol, usf, pre,
+                                     precisionType);
+}
+static void getValidWriterKeys(std::set<String>* valid_keys) {
+  valid_keys->clear();
+  valid_keys->insert("indentation");
+  valid_keys->insert("commentStyle");
+  valid_keys->insert("enableYAMLCompatibility");
+  valid_keys->insert("dropNullPlaceholders");
+  valid_keys->insert("useSpecialFloats");
+  valid_keys->insert("precision");
+  valid_keys->insert("precisionType");
+}
+bool StreamWriterBuilder::validate(Json::Value* invalid) const {
+  Json::Value my_invalid;
+  if (!invalid)
+    invalid = &my_invalid; // so we do not need to test for NULL
+  Json::Value& inv = *invalid;
+  std::set<String> valid_keys;
+  getValidWriterKeys(&valid_keys);
+  Value::Members keys = settings_.getMemberNames();
+  size_t n = keys.size();
+  for (size_t i = 0; i < n; ++i) {
+    String const& key = keys[i];
+    if (valid_keys.find(key) == valid_keys.end()) {
+      inv[key] = settings_[key];
+    }
+  }
+  return inv.empty();
+}
+Value& StreamWriterBuilder::operator[](const String& key) {
+  return settings_[key];
+}
+// static
+void StreamWriterBuilder::setDefaults(Json::Value* settings) {
+  //! [StreamWriterBuilderDefaults]
+  (*settings)["commentStyle"] = "All";
+  (*settings)["indentation"] = "\t";
+  (*settings)["enableYAMLCompatibility"] = false;
+  (*settings)["dropNullPlaceholders"] = false;
+  (*settings)["useSpecialFloats"] = false;
+  (*settings)["precision"] = 17;
+  (*settings)["precisionType"] = "significant";
+  //! [StreamWriterBuilderDefaults]
+}
+
+String writeString(StreamWriter::Factory const& factory, Value const& root) {
+  OStringStream sout;
+  StreamWriterPtr const writer(factory.newStreamWriter());
+  writer->write(root, &sout);
+  return sout.str();
+}
+
+OStream& operator<<(OStream& sout, Value const& root) {
+  StreamWriterBuilder builder;
+  StreamWriterPtr const writer(builder.newStreamWriter());
+  writer->write(root, &sout);
+  return sout;
+}
 
 } // namespace Json
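
As a rough illustration of the writer API imported above (StreamWriterBuilder, writeString and the settings installed by setDefaults), a client might serialize a Json::Value along these lines; the payload and the two overridden settings are made-up examples, not part of the imported sources:

    #include <json/json.h>
    #include <iostream>

    int main() {
      Json::Value root;
      root["name"] = "example";   // made-up payload
      root["count"] = 3;

      Json::StreamWriterBuilder builder;
      builder["indentation"] = "  ";   // overrides the "\t" default from setDefaults()
      builder["commentStyle"] = "None";
      std::cout << Json::writeString(builder, root) << std::endl;
      return 0;
    }
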
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/sconscript b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/sconscript
deleted file mode 100644
index 6e7c6c8..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/sconscript
+++ /dev/null
@@ -1,8 +0,0 @@
-Import( 'env buildLibrary' )
-
-buildLibrary( env, Split( """
-    json_reader.cpp 
-    json_value.cpp 
-    json_writer.cpp
-     """ ),
-    'json' )
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/version.h.in b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/version.h.in
new file mode 100644
index 0000000..4cf439c
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/lib_json/version.h.in
@@ -0,0 +1,22 @@
+// DO NOT EDIT. This file (and "version") is a template used by the build system
+// (either CMake or Meson) to generate a "version.h" header file.
+#ifndef JSON_VERSION_H_INCLUDED
+#define JSON_VERSION_H_INCLUDED
+
+#define JSONCPP_VERSION_STRING "@JSONCPP_VERSION@"
+#define JSONCPP_VERSION_MAJOR @JSONCPP_VERSION_MAJOR@
+#define JSONCPP_VERSION_MINOR @JSONCPP_VERSION_MINOR@
+#define JSONCPP_VERSION_PATCH @JSONCPP_VERSION_PATCH@
+#define JSONCPP_VERSION_QUALIFIER
+#define JSONCPP_VERSION_HEXA ((JSONCPP_VERSION_MAJOR << 24) \
+                            | (JSONCPP_VERSION_MINOR << 16) \
+                            | (JSONCPP_VERSION_PATCH << 8))
+
+#ifdef JSONCPP_USING_SECURE_MEMORY
+#undef JSONCPP_USING_SECURE_MEMORY
+#endif
+#define JSONCPP_USING_SECURE_MEMORY @JSONCPP_USE_SECURE_MEMORY@
+// If non-zero, the library zeroes any memory that it has allocated before
+// it frees its memory.
+
+#endif // JSON_VERSION_H_INCLUDED
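
For illustration, if the pinned jsoncpp were, say, version 1.9.0, the build system would substitute the placeholders above roughly as follows (the numbers are an example, not what this checkout actually pins):

    #define JSONCPP_VERSION_STRING "1.9.0"
    #define JSONCPP_VERSION_MAJOR 1
    #define JSONCPP_VERSION_MINOR 9
    #define JSONCPP_VERSION_PATCH 0
    // JSONCPP_VERSION_HEXA then evaluates to
    // (1 << 24) | (9 << 16) | (0 << 8) == 0x01090000.
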
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/CMakeLists.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/CMakeLists.txt
new file mode 100644
index 0000000..abb1813
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/CMakeLists.txt
@@ -0,0 +1,42 @@
+# vim: et ts=4 sts=4 sw=4 tw=0
+
+add_executable( jsoncpp_test
+                jsontest.cpp
+                jsontest.h
+                fuzz.cpp
+                fuzz.h
+                main.cpp
+                )
+
+
+if(BUILD_SHARED_LIBS)
+    add_compile_definitions( JSON_DLL )
+endif()
+target_link_libraries(jsoncpp_test jsoncpp_lib)
+
+# another way to solve issue #90
+#set_target_properties(jsoncpp_test PROPERTIES COMPILE_FLAGS -ffloat-store)
+
+# Run unit tests in post-build
+# (the default CMake workflow hides the test result away in a file, which makes for a poor dev workflow)
+if(JSONCPP_WITH_POST_BUILD_UNITTEST)
+    if(BUILD_SHARED_LIBS)
+        # First, copy the shared lib, for Microsoft.
+        # Then, run the test executable.
+        add_custom_command( TARGET jsoncpp_test
+                            POST_BUILD
+                            COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:jsoncpp_lib> $<TARGET_FILE_DIR:jsoncpp_test>
+                            COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:jsoncpp_test>)
+    else(BUILD_SHARED_LIBS)
+        # Just run the test executable.
+        add_custom_command( TARGET jsoncpp_test
+                            POST_BUILD
+                            COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:jsoncpp_test>)
+    endif()
+    ## Create tests for dashboard submission, allows easy review of CI results https://my.cdash.org/index.php?project=jsoncpp
+    add_test(NAME jsoncpp_test
+       COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR}  $<TARGET_FILE:jsoncpp_test>
+    )
+endif()
+
+set_target_properties(jsoncpp_test PROPERTIES OUTPUT_NAME jsoncpp_test)
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.cpp
new file mode 100644
index 0000000..f79f19f
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.cpp
@@ -0,0 +1,49 @@
+// Copyright 2007-2019 The JsonCpp Authors
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#include "fuzz.h"
+
+#include <cstdint>
+#include <json/config.h>
+#include <json/json.h>
+#include <memory>
+#include <stdint.h>
+#include <string>
+
+namespace Json {
+class Exception;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  Json::CharReaderBuilder builder;
+
+  if (size < sizeof(uint32_t)) {
+    return 0;
+  }
+
+  uint32_t hash_settings = *(const uint32_t*)data;
+  data += sizeof(uint32_t);
+
+  builder.settings_["failIfExtra"] = hash_settings & (1 << 0);
+  builder.settings_["allowComments_"] = hash_settings & (1 << 1);
+  builder.settings_["strictRoot_"] = hash_settings & (1 << 2);
+  builder.settings_["allowDroppedNullPlaceholders_"] = hash_settings & (1 << 3);
+  builder.settings_["allowNumericKeys_"] = hash_settings & (1 << 4);
+  builder.settings_["allowSingleQuotes_"] = hash_settings & (1 << 5);
+  builder.settings_["failIfExtra_"] = hash_settings & (1 << 6);
+  builder.settings_["rejectDupKeys_"] = hash_settings & (1 << 7);
+  builder.settings_["allowSpecialFloats_"] = hash_settings & (1 << 8);
+
+  std::unique_ptr<Json::CharReader> reader(builder.newCharReader());
+
+  Json::Value root;
+  const char* data_str = reinterpret_cast<const char*>(data);
+  try {
+    reader->parse(data_str, data_str + size, &root, nullptr);
+  } catch (Json::Exception const&) {
+  }
+  // Whether it succeeded or not doesn't matter.
+  return 0;
+}
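
For reference, the fuzz target above can also be driven outside libFuzzer with a minimal standalone harness like the sketch below (the driver itself is hypothetical; only LLVMFuzzerTestOneInput comes from fuzz.h). Note that the first four bytes of the input are consumed as the reader-settings bitmask:

    #include "fuzz.h"
    #include <fstream>
    #include <iterator>
    #include <vector>

    int main(int argc, const char* argv[]) {
      if (argc < 2)
        return 1;
      std::ifstream in(argv[1], std::ios::binary);
      std::vector<char> buf((std::istreambuf_iterator<char>(in)),
                            std::istreambuf_iterator<char>());
      // First 4 bytes select reader settings; the rest is parsed as JSON.
      return LLVMFuzzerTestOneInput(
          reinterpret_cast<const uint8_t*>(buf.data()), buf.size());
    }
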
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.h
new file mode 100644
index 0000000..0816d27
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/fuzz.h
@@ -0,0 +1,14 @@
+// Copyright 2007-2010 The JsonCpp Authors
+// Distributed under MIT license, or public domain if desired and
+// recognized in your jurisdiction.
+// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+#ifndef FUZZ_H_INCLUDED
+#define FUZZ_H_INCLUDED
+
+#include <cstddef>
+#include <stdint.h>
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+
+#endif // ifndef FUZZ_H_INCLUDED
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.cpp
index 327d344..c0b5296 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.cpp
@@ -1,575 +1,435 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #define _CRT_SECURE_NO_WARNINGS 1 // Prevents deprecation warning with MSVC
 #include "jsontest.h"
-#include <stdio.h>
+#include <cstdio>
 #include <string>
 
 #if defined(_MSC_VER)
 // Used to install a report hook that prevent dialog on assertion and error.
-# include <crtdbg.h>
+#include <crtdbg.h>
 #endif // if defined(_MSC_VER)
 
 #if defined(_WIN32)
 // Used to prevent dialog on memory fault.
 // Limits headers included by Windows.h
-# define WIN32_LEAN_AND_MEAN
-# define NOSERVICE
-# define NOMCX
-# define NOIME
-# define NOSOUND
-# define NOCOMM
-# define NORPC
-# define NOGDI
-# define NOUSER
-# define NODRIVERS
-# define NOLOGERROR
-# define NOPROFILER
-# define NOMEMMGR
-# define NOLFILEIO
-# define NOOPENFILE
-# define NORESOURCE
-# define NOATOM
-# define NOLANGUAGE
-# define NOLSTRING
-# define NODBCS
-# define NOKEYBOARDINFO
-# define NOGDICAPMASKS
-# define NOCOLOR
-# define NOGDIOBJ
-# define NODRAWTEXT
-# define NOTEXTMETRIC
-# define NOSCALABLEFONT
-# define NOBITMAP
-# define NORASTEROPS
-# define NOMETAFILE
-# define NOSYSMETRICS
-# define NOSYSTEMPARAMSINFO
-# define NOMSG
-# define NOWINSTYLES
-# define NOWINOFFSETS
-# define NOSHOWWINDOW
-# define NODEFERWINDOWPOS
-# define NOVIRTUALKEYCODES
-# define NOKEYSTATES
-# define NOWH
-# define NOMENUS
-# define NOSCROLL
-# define NOCLIPBOARD
-# define NOICONS
-# define NOMB
-# define NOSYSCOMMANDS
-# define NOMDI
-# define NOCTLMGR
-# define NOWINMESSAGES
-# include <windows.h>
+#define WIN32_LEAN_AND_MEAN
+#define NOSERVICE
+#define NOMCX
+#define NOIME
+#define NOSOUND
+#define NOCOMM
+#define NORPC
+#define NOGDI
+#define NOUSER
+#define NODRIVERS
+#define NOLOGERROR
+#define NOPROFILER
+#define NOMEMMGR
+#define NOLFILEIO
+#define NOOPENFILE
+#define NORESOURCE
+#define NOATOM
+#define NOLANGUAGE
+#define NOLSTRING
+#define NODBCS
+#define NOKEYBOARDINFO
+#define NOGDICAPMASKS
+#define NOCOLOR
+#define NOGDIOBJ
+#define NODRAWTEXT
+#define NOTEXTMETRIC
+#define NOSCALABLEFONT
+#define NOBITMAP
+#define NORASTEROPS
+#define NOMETAFILE
+#define NOSYSMETRICS
+#define NOSYSTEMPARAMSINFO
+#define NOMSG
+#define NOWINSTYLES
+#define NOWINOFFSETS
+#define NOSHOWWINDOW
+#define NODEFERWINDOWPOS
+#define NOVIRTUALKEYCODES
+#define NOKEYSTATES
+#define NOWH
+#define NOMENUS
+#define NOSCROLL
+#define NOCLIPBOARD
+#define NOICONS
+#define NOMB
+#define NOSYSCOMMANDS
+#define NOMDI
+#define NOCTLMGR
+#define NOWINMESSAGES
+#include <windows.h>
 #endif // if defined(_WIN32)
 
 namespace JsonTest {
 
-
 // class TestResult
 // //////////////////////////////////////////////////////////////////
 
-TestResult::TestResult()
-   : predicateId_( 1 )
-   , lastUsedPredicateId_( 0 )
-   , messageTarget_( 0 )
-{
-   // The root predicate has id 0
-   rootPredicateNode_.id_ = 0;
-   rootPredicateNode_.next_ = 0;
-   predicateStackTail_ = &rootPredicateNode_;
+TestResult::TestResult() {
+  // The root predicate has id 0
+  rootPredicateNode_.id_ = 0;
+  rootPredicateNode_.next_ = nullptr;
+  predicateStackTail_ = &rootPredicateNode_;
 }
 
+void TestResult::setTestName(const Json::String& name) { name_ = name; }
 
-void 
-TestResult::setTestName( const std::string &name )
-{
-   name_ = name;
+TestResult&
+TestResult::addFailure(const char* file, unsigned int line, const char* expr) {
+  /// Walks the PredicateContext stack adding them to failures_ if not already
+  /// added.
+  unsigned int nestingLevel = 0;
+  PredicateContext* lastNode = rootPredicateNode_.next_;
+  for (; lastNode != nullptr; lastNode = lastNode->next_) {
+    if (lastNode->id_ > lastUsedPredicateId_) // new PredicateContext
+    {
+      lastUsedPredicateId_ = lastNode->id_;
+      addFailureInfo(lastNode->file_, lastNode->line_, lastNode->expr_,
+                     nestingLevel);
+      // Link the PredicateContext to the failure for message target when
+      // popping the PredicateContext.
+      lastNode->failure_ = &(failures_.back());
+    }
+    ++nestingLevel;
+  }
+
+  // Adds the failed assertion
+  addFailureInfo(file, line, expr, nestingLevel);
+  messageTarget_ = &(failures_.back());
+  return *this;
 }
 
-TestResult &
-TestResult::addFailure( const char *file, unsigned int line,
-                        const char *expr )
-{
-   /// Walks the PredicateContext stack adding them to failures_ if not already added.
-   unsigned int nestingLevel = 0;
-   PredicateContext *lastNode = rootPredicateNode_.next_;
-   for ( ; lastNode != 0; lastNode = lastNode->next_ )
-   {
-      if ( lastNode->id_ > lastUsedPredicateId_ ) // new PredicateContext
-      {
-         lastUsedPredicateId_ = lastNode->id_;
-         addFailureInfo( lastNode->file_, lastNode->line_, lastNode->expr_,
-                         nestingLevel );
-         // Link the PredicateContext to the failure for message target when 
-         // popping the PredicateContext.
-         lastNode->failure_ = &( failures_.back() );
-      }
-      ++nestingLevel;
-   }
-
-   // Adds the failed assertion
-   addFailureInfo( file, line, expr, nestingLevel );
-   messageTarget_ = &( failures_.back() );
-   return *this;
+void TestResult::addFailureInfo(const char* file,
+                                unsigned int line,
+                                const char* expr,
+                                unsigned int nestingLevel) {
+  Failure failure;
+  failure.file_ = file;
+  failure.line_ = line;
+  if (expr) {
+    failure.expr_ = expr;
+  }
+  failure.nestingLevel_ = nestingLevel;
+  failures_.push_back(failure);
 }
 
-
-void 
-TestResult::addFailureInfo( const char *file, unsigned int line,
-                            const char *expr, unsigned int nestingLevel )
-{
-   Failure failure;
-   failure.file_ = file;
-   failure.line_ = line;
-   if ( expr )
-   {
-      failure.expr_ = expr;
-   }
-   failure.nestingLevel_ = nestingLevel;
-   failures_.push_back( failure );
+TestResult& TestResult::popPredicateContext() {
+  PredicateContext* lastNode = &rootPredicateNode_;
+  while (lastNode->next_ != nullptr && lastNode->next_->next_ != nullptr) {
+    lastNode = lastNode->next_;
+  }
+  // Set message target to popped failure
+  PredicateContext* tail = lastNode->next_;
+  if (tail != nullptr && tail->failure_ != nullptr) {
+    messageTarget_ = tail->failure_;
+  }
+  // Remove tail from list
+  predicateStackTail_ = lastNode;
+  lastNode->next_ = nullptr;
+  return *this;
 }
 
+bool TestResult::failed() const { return !failures_.empty(); }
 
-TestResult &
-TestResult::popPredicateContext()
-{
-   PredicateContext *lastNode = &rootPredicateNode_;
-   while ( lastNode->next_ != 0  &&  lastNode->next_->next_ != 0 )
-   {
-      lastNode = lastNode->next_;
-   }
-   // Set message target to popped failure
-   PredicateContext *tail = lastNode->next_;
-   if ( tail != 0  &&  tail->failure_ != 0 )
-   {
-      messageTarget_ = tail->failure_;
-   }
-   // Remove tail from list
-   predicateStackTail_ = lastNode;
-   lastNode->next_ = 0;
-   return *this;
+void TestResult::printFailure(bool printTestName) const {
+  if (failures_.empty()) {
+    return;
+  }
+
+  if (printTestName) {
+    printf("* Detail of %s test failure:\n", name_.c_str());
+  }
+
+  // Print in reverse to display the callstack in the right order
+  for (const auto& failure : failures_) {
+    Json::String indent(failure.nestingLevel_ * 2, ' ');
+    if (failure.file_) {
+      printf("%s%s(%u): ", indent.c_str(), failure.file_, failure.line_);
+    }
+    if (!failure.expr_.empty()) {
+      printf("%s\n", failure.expr_.c_str());
+    } else if (failure.file_) {
+      printf("\n");
+    }
+    if (!failure.message_.empty()) {
+      Json::String reindented = indentText(failure.message_, indent + "  ");
+      printf("%s\n", reindented.c_str());
+    }
+  }
 }
 
-
-bool 
-TestResult::failed() const
-{
-   return !failures_.empty();
+Json::String TestResult::indentText(const Json::String& text,
+                                    const Json::String& indent) {
+  Json::String reindented;
+  Json::String::size_type lastIndex = 0;
+  while (lastIndex < text.size()) {
+    Json::String::size_type nextIndex = text.find('\n', lastIndex);
+    if (nextIndex == Json::String::npos) {
+      nextIndex = text.size() - 1;
+    }
+    reindented += indent;
+    reindented += text.substr(lastIndex, nextIndex - lastIndex + 1);
+    lastIndex = nextIndex + 1;
+  }
+  return reindented;
 }
 
-
-unsigned int 
-TestResult::getAssertionNestingLevel() const
-{
-   unsigned int level = 0;
-   const PredicateContext *lastNode = &rootPredicateNode_;
-   while ( lastNode->next_ != 0 )
-   {
-      lastNode = lastNode->next_;
-      ++level;
-   }
-   return level;
+TestResult& TestResult::addToLastFailure(const Json::String& message) {
+  if (messageTarget_ != nullptr) {
+    messageTarget_->message_ += message;
+  }
+  return *this;
 }
 
-
-void 
-TestResult::printFailure( bool printTestName ) const
-{
-   if ( failures_.empty() )
-   {
-      return;
-   }
-
-   if ( printTestName )
-   {
-      printf( "* Detail of %s test failure:\n", name_.c_str() );
-   }
-
-   // Print in reverse to display the callstack in the right order
-   Failures::const_iterator itEnd = failures_.end();
-   for ( Failures::const_iterator it = failures_.begin(); it != itEnd; ++it )
-   {
-      const Failure &failure = *it;
-      std::string indent( failure.nestingLevel_ * 2, ' ' );
-      if ( failure.file_ )
-      {
-         printf( "%s%s(%d): ", indent.c_str(), failure.file_, failure.line_ );
-      }
-      if ( !failure.expr_.empty() )
-      {
-         printf( "%s\n", failure.expr_.c_str() );
-      }
-      else if ( failure.file_ )
-      {
-         printf( "\n" );
-      }
-      if ( !failure.message_.empty() )
-      {
-         std::string reindented = indentText( failure.message_, indent + "  " );
-         printf( "%s\n", reindented.c_str() );
-      }
-   }
+TestResult& TestResult::operator<<(Json::Int64 value) {
+  return addToLastFailure(Json::valueToString(value));
 }
 
-
-std::string 
-TestResult::indentText( const std::string &text, 
-                        const std::string &indent )
-{
-   std::string reindented;
-   std::string::size_type lastIndex = 0;
-   while ( lastIndex < text.size() )
-   {
-      std::string::size_type nextIndex = text.find( '\n', lastIndex );
-      if ( nextIndex == std::string::npos )
-      {
-         nextIndex = text.size() - 1;
-      }
-      reindented += indent;
-      reindented += text.substr( lastIndex, nextIndex - lastIndex + 1 );
-      lastIndex = nextIndex + 1;
-   }
-   return reindented;
+TestResult& TestResult::operator<<(Json::UInt64 value) {
+  return addToLastFailure(Json::valueToString(value));
 }
 
-
-TestResult &
-TestResult::addToLastFailure( const std::string &message )
-{
-   if ( messageTarget_ != 0 )
-   {
-      messageTarget_->message_ += message;
-   }
-   return *this;
+TestResult& TestResult::operator<<(bool value) {
+  return addToLastFailure(value ? "true" : "false");
 }
 
-TestResult &
-TestResult::operator << ( Json::Int64 value ) {
-   return addToLastFailure( Json::valueToString(value) );
-}
-
-
-TestResult &
-TestResult::operator << ( Json::UInt64 value ) {
-   return addToLastFailure( Json::valueToString(value) );
-}
-
-
-TestResult &
-TestResult::operator << ( bool value ) {
-   return addToLastFailure(value ? "true" : "false");
-}
-
-
 // class TestCase
 // //////////////////////////////////////////////////////////////////
 
-TestCase::TestCase()
-   : result_( 0 )
-{
+TestCase::TestCase() = default;
+
+TestCase::~TestCase() = default;
+
+void TestCase::run(TestResult& result) {
+  result_ = &result;
+  runTestCase();
 }
 
-
-TestCase::~TestCase()
-{
-}
-
-
-void 
-TestCase::run( TestResult &result )
-{
-   result_ = &result;
-   runTestCase();
-}
-
-
-
 // class Runner
 // //////////////////////////////////////////////////////////////////
 
-Runner::Runner()
-{
+Runner::Runner() = default;
+
+Runner& Runner::add(TestCaseFactory factory) {
+  tests_.push_back(factory);
+  return *this;
 }
 
+size_t Runner::testCount() const { return tests_.size(); }
 
-Runner &
-Runner::add( TestCaseFactory factory )
-{
-   tests_.push_back( factory );
-   return *this;
+Json::String Runner::testNameAt(size_t index) const {
+  TestCase* test = tests_[index]();
+  Json::String name = test->testName();
+  delete test;
+  return name;
 }
 
-
-unsigned int 
-Runner::testCount() const
-{
-   return static_cast<unsigned int>( tests_.size() );
-}
-
-
-std::string 
-Runner::testNameAt( unsigned int index ) const
-{
-   TestCase *test = tests_[index]();
-   std::string name = test->testName();
-   delete test;
-   return name;
-}
-
-
-void 
-Runner::runTestAt( unsigned int index, TestResult &result ) const
-{
-   TestCase *test = tests_[index]();
-   result.setTestName( test->testName() );
-   printf( "Testing %s: ", test->testName() );
-   fflush( stdout );
+void Runner::runTestAt(size_t index, TestResult& result) const {
+  TestCase* test = tests_[index]();
+  result.setTestName(test->testName());
+  printf("Testing %s: ", test->testName());
+  fflush(stdout);
 #if JSON_USE_EXCEPTION
-   try 
-   {
+  try {
 #endif // if JSON_USE_EXCEPTION
-      test->run( result );
+    test->run(result);
 #if JSON_USE_EXCEPTION
-   } 
-   catch ( const std::exception &e ) 
-   {
-      result.addFailure( __FILE__, __LINE__, 
-         "Unexpected exception caught:" ) << e.what();
-   }
+  } catch (const std::exception& e) {
+    result.addFailure(__FILE__, __LINE__, "Unexpected exception caught:")
+        << e.what();
+  }
 #endif // if JSON_USE_EXCEPTION
-   delete test;
-   const char *status = result.failed() ? "FAILED" 
-                                        : "OK";
-   printf( "%s\n", status );
-   fflush( stdout );
+  delete test;
+  const char* status = result.failed() ? "FAILED" : "OK";
+  printf("%s\n", status);
+  fflush(stdout);
 }
 
+bool Runner::runAllTest(bool printSummary) const {
+  size_t const count = testCount();
+  std::deque<TestResult> failures;
+  for (size_t index = 0; index < count; ++index) {
+    TestResult result;
+    runTestAt(index, result);
+    if (result.failed()) {
+      failures.push_back(result);
+    }
+  }
 
-bool 
-Runner::runAllTest( bool printSummary ) const
-{
-   unsigned int count = testCount();
-   std::deque<TestResult> failures;
-   for ( unsigned int index = 0; index < count; ++index )
-   {
-      TestResult result;
-      runTestAt( index, result );
-      if ( result.failed() )
-      {
-         failures.push_back( result );
-      }
-   }
+  if (failures.empty()) {
+    if (printSummary) {
+      printf("All %zu tests passed\n", count);
+    }
+    return true;
+  } else {
+    for (auto& result : failures) {
+      result.printFailure(count > 1);
+    }
 
-   if ( failures.empty() )
-   {
-      if ( printSummary )
-      {
-         printf( "All %d tests passed\n", count );
-      }
+    if (printSummary) {
+      size_t const failedCount = failures.size();
+      size_t const passedCount = count - failedCount;
+      printf("%zu/%zu tests passed (%zu failure(s))\n", passedCount, count,
+             failedCount);
+    }
+    return false;
+  }
+}
+
+bool Runner::testIndex(const Json::String& testName, size_t& indexOut) const {
+  const size_t count = testCount();
+  for (size_t index = 0; index < count; ++index) {
+    if (testNameAt(index) == testName) {
+      indexOut = index;
       return true;
-   }
-   else
-   {
-      for ( unsigned int index = 0; index < failures.size(); ++index )
-      {
-         TestResult &result = failures[index];
-         result.printFailure( count > 1 );
-      }
-
-      if ( printSummary )
-      {
-         unsigned int failedCount = static_cast<unsigned int>( failures.size() );
-         unsigned int passedCount = count - failedCount;
-         printf( "%d/%d tests passed (%d failure(s))\n", passedCount, count, failedCount );
-      }
-      return false;
-   }
+    }
+  }
+  return false;
 }
 
-
-bool 
-Runner::testIndex( const std::string &testName, 
-                   unsigned int &indexOut ) const
-{
-   unsigned int count = testCount();
-   for ( unsigned int index = 0; index < count; ++index )
-   {
-      if ( testNameAt(index) == testName )
-      {
-         indexOut = index;
-         return true;
-      }
-   }
-   return false;
+void Runner::listTests() const {
+  const size_t count = testCount();
+  for (size_t index = 0; index < count; ++index) {
+    printf("%s\n", testNameAt(index).c_str());
+  }
 }
 
-
-void 
-Runner::listTests() const
-{
-   unsigned int count = testCount();
-   for ( unsigned int index = 0; index < count; ++index )
-   {
-      printf( "%s\n", testNameAt( index ).c_str() );
-   }
+int Runner::runCommandLine(int argc, const char* argv[]) const {
+  // typedef std::deque<String> TestNames;
+  Runner subrunner;
+  for (int index = 1; index < argc; ++index) {
+    Json::String opt = argv[index];
+    if (opt == "--list-tests") {
+      listTests();
+      return 0;
+    } else if (opt == "--test-auto") {
+      preventDialogOnCrash();
+    } else if (opt == "--test") {
+      ++index;
+      if (index < argc) {
+        size_t testNameIndex;
+        if (testIndex(argv[index], testNameIndex)) {
+          subrunner.add(tests_[testNameIndex]);
+        } else {
+          fprintf(stderr, "Test '%s' does not exist!\n", argv[index]);
+          return 2;
+        }
+      } else {
+        printUsage(argv[0]);
+        return 2;
+      }
+    } else {
+      printUsage(argv[0]);
+      return 2;
+    }
+  }
+  bool succeeded;
+  if (subrunner.testCount() > 0) {
+    succeeded = subrunner.runAllTest(subrunner.testCount() > 1);
+  } else {
+    succeeded = runAllTest(true);
+  }
+  return succeeded ? 0 : 1;
 }
 
-
-int 
-Runner::runCommandLine( int argc, const char *argv[] ) const
-{
-   typedef std::deque<std::string> TestNames;
-   Runner subrunner;
-   for ( int index = 1; index < argc; ++index )
-   {
-      std::string opt = argv[index];
-      if ( opt == "--list-tests" )
-      {
-         listTests();
-         return 0;
-      }
-      else if ( opt == "--test-auto" )
-      {
-         preventDialogOnCrash();
-      }
-      else if ( opt == "--test" )
-      {
-         ++index;
-         if ( index < argc )
-         {
-            unsigned int testNameIndex;
-            if ( testIndex( argv[index], testNameIndex ) )
-            {
-               subrunner.add( tests_[testNameIndex] );
-            }
-            else
-            {
-               fprintf( stderr, "Test '%s' does not exist!\n", argv[index] );
-               return 2;
-            }
-         }
-         else
-         {
-            printUsage( argv[0] );
-            return 2;
-         }
-      }
-      else
-      {
-         printUsage( argv[0] );
-         return 2;
-      }
-   }
-   bool succeeded;
-   if ( subrunner.testCount() > 0 )
-   {
-      succeeded = subrunner.runAllTest( subrunner.testCount() > 1 );
-   }
-   else
-   {
-      succeeded = runAllTest( true );
-   }
-   return succeeded ? 0 
-                    : 1;
-}
-
-
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && defined(_DEBUG)
 // Hook MSVCRT assertions to prevent dialog from appearing
-static int 
-msvcrtSilentReportHook( int reportType, char *message, int *returnValue )
-{
-   // The default CRT handling of error and assertion is to display
-   // an error dialog to the user.
-   // Instead, when an error or an assertion occurs, we force the 
-   // application to terminate using abort() after display
-   // the message on stderr. 
-   if ( reportType == _CRT_ERROR  ||  
-        reportType == _CRT_ASSERT )
-   {
-      // calling abort() cause the ReportHook to be called
-      // The following is used to detect this case and let's the
-      // error handler fallback on its default behaviour (
-      // display a warning message)
-      static volatile bool isAborting = false;
-      if ( isAborting ) 
-      {
-         return TRUE;
-      }
-      isAborting = true;
+static int
+msvcrtSilentReportHook(int reportType, char* message, int* /*returnValue*/) {
+  // The default CRT handling of errors and assertions is to display
+  // an error dialog to the user.
+  // Instead, when an error or an assertion occurs, we force the
+  // application to terminate using abort() after displaying
+  // the message on stderr.
+  if (reportType == _CRT_ERROR || reportType == _CRT_ASSERT) {
+    // calling abort() causes the ReportHook to be called;
+    // the following is used to detect this case and lets the
+    // error handler fall back on its default behaviour
+    // (displaying a warning message).
+    static volatile bool isAborting = false;
+    if (isAborting) {
+      return TRUE;
+    }
+    isAborting = true;
 
-      fprintf( stderr, "CRT Error/Assert:\n%s\n", message );
-      fflush( stderr );
-      abort();
-   }
-   // Let's other reportType (_CRT_WARNING) be handled as they would by default
-   return FALSE;
+    fprintf(stderr, "CRT Error/Assert:\n%s\n", message);
+    fflush(stderr);
+    abort();
+  }
+  // Lets other reportTypes (_CRT_WARNING) be handled as they would be by default
+  return FALSE;
 }
 #endif // if defined(_MSC_VER)
 
-
-void 
-Runner::preventDialogOnCrash()
-{
-#if defined(_MSC_VER)
-   // Install a hook to prevent MSVCRT error and assertion from
-   // popping a dialog.
-   _CrtSetReportHook( &msvcrtSilentReportHook );
+void Runner::preventDialogOnCrash() {
+#if defined(_MSC_VER) && defined(_DEBUG)
+  // Install a hook to prevent MSVCRT errors and assertions from
+  // popping a dialog.
+  // This function is a NO-OP in the release configuration
+  // (which causes a warning, since msvcrtSilentReportHook is then unreferenced).
+  _CrtSetReportHook(&msvcrtSilentReportHook);
 #endif // if defined(_MSC_VER)
 
-   // @todo investiguate this handler (for buffer overflow)
-   // _set_security_error_handler
+  // @todo investigate this handler (for buffer overflow)
+  // _set_security_error_handler
 
 #if defined(_WIN32)
-   // Prevents the system from popping a dialog for debugging if the
-   // application fails due to invalid memory access.
-   SetErrorMode( SEM_FAILCRITICALERRORS 
-                 | SEM_NOGPFAULTERRORBOX 
-                 | SEM_NOOPENFILEERRORBOX );
+  // Prevents the system from popping a dialog for debugging if the
+  // application fails due to invalid memory access.
+  SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
+               SEM_NOOPENFILEERRORBOX);
 #endif // if defined(_WIN32)
 }
 
-void 
-Runner::printUsage( const char *appName )
-{
-   printf( 
-      "Usage: %s [options]\n"
-      "\n"
-      "If --test is not specified, then all the test cases be run.\n"
-      "\n"
-      "Valid options:\n"
-      "--list-tests: print the name of all test cases on the standard\n"
-      "              output and exit.\n"
-      "--test TESTNAME: executes the test case with the specified name.\n"
-      "                 May be repeated.\n"
-      "--test-auto: prevent dialog prompting for debugging on crash.\n"
-      , appName );
+void Runner::printUsage(const char* appName) {
+  printf("Usage: %s [options]\n"
+         "\n"
+         "If --test is not specified, then all the test cases be run.\n"
+         "\n"
+         "Valid options:\n"
+         "--list-tests: print the name of all test cases on the standard\n"
+         "              output and exit.\n"
+         "--test TESTNAME: executes the test case with the specified name.\n"
+         "                 May be repeated.\n"
+         "--test-auto: prevent dialog prompting for debugging on crash.\n",
+         appName);
 }
 
-
-
 // Assertion functions
 // //////////////////////////////////////////////////////////////////
 
-TestResult &
-checkStringEqual( TestResult &result, 
-                  const std::string &expected, const std::string &actual,
-                  const char *file, unsigned int line, const char *expr )
-{
-   if ( expected != actual )
-   {
-      result.addFailure( file, line, expr );
-      result << "Expected: '" << expected << "'\n";
-      result << "Actual  : '" << actual << "'";
-   }
-   return result;
+Json::String ToJsonString(const char* toConvert) {
+  return Json::String(toConvert);
 }
 
+Json::String ToJsonString(Json::String in) { return in; }
+
+#if JSONCPP_USING_SECURE_MEMORY
+Json::String ToJsonString(std::string in) {
+  return Json::String(in.data(), in.data() + in.length());
+}
+#endif
+
+TestResult& checkStringEqual(TestResult& result,
+                             const Json::String& expected,
+                             const Json::String& actual,
+                             const char* file,
+                             unsigned int line,
+                             const char* expr) {
+  if (expected != actual) {
+    result.addFailure(file, line, expr);
+    result << "Expected: '" << expected << "'\n";
+    result << "Actual  : '" << actual << "'";
+  }
+  return result;
+}
 
 } // namespace JsonTest
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.h b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.h
index 207692b..e9c11a4 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.h
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/jsontest.h
@@ -1,18 +1,18 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
 #ifndef JSONTEST_H_INCLUDED
-# define JSONTEST_H_INCLUDED
+#define JSONTEST_H_INCLUDED
 
-# include <json/config.h>
-# include <json/value.h>
-# include <json/writer.h>
-# include <stdio.h>
-# include <deque>
-# include <sstream>
-# include <string>
+#include <cstdio>
+#include <deque>
+#include <json/config.h>
+#include <json/value.h>
+#include <json/writer.h>
+#include <sstream>
+#include <string>
 
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
@@ -20,8 +20,6 @@
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 
-
-
 /** \brief Unit testing framework.
  * \warning: all assertions are non-aborting, test case execution will continue
  *           even if an assertion namespace.
@@ -30,244 +28,249 @@
  */
 namespace JsonTest {
 
+class Failure {
+public:
+  const char* file_;
+  unsigned int line_;
+  Json::String expr_;
+  Json::String message_;
+  unsigned int nestingLevel_;
+};
 
-   class Failure
-   {
-   public:
-      const char *file_;
-      unsigned int line_;
-      std::string expr_;
-      std::string message_;
-      unsigned int nestingLevel_;
-   };
+/// Context used to create the assertion callstack on failure.
+/// Must be a POD to allow inline initialisation without stepping
+/// into the debugger.
+struct PredicateContext {
+  typedef unsigned int Id;
+  Id id_;
+  const char* file_;
+  unsigned int line_;
+  const char* expr_;
+  PredicateContext* next_;
+  /// Related Failure, set when the PredicateContext is converted
+  /// into a Failure.
+  Failure* failure_;
+};
 
+class TestResult {
+public:
+  TestResult();
 
-   /// Context used to create the assertion callstack on failure.
-   /// Must be a POD to allow inline initialisation without stepping 
-   /// into the debugger.
-   struct PredicateContext
-   {
-      typedef unsigned int Id;
-      Id id_;
-      const char *file_;
-      unsigned int line_;
-      const char *expr_;
-      PredicateContext *next_;
-      /// Related Failure, set when the PredicateContext is converted
-      /// into a Failure.
-      Failure *failure_;
-   };
+  /// \internal Implementation detail for assertion macros
+  /// Not encapsulated to prevent stepping into it when debugging failed assertions
+  /// Incremented by one on assertion predicate entry, decreased by one
+  /// by addPredicateContext().
+  PredicateContext::Id predicateId_{1};
 
-   class TestResult
-   {
-   public:
-      TestResult();
+  /// \internal Implementation detail for predicate macros
+  PredicateContext* predicateStackTail_;
 
-      /// \internal Implementation detail for assertion macros
-      /// Not encapsulated to prevent step into when debugging failed assertions
-      /// Incremented by one on assertion predicate entry, decreased by one
-      /// by addPredicateContext().
-      PredicateContext::Id predicateId_;
+  void setTestName(const Json::String& name);
 
-      /// \internal Implementation detail for predicate macros
-      PredicateContext *predicateStackTail_;
+  /// Adds an assertion failure.
+  TestResult&
+  addFailure(const char* file, unsigned int line, const char* expr = nullptr);
 
-      void setTestName( const std::string &name );
+  /// Removes the last PredicateContext added to the predicate stack
+  /// chained list.
+  /// Next messages will be targeted at the PredicateContext that was removed.
+  TestResult& popPredicateContext();
 
-      /// Adds an assertion failure.
-      TestResult &addFailure( const char *file, unsigned int line,
-                              const char *expr = 0 );
+  bool failed() const;
 
-      /// Removes the last PredicateContext added to the predicate stack
-      /// chained list.
-      /// Next messages will be targed at the PredicateContext that was removed.
-      TestResult &popPredicateContext();
+  void printFailure(bool printTestName) const;
 
-      bool failed() const;
+  // Generic operator that will work with anything ostream can deal with.
+  template <typename T> TestResult& operator<<(const T& value) {
+    Json::OStringStream oss;
+    oss.precision(16);
+    oss.setf(std::ios_base::floatfield);
+    oss << value;
+    return addToLastFailure(oss.str());
+  }
 
-      void printFailure( bool printTestName ) const;
+  // Specialized versions.
+  TestResult& operator<<(bool value);
+  // std::ostream does not support 64-bit integers on all STL implementations
+  TestResult& operator<<(Json::Int64 value);
+  TestResult& operator<<(Json::UInt64 value);
 
-      // Generic operator that will work with anything ostream can deal with.
-      template <typename T>
-      TestResult &operator << ( const T& value ) {
-         std::ostringstream oss;
-         oss.precision( 16 );
-         oss.setf( std::ios_base::floatfield );
-         oss << value;
-         return addToLastFailure(oss.str());
-      }
+private:
+  TestResult& addToLastFailure(const Json::String& message);
+  /// Adds a failure or a predicate context
+  void addFailureInfo(const char* file,
+                      unsigned int line,
+                      const char* expr,
+                      unsigned int nestingLevel);
+  static Json::String indentText(const Json::String& text,
+                                 const Json::String& indent);
 
-      // Specialized versions.
-      TestResult &operator << ( bool value );
-      // std:ostream does not support 64bits integers on all STL implementation
-      TestResult &operator << ( Json::Int64 value );
-      TestResult &operator << ( Json::UInt64 value );
+  typedef std::deque<Failure> Failures;
+  Failures failures_;
+  Json::String name_;
+  PredicateContext rootPredicateNode_;
+  PredicateContext::Id lastUsedPredicateId_{0};
+  /// Failure which is the target of the messages added using operator <<
+  Failure* messageTarget_{nullptr};
+};
 
-   private:
-      TestResult &addToLastFailure( const std::string &message );
-      unsigned int getAssertionNestingLevel() const;
-      /// Adds a failure or a predicate context
-      void addFailureInfo( const char *file, unsigned int line,
-                           const char *expr, unsigned int nestingLevel  );
-      static std::string indentText( const std::string &text, 
-                                     const std::string &indent );
+class TestCase {
+public:
+  TestCase();
 
-      typedef std::deque<Failure> Failures;
-      Failures failures_;
-      std::string name_;
-      PredicateContext rootPredicateNode_;
-      PredicateContext::Id lastUsedPredicateId_;
-      /// Failure which is the target of the messages added using operator <<
-      Failure *messageTarget_;
-   };
+  virtual ~TestCase();
 
+  void run(TestResult& result);
 
-   class TestCase
-   {
-   public:
-      TestCase();
+  virtual const char* testName() const = 0;
 
-      virtual ~TestCase();
+protected:
+  TestResult* result_{nullptr};
 
-      void run( TestResult &result );
+private:
+  virtual void runTestCase() = 0;
+};
 
-      virtual const char *testName() const = 0;
+/// Function pointer type for TestCase factory
+typedef TestCase* (*TestCaseFactory)();
 
-   protected:
-      TestResult *result_;
+class Runner {
+public:
+  Runner();
 
-   private:
-      virtual void runTestCase() = 0;
-   };
+  /// Adds a test to the suite
+  Runner& add(TestCaseFactory factory);
 
-   /// Function pointer type for TestCase factory
-   typedef TestCase *(*TestCaseFactory)();
+  /// Runs tests as specified on the command line.
+  /// If no command-line arguments are provided, run all tests.
+  /// If --list-tests is provided, then print the list of all test cases.
+  /// If --test <testname> is provided, then run the test <testname>.
+  int runCommandLine(int argc, const char* argv[]) const;
 
-   class Runner
-   {
-   public:
-      Runner();
+  /// Runs all the test cases
+  bool runAllTest(bool printSummary) const;
 
-      /// Adds a test to the suite
-      Runner &add( TestCaseFactory factory );
+  /// Returns the number of test cases in the suite
+  size_t testCount() const;
 
-      /// Runs test as specified on the command-line
-      /// If no command-line arguments are provided, run all tests.
-      /// If --list-tests is provided, then print the list of all test cases
-      /// If --test <testname> is provided, then run test testname.
-      int runCommandLine( int argc, const char *argv[] ) const;
+  /// Returns the name of the test case at the specified index
+  Json::String testNameAt(size_t index) const;
 
-      /// Runs all the test cases
-      bool runAllTest( bool printSummary ) const;
+  /// Runs the test case at the specified index using the specified TestResult
+  void runTestAt(size_t index, TestResult& result) const;
 
-      /// Returns the number of test case in the suite
-      unsigned int testCount() const;
+  static void printUsage(const char* appName);
 
-      /// Returns the name of the test case at the specified index
-      std::string testNameAt( unsigned int index ) const;
+private: // prevents copy construction and assignment
+  Runner(const Runner& other) = delete;
+  Runner& operator=(const Runner& other) = delete;
 
-      /// Runs the test case at the specified index using the specified TestResult
-      void runTestAt( unsigned int index, TestResult &result ) const;
+private:
+  void listTests() const;
+  bool testIndex(const Json::String& testName, size_t& indexOut) const;
+  static void preventDialogOnCrash();
 
-      static void printUsage( const char *appName );
+private:
+  typedef std::deque<TestCaseFactory> Factories;
+  Factories tests_;
+};
 
-   private: // prevents copy construction and assignment
-      Runner( const Runner &other );
-      Runner &operator =( const Runner &other );
+template <typename T, typename U>
+TestResult& checkEqual(TestResult& result,
+                       T expected,
+                       U actual,
+                       const char* file,
+                       unsigned int line,
+                       const char* expr) {
+  if (static_cast<U>(expected) != actual) {
+    result.addFailure(file, line, expr);
+    result << "Expected: " << static_cast<U>(expected) << "\n";
+    result << "Actual  : " << actual;
+  }
+  return result;
+}
 
-   private:
-      void listTests() const;
-      bool testIndex( const std::string &testName, unsigned int &index ) const;
-      static void preventDialogOnCrash();
+Json::String ToJsonString(const char* toConvert);
+Json::String ToJsonString(Json::String in);
+#if JSONCPP_USING_SECURE_MEMORY
+Json::String ToJsonString(std::string in);
+#endif
 
-   private:
-      typedef std::deque<TestCaseFactory> Factories;
-      Factories tests_;
-   };
-
-   template<typename T, typename U>
-   TestResult &
-   checkEqual( TestResult &result, const T &expected, const U &actual, 
-               const char *file, unsigned int line, const char *expr )
-   {
-      if ( expected != actual )
-      {
-         result.addFailure( file, line, expr );
-         result << "Expected: " << expected << "\n";
-         result << "Actual  : " << actual;
-      }
-      return result;
-   }
-
-
-   TestResult &
-   checkStringEqual( TestResult &result, 
-                     const std::string &expected, const std::string &actual,
-                     const char *file, unsigned int line, const char *expr );
+TestResult& checkStringEqual(TestResult& result,
+                             const Json::String& expected,
+                             const Json::String& actual,
+                             const char* file,
+                             unsigned int line,
+                             const char* expr);
 
 } // namespace JsonTest
 
-
 /// \brief Asserts that the given expression is true.
 /// JSONTEST_ASSERT( x == y ) << "x=" << x << ", y=" << y;
 /// JSONTEST_ASSERT( x == y );
-#define JSONTEST_ASSERT( expr )                                               \
-   if ( expr )                                                                \
-   {                                                                          \
-   }                                                                          \
-   else                                                                       \
-      result_->addFailure( __FILE__, __LINE__, #expr )
+#define JSONTEST_ASSERT(expr)                                                  \
+  if (expr) {                                                                  \
+  } else                                                                       \
+    result_->addFailure(__FILE__, __LINE__, #expr)
 
 /// \brief Asserts that the given predicate is true.
-/// The predicate may do other assertions and be a member function of the fixture.
-#define JSONTEST_ASSERT_PRED( expr )                                    \
-   {                                                                    \
-      JsonTest::PredicateContext _minitest_Context = {                  \
-         result_->predicateId_, __FILE__, __LINE__, #expr };            \
-      result_->predicateStackTail_->next_ = &_minitest_Context;         \
-      result_->predicateId_ += 1;                                       \
-      result_->predicateStackTail_ = &_minitest_Context;                \
-      (expr);                                                           \
-      result_->popPredicateContext();                                   \
-   }                                                                    \
-   *result_
+/// The predicate may do other assertions and be a member function of the
+/// fixture.
+#define JSONTEST_ASSERT_PRED(expr)                                             \
+  {                                                                            \
+    JsonTest::PredicateContext _minitest_Context = {                           \
+        result_->predicateId_, __FILE__, __LINE__, #expr, NULL, NULL};         \
+    result_->predicateStackTail_->next_ = &_minitest_Context;                  \
+    result_->predicateId_ += 1;                                                \
+    result_->predicateStackTail_ = &_minitest_Context;                         \
+    (expr);                                                                    \
+    result_->popPredicateContext();                                            \
+  }
 
 /// \brief Asserts that two values are equals.
-#define JSONTEST_ASSERT_EQUAL( expected, actual )          \
-   JsonTest::checkEqual( *result_, expected, actual,       \
-                         __FILE__, __LINE__,               \
-                         #expected " == " #actual )
+#define JSONTEST_ASSERT_EQUAL(expected, actual)                                \
+  JsonTest::checkEqual(*result_, expected, actual, __FILE__, __LINE__,         \
+                       #expected " == " #actual)
 
 /// \brief Asserts that two values are equals.
-#define JSONTEST_ASSERT_STRING_EQUAL( expected, actual ) \
-   JsonTest::checkStringEqual( *result_,                 \
-      std::string(expected), std::string(actual),        \
-      __FILE__, __LINE__,                                \
-      #expected " == " #actual )
+#define JSONTEST_ASSERT_STRING_EQUAL(expected, actual)                         \
+  JsonTest::checkStringEqual(*result_, JsonTest::ToJsonString(expected),       \
+                             JsonTest::ToJsonString(actual), __FILE__,         \
+                             __LINE__, #expected " == " #actual)
+
+/// \brief Asserts that a given expression throws an exception
+#define JSONTEST_ASSERT_THROWS(expr)                                           \
+  {                                                                            \
+    bool _threw = false;                                                       \
+    try {                                                                      \
+      expr;                                                                    \
+    } catch (...) {                                                            \
+      _threw = true;                                                           \
+    }                                                                          \
+    if (!_threw)                                                               \
+      result_->addFailure(__FILE__, __LINE__,                                  \
+                          "expected exception thrown: " #expr);                \
+  }
 
 /// \brief Begin a fixture test case.
-#define JSONTEST_FIXTURE( FixtureType, name )                  \
-   class Test##FixtureType##name : public FixtureType          \
-   {                                                           \
-   public:                                                     \
-      static JsonTest::TestCase *factory()                     \
-      {                                                        \
-         return new Test##FixtureType##name();                 \
-      }                                                        \
-   public: /* overidden from TestCase */                       \
-      virtual const char *testName() const                     \
-      {                                                        \
-         return #FixtureType "/" #name;                        \
-      }                                                        \
-      virtual void runTestCase();                              \
-   };                                                          \
-                                                               \
-   void Test##FixtureType##name::runTestCase()
+#define JSONTEST_FIXTURE(FixtureType, name)                                    \
+  class Test##FixtureType##name : public FixtureType {                         \
+  public:                                                                      \
+    static JsonTest::TestCase* factory() {                                     \
+      return new Test##FixtureType##name();                                    \
+    }                                                                          \
+                                                                               \
+  public: /* overridden from TestCase */                                       \
+    const char* testName() const override { return #FixtureType "/" #name; }   \
+    void runTestCase() override;                                               \
+  };                                                                           \
+                                                                               \
+  void Test##FixtureType##name::runTestCase()
 
-#define JSONTEST_FIXTURE_FACTORY( FixtureType, name ) \
-   &Test##FixtureType##name::factory
+#define JSONTEST_FIXTURE_FACTORY(FixtureType, name)                            \
+  &Test##FixtureType##name::factory
 
-#define JSONTEST_REGISTER_FIXTURE( runner, FixtureType, name ) \
-   (runner).add( JSONTEST_FIXTURE_FACTORY( FixtureType, name ) )
+#define JSONTEST_REGISTER_FIXTURE(runner, FixtureType, name)                   \
+  (runner).add(JSONTEST_FIXTURE_FACTORY(FixtureType, name))
 
 #endif // ifndef JSONTEST_H_INCLUDED
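
For context only (not part of the patch): a minimal sketch of how the reformatted fixture macros above are typically wired together, assuming the JsonTest::Runner interface (add() and runCommandLine()) declared earlier in this header; the fixture name MyFixture and its trivial assertion are illustrative, not taken from the patch.

#include "jsontest.h"

struct MyFixture : JsonTest::TestCase {};

// JSONTEST_FIXTURE expands to a Test##FixtureType##name class plus the opening
// of its runTestCase() definition, so the test body follows the macro directly.
JSONTEST_FIXTURE(MyFixture, addition) {
  JSONTEST_ASSERT_EQUAL(2, 1 + 1);
}

int main(int argc, const char* argv[]) {
  JsonTest::Runner runner;
  // Registers &TestMyFixtureaddition::factory via JSONTEST_FIXTURE_FACTORY.
  JSONTEST_REGISTER_FIXTURE(runner, MyFixture, addition);
  return runner.runCommandLine(argc, argv);
}
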
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/main.cpp b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/main.cpp
index c6ab619..fcda60e 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/main.cpp
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/main.cpp
@@ -1,11 +1,26 @@
-// Copyright 2007-2010 Baptiste Lepilleur
+// Copyright 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
 // Distributed under MIT license, or public domain if desired and
 // recognized in your jurisdiction.
 // See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(_MSC_VER)
+#pragma warning(disable : 4996)
+#endif
+
+#include "fuzz.h"
+#include "jsontest.h"
+#include <cmath>
+#include <cstring>
+#include <iomanip>
+#include <iostream>
 #include <json/config.h>
 #include <json/json.h>
-#include "jsontest.h"
+#include <limits>
+#include <sstream>
+#include <string>
 
 // Make numeric limits more convenient to talk about.
 // Assumes int type in 32 bits.
@@ -16,12 +31,11 @@
 #define kint64min Json::Value::minInt64
 #define kuint64max Json::Value::maxUInt64
 
-static const double kdint64max = double(kint64max);
-static const float kfint64max = float(kint64max);
+// static const double kdint64max = double(kint64max);
+// static const float kfint64max = float(kint64max);
 static const float kfint32max = float(kint32max);
 static const float kfuint32max = float(kuint32max);
 
-
 // //////////////////////////////////////////////////////////////////
 // //////////////////////////////////////////////////////////////////
 // Json Library test cases
@@ -29,1396 +43,2615 @@
 // //////////////////////////////////////////////////////////////////
 
 #if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-static inline double uint64ToDouble( Json::UInt64 value )
-{
-    return static_cast<double>( value );
+static inline double uint64ToDouble(Json::UInt64 value) {
+  return static_cast<double>(value);
 }
-#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
-static inline double uint64ToDouble( Json::UInt64 value )
-{
-    return static_cast<double>( Json::Int64(value/2) ) * 2.0 + Json::Int64(value & 1);
+#else  // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
+static inline double uint64ToDouble(Json::UInt64 value) {
+  return static_cast<double>(Json::Int64(value / 2)) * 2.0 +
+         static_cast<double>(Json::Int64(value & 1));
 }
 #endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION)
 
-struct ValueTest : JsonTest::TestCase
-{
-   Json::Value null_;
-   Json::Value emptyArray_;
-   Json::Value emptyObject_;
-   Json::Value integer_;
-   Json::Value unsignedInteger_;
-   Json::Value smallUnsignedInteger_;
-   Json::Value real_;
-   Json::Value float_;
-   Json::Value array1_;
-   Json::Value object1_;
-   Json::Value emptyString_;
-   Json::Value string1_;
-   Json::Value string_;
-   Json::Value true_;
-   Json::Value false_;
+struct ValueTest : JsonTest::TestCase {
+  Json::Value null_;
+  Json::Value emptyArray_;
+  Json::Value emptyObject_;
+  Json::Value integer_;
+  Json::Value unsignedInteger_;
+  Json::Value smallUnsignedInteger_;
+  Json::Value real_;
+  Json::Value float_;
+  Json::Value array1_;
+  Json::Value object1_;
+  Json::Value emptyString_;
+  Json::Value string1_;
+  Json::Value string_;
+  Json::Value true_;
+  Json::Value false_;
 
+  ValueTest()
+      : emptyArray_(Json::arrayValue), emptyObject_(Json::objectValue),
+        integer_(123456789), unsignedInteger_(34567890u),
+        smallUnsignedInteger_(Json::Value::UInt(Json::Value::maxInt)),
+        real_(1234.56789), float_(0.00390625f), emptyString_(""), string1_("a"),
+        string_("sometext with space"), true_(true), false_(false) {
+    array1_.append(1234);
+    object1_["id"] = 1234;
+  }
 
-   ValueTest()
-      : emptyArray_( Json::arrayValue )
-      , emptyObject_( Json::objectValue )
-      , integer_( 123456789 )
-      , smallUnsignedInteger_( Json::Value::UInt( Json::Value::maxInt ) )
-      , unsignedInteger_( 34567890u )
-      , real_( 1234.56789 )
-      , float_( 0.00390625f )
-      , emptyString_( "" )
-      , string1_( "a" )
-      , string_( "sometext with space" )
-      , true_( true )
-      , false_( false )
-   {
-      array1_.append( 1234 );
-      object1_["id"] = 1234;
-   }
+  struct IsCheck {
+    /// Initialize all checks to \c false by default.
+    IsCheck();
 
-   struct IsCheck
-   {
-      /// Initialize all checks to \c false by default.
-      IsCheck();
+    bool isObject_{false};
+    bool isArray_{false};
+    bool isBool_{false};
+    bool isString_{false};
+    bool isNull_{false};
 
-      bool isObject_;
-      bool isArray_;
-      bool isBool_;
-      bool isString_;
-      bool isNull_;
+    bool isInt_{false};
+    bool isInt64_{false};
+    bool isUInt_{false};
+    bool isUInt64_{false};
+    bool isIntegral_{false};
+    bool isDouble_{false};
+    bool isNumeric_{false};
+  };
 
-      bool isInt_;
-      bool isInt64_;
-      bool isUInt_;
-      bool isUInt64_;
-      bool isIntegral_;
-      bool isDouble_;
-      bool isNumeric_;
-   };
+  void checkConstMemberCount(const Json::Value& value,
+                             unsigned int expectedCount);
 
-   void checkConstMemberCount( const Json::Value &value, unsigned int expectedCount );
+  void checkMemberCount(Json::Value& value, unsigned int expectedCount);
 
-   void checkMemberCount( Json::Value &value, unsigned int expectedCount );
+  void checkIs(const Json::Value& value, const IsCheck& check);
 
-   void checkIs( const Json::Value &value, const IsCheck &check );
+  void checkIsLess(const Json::Value& x, const Json::Value& y);
 
-   void checkIsLess( const Json::Value &x, const Json::Value &y );
+  void checkIsEqual(const Json::Value& x, const Json::Value& y);
 
-   void checkIsEqual( const Json::Value &x, const Json::Value &y );
-
-   /// Normalize the representation of floating-point number by stripped leading 0 in exponent.
-   static std::string normalizeFloatingPointStr( const std::string &s );
+  /// Normalize the representation of a floating-point number by stripping the
+  /// leading 0 in its exponent.
+  static Json::String normalizeFloatingPointStr(const Json::String& s);
 };
 
-
-std::string 
-ValueTest::normalizeFloatingPointStr( const std::string &s )
-{
-    std::string::size_type index = s.find_last_of( "eE" );
-    if ( index != std::string::npos )
+Json::String ValueTest::normalizeFloatingPointStr(const Json::String& s) {
+  Json::String::size_type index = s.find_last_of("eE");
+  if (index != Json::String::npos) {
+    Json::String::size_type hasSign =
+        (s[index + 1] == '+' || s[index + 1] == '-') ? 1 : 0;
+    Json::String::size_type exponentStartIndex = index + 1 + hasSign;
+    Json::String normalized = s.substr(0, exponentStartIndex);
+    Json::String::size_type indexDigit =
+        s.find_first_not_of('0', exponentStartIndex);
+    Json::String exponent = "0";
+    if (indexDigit != Json::String::npos) // There is an exponent different
+                                          // from 0
     {
-        std::string::size_type hasSign = (s[index+1] == '+' || s[index+1] == '-') ? 1 : 0;
-        std::string::size_type exponentStartIndex = index + 1 + hasSign;
-        std::string normalized = s.substr( 0, exponentStartIndex );
-        std::string::size_type indexDigit = s.find_first_not_of( '0', exponentStartIndex );
-        std::string exponent = "0";
-        if ( indexDigit != std::string::npos ) // There is an exponent different from 0
-        {
-            exponent = s.substr( indexDigit );
-        }
-        return normalized + exponent;
+      exponent = s.substr(indexDigit);
     }
-    return s;
+    return normalized + exponent;
+  }
+  return s;
 }
 
-
-JSONTEST_FIXTURE( ValueTest, checkNormalizeFloatingPointStr )
-{
-    JSONTEST_ASSERT_STRING_EQUAL( "0.0", normalizeFloatingPointStr("0.0") );
-    JSONTEST_ASSERT_STRING_EQUAL( "0e0", normalizeFloatingPointStr("0e0") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234.0", normalizeFloatingPointStr("1234.0") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234.0e0", normalizeFloatingPointStr("1234.0e0") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234.0e+0", normalizeFloatingPointStr("1234.0e+0") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e-1", normalizeFloatingPointStr("1234e-1") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e10", normalizeFloatingPointStr("1234e10") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e10", normalizeFloatingPointStr("1234e010") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e+10", normalizeFloatingPointStr("1234e+010") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e-10", normalizeFloatingPointStr("1234e-010") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e+100", normalizeFloatingPointStr("1234e+100") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e-100", normalizeFloatingPointStr("1234e-100") );
-    JSONTEST_ASSERT_STRING_EQUAL( "1234e+1", normalizeFloatingPointStr("1234e+001") );
+JSONTEST_FIXTURE(ValueTest, checkNormalizeFloatingPointStr) {
+  JSONTEST_ASSERT_STRING_EQUAL("0.0", normalizeFloatingPointStr("0.0"));
+  JSONTEST_ASSERT_STRING_EQUAL("0e0", normalizeFloatingPointStr("0e0"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234.0", normalizeFloatingPointStr("1234.0"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234.0e0",
+                               normalizeFloatingPointStr("1234.0e0"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234.0e+0",
+                               normalizeFloatingPointStr("1234.0e+0"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e-1", normalizeFloatingPointStr("1234e-1"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e10", normalizeFloatingPointStr("1234e10"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e10",
+                               normalizeFloatingPointStr("1234e010"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e+10",
+                               normalizeFloatingPointStr("1234e+010"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e-10",
+                               normalizeFloatingPointStr("1234e-010"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e+100",
+                               normalizeFloatingPointStr("1234e+100"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e-100",
+                               normalizeFloatingPointStr("1234e-100"));
+  JSONTEST_ASSERT_STRING_EQUAL("1234e+1",
+                               normalizeFloatingPointStr("1234e+001"));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, memberCount )
-{
-   JSONTEST_ASSERT_PRED( checkMemberCount(emptyArray_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(emptyObject_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(array1_, 1) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(object1_, 1) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(null_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(integer_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(unsignedInteger_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(smallUnsignedInteger_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(real_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(emptyString_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(string_, 0) );
-   JSONTEST_ASSERT_PRED( checkMemberCount(true_, 0) );
+JSONTEST_FIXTURE(ValueTest, memberCount) {
+  JSONTEST_ASSERT_PRED(checkMemberCount(emptyArray_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(emptyObject_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(array1_, 1));
+  JSONTEST_ASSERT_PRED(checkMemberCount(object1_, 1));
+  JSONTEST_ASSERT_PRED(checkMemberCount(null_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(integer_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(unsignedInteger_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(smallUnsignedInteger_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(real_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(emptyString_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(string_, 0));
+  JSONTEST_ASSERT_PRED(checkMemberCount(true_, 0));
 }
 
+JSONTEST_FIXTURE(ValueTest, objects) {
+  // Types
+  IsCheck checks;
+  checks.isObject_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(emptyObject_, checks));
+  JSONTEST_ASSERT_PRED(checkIs(object1_, checks));
 
-JSONTEST_FIXTURE( ValueTest, objects )
-{
-   // Types
-   IsCheck checks;
-   checks.isObject_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( emptyObject_, checks ) );
-   JSONTEST_ASSERT_PRED( checkIs( object1_, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::objectValue, emptyObject_.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::objectValue, emptyObject_.type());
+  // Empty object okay
+  JSONTEST_ASSERT(emptyObject_.isConvertibleTo(Json::nullValue));
 
-   // Empty object okay
-   JSONTEST_ASSERT(emptyObject_.isConvertibleTo(Json::nullValue));
+  // Non-empty object not okay
+  JSONTEST_ASSERT(!object1_.isConvertibleTo(Json::nullValue));
 
-   // Non-empty object not okay
-   JSONTEST_ASSERT(!object1_.isConvertibleTo(Json::nullValue));
+  // Always okay
+  JSONTEST_ASSERT(emptyObject_.isConvertibleTo(Json::objectValue));
 
-   // Always okay
-   JSONTEST_ASSERT(emptyObject_.isConvertibleTo(Json::objectValue));
+  // Never okay
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::stringValue));
 
-   // Never okay
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(!emptyObject_.isConvertibleTo(Json::stringValue));
+  // Access through const reference
+  const Json::Value& constObject = object1_;
 
-   // Access through const reference
-   const Json::Value &constObject = object1_;
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), constObject["id"]);
+  JSONTEST_ASSERT_EQUAL(Json::Value(), constObject["unknown id"]);
 
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), constObject["id"]);
-   JSONTEST_ASSERT_EQUAL(Json::Value(), constObject["unknown id"]);
+  // Access through find()
+  const char idKey[] = "id";
+  const Json::Value* foundId = object1_.find(idKey, idKey + strlen(idKey));
+  JSONTEST_ASSERT(foundId != nullptr);
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), *foundId);
 
-   // Access through non-const reference
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), object1_["id"]);
-   JSONTEST_ASSERT_EQUAL(Json::Value(), object1_["unknown id"]);
+  const char unknownIdKey[] = "unknown id";
+  const Json::Value* foundUnknownId =
+      object1_.find(unknownIdKey, unknownIdKey + strlen(unknownIdKey));
+  JSONTEST_ASSERT_EQUAL(nullptr, foundUnknownId);
 
-   object1_["some other id"] = "foo";
-   JSONTEST_ASSERT_EQUAL(Json::Value("foo"), object1_["some other id"]);
+  // Access through demand()
+  const char yetAnotherIdKey[] = "yet another id";
+  const Json::Value* foundYetAnotherId =
+      object1_.find(yetAnotherIdKey, yetAnotherIdKey + strlen(yetAnotherIdKey));
+  JSONTEST_ASSERT_EQUAL(nullptr, foundYetAnotherId);
+  Json::Value* demandedYetAnotherId = object1_.demand(
+      yetAnotherIdKey, yetAnotherIdKey + strlen(yetAnotherIdKey));
+  JSONTEST_ASSERT(demandedYetAnotherId != nullptr);
+  *demandedYetAnotherId = "baz";
+
+  JSONTEST_ASSERT_EQUAL(Json::Value("baz"), object1_["yet another id"]);
+
+  // Access through non-const reference
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), object1_["id"]);
+  JSONTEST_ASSERT_EQUAL(Json::Value(), object1_["unknown id"]);
+
+  object1_["some other id"] = "foo";
+  JSONTEST_ASSERT_EQUAL(Json::Value("foo"), object1_["some other id"]);
+  JSONTEST_ASSERT_EQUAL(Json::Value("foo"), object1_["some other id"]);
+
+  // Remove.
+  Json::Value got;
+  bool did;
+  did = object1_.removeMember("some other id", &got);
+  JSONTEST_ASSERT_EQUAL(Json::Value("foo"), got);
+  JSONTEST_ASSERT_EQUAL(true, did);
+  got = Json::Value("bar");
+  did = object1_.removeMember("some other id", &got);
+  JSONTEST_ASSERT_EQUAL(Json::Value("bar"), got);
+  JSONTEST_ASSERT_EQUAL(false, did);
+
+  object1_["some other id"] = "foo";
+  Json::Value* gotPtr = nullptr;
+  did = object1_.removeMember("some other id", gotPtr);
+  JSONTEST_ASSERT_EQUAL(nullptr, gotPtr);
+  JSONTEST_ASSERT_EQUAL(true, did);
 }
 
+JSONTEST_FIXTURE(ValueTest, arrays) {
+  const unsigned int index0 = 0;
 
-JSONTEST_FIXTURE( ValueTest, arrays )
-{
-   const unsigned int index0 = 0;
+  // Types
+  IsCheck checks;
+  checks.isArray_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(emptyArray_, checks));
+  JSONTEST_ASSERT_PRED(checkIs(array1_, checks));
 
-   // Types
-   IsCheck checks;
-   checks.isArray_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( emptyArray_, checks ) );
-   JSONTEST_ASSERT_PRED( checkIs( array1_, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::arrayValue, array1_.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::arrayValue, array1_.type());
+  // Empty array okay
+  JSONTEST_ASSERT(emptyArray_.isConvertibleTo(Json::nullValue));
 
-   // Empty array okay
-   JSONTEST_ASSERT(emptyArray_.isConvertibleTo(Json::nullValue));
+  // Non-empty array not okay
+  JSONTEST_ASSERT(!array1_.isConvertibleTo(Json::nullValue));
 
-   // Non-empty array not okay
-   JSONTEST_ASSERT(!array1_.isConvertibleTo(Json::nullValue));
+  // Always okay
+  JSONTEST_ASSERT(emptyArray_.isConvertibleTo(Json::arrayValue));
 
-   // Always okay
-   JSONTEST_ASSERT(emptyArray_.isConvertibleTo(Json::arrayValue));
+  // Never okay
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::stringValue));
 
-   // Never okay
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::objectValue));
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(!emptyArray_.isConvertibleTo(Json::stringValue));
+  // Access through const reference
+  const Json::Value& constArray = array1_;
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), constArray[index0]);
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), constArray[0]);
 
-   // Access through const reference
-   const Json::Value &constArray = array1_;
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), constArray[index0]);
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), constArray[0]);
+  // Access through non-const reference
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), array1_[index0]);
+  JSONTEST_ASSERT_EQUAL(Json::Value(1234), array1_[0]);
 
-   // Access through non-const reference
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), array1_[index0]);
-   JSONTEST_ASSERT_EQUAL(Json::Value(1234), array1_[0]);
-
-   array1_[2] = Json::Value(17);
-   JSONTEST_ASSERT_EQUAL(Json::Value(), array1_[1]);
-   JSONTEST_ASSERT_EQUAL(Json::Value(17), array1_[2]);
+  array1_[2] = Json::Value(17);
+  JSONTEST_ASSERT_EQUAL(Json::Value(), array1_[1]);
+  JSONTEST_ASSERT_EQUAL(Json::Value(17), array1_[2]);
+  Json::Value got;
+  JSONTEST_ASSERT_EQUAL(true, array1_.removeIndex(2, &got));
+  JSONTEST_ASSERT_EQUAL(Json::Value(17), got);
+  JSONTEST_ASSERT_EQUAL(false, array1_.removeIndex(2, &got)); // gone now
+}
+JSONTEST_FIXTURE(ValueTest, arrayIssue252) {
+  int count = 5;
+  Json::Value root;
+  Json::Value item;
+  root["array"] = Json::Value::nullRef;
+  for (int i = 0; i < count; i++) {
+    item["a"] = i;
+    item["b"] = i;
+    root["array"][i] = item;
+  }
+  // JSONTEST_ASSERT_EQUAL(5, root["array"].size());
 }
 
+JSONTEST_FIXTURE(ValueTest, null) {
+  JSONTEST_ASSERT_EQUAL(Json::nullValue, null_.type());
 
-JSONTEST_FIXTURE( ValueTest, null )
-{
-   JSONTEST_ASSERT_EQUAL(Json::nullValue, null_.type());
+  IsCheck checks;
+  checks.isNull_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(null_, checks));
 
-   IsCheck checks;
-   checks.isNull_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( null_, checks ) );
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(null_.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(null_.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT_EQUAL(Json::Int(0), null_.asInt());
+  JSONTEST_ASSERT_EQUAL(Json::LargestInt(0), null_.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(Json::UInt(0), null_.asUInt());
+  JSONTEST_ASSERT_EQUAL(Json::LargestUInt(0), null_.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, null_.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, null_.asFloat());
+  JSONTEST_ASSERT_STRING_EQUAL("", null_.asString());
 
-   JSONTEST_ASSERT_EQUAL(Json::Int(0), null_.asInt());
-   JSONTEST_ASSERT_EQUAL(Json::LargestInt(0), null_.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(Json::UInt(0), null_.asUInt());
-   JSONTEST_ASSERT_EQUAL(Json::LargestUInt(0), null_.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, null_.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, null_.asFloat());
-   JSONTEST_ASSERT_STRING_EQUAL("", null_.asString());
+  JSONTEST_ASSERT_EQUAL(Json::Value::null, null_);
+
+  // Test using a Value in a boolean context (false iff null)
+  JSONTEST_ASSERT_EQUAL(null_, false);
+  JSONTEST_ASSERT_EQUAL(object1_, true);
+  JSONTEST_ASSERT_EQUAL(!null_, true);
+  JSONTEST_ASSERT_EQUAL(!object1_, false);
 }
 
+JSONTEST_FIXTURE(ValueTest, strings) {
+  JSONTEST_ASSERT_EQUAL(Json::stringValue, string1_.type());
 
-JSONTEST_FIXTURE( ValueTest, strings )
-{
-   JSONTEST_ASSERT_EQUAL(Json::stringValue, string1_.type());
+  IsCheck checks;
+  checks.isString_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(emptyString_, checks));
+  JSONTEST_ASSERT_PRED(checkIs(string_, checks));
+  JSONTEST_ASSERT_PRED(checkIs(string1_, checks));
 
-   IsCheck checks;
-   checks.isString_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( emptyString_, checks ) );
-   JSONTEST_ASSERT_PRED( checkIs( string_, checks ) );
-   JSONTEST_ASSERT_PRED( checkIs( string1_, checks ) );
+  // Empty string okay
+  JSONTEST_ASSERT(emptyString_.isConvertibleTo(Json::nullValue));
 
-   // Empty string okay
-   JSONTEST_ASSERT(emptyString_.isConvertibleTo(Json::nullValue));
+  // Non-empty string not okay
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::nullValue));
 
-   // Non-empty string not okay
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::nullValue));
+  // Always okay
+  JSONTEST_ASSERT(string1_.isConvertibleTo(Json::stringValue));
 
-   // Always okay
-   JSONTEST_ASSERT(string1_.isConvertibleTo(Json::stringValue));
+  // Never okay
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::realValue));
 
-   // Never okay
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::objectValue));
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!string1_.isConvertibleTo(Json::realValue));
-
-   JSONTEST_ASSERT_STRING_EQUAL("a", string1_.asString());
-   JSONTEST_ASSERT_STRING_EQUAL("a", string1_.asCString());
+  JSONTEST_ASSERT_STRING_EQUAL("a", string1_.asString());
+  JSONTEST_ASSERT_STRING_EQUAL("a", string1_.asCString());
 }
 
+JSONTEST_FIXTURE(ValueTest, bools) {
+  JSONTEST_ASSERT_EQUAL(Json::booleanValue, false_.type());
 
-JSONTEST_FIXTURE( ValueTest, bools )
-{
-   JSONTEST_ASSERT_EQUAL(Json::booleanValue, false_.type());
+  IsCheck checks;
+  checks.isBool_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(false_, checks));
+  JSONTEST_ASSERT_PRED(checkIs(true_, checks));
 
-   IsCheck checks;
-   checks.isBool_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( false_, checks ) );
-   JSONTEST_ASSERT_PRED( checkIs( true_, checks ) );
+  // False okay
+  JSONTEST_ASSERT(false_.isConvertibleTo(Json::nullValue));
 
-   // False okay
-   JSONTEST_ASSERT(false_.isConvertibleTo(Json::nullValue));
+  // True not okay
+  JSONTEST_ASSERT(!true_.isConvertibleTo(Json::nullValue));
 
-   // True not okay
-   JSONTEST_ASSERT(!true_.isConvertibleTo(Json::nullValue));
+  // Always okay
+  JSONTEST_ASSERT(true_.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(true_.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(true_.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(true_.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(true_.isConvertibleTo(Json::stringValue));
 
-   // Always okay
-   JSONTEST_ASSERT(true_.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(true_.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(true_.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(true_.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(true_.isConvertibleTo(Json::stringValue));
+  // Never okay
+  JSONTEST_ASSERT(!true_.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!true_.isConvertibleTo(Json::objectValue));
 
-   // Never okay
-   JSONTEST_ASSERT(!true_.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!true_.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT_EQUAL(true, true_.asBool());
+  JSONTEST_ASSERT_EQUAL(1, true_.asInt());
+  JSONTEST_ASSERT_EQUAL(1, true_.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(1, true_.asUInt());
+  JSONTEST_ASSERT_EQUAL(1, true_.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(1.0, true_.asDouble());
+  JSONTEST_ASSERT_EQUAL(1.0, true_.asFloat());
 
-   JSONTEST_ASSERT_EQUAL(true, true_.asBool());
-   JSONTEST_ASSERT_EQUAL(1, true_.asInt());
-   JSONTEST_ASSERT_EQUAL(1, true_.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(1, true_.asUInt());
-   JSONTEST_ASSERT_EQUAL(1, true_.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(1.0, true_.asDouble());
-   JSONTEST_ASSERT_EQUAL(1.0, true_.asFloat());
-
-   JSONTEST_ASSERT_EQUAL(false, false_.asBool());
-   JSONTEST_ASSERT_EQUAL(0, false_.asInt());
-   JSONTEST_ASSERT_EQUAL(0, false_.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, false_.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, false_.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, false_.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, false_.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, false_.asBool());
+  JSONTEST_ASSERT_EQUAL(0, false_.asInt());
+  JSONTEST_ASSERT_EQUAL(0, false_.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, false_.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, false_.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, false_.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, false_.asFloat());
 }
 
+JSONTEST_FIXTURE(ValueTest, integers) {
+  IsCheck checks;
+  Json::Value val;
 
-JSONTEST_FIXTURE( ValueTest, integers )
-{
-   IsCheck checks;
-   Json::Value val;
+  // Conversions that don't depend on the value.
+  JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(!Json::Value(17).isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!Json::Value(17).isConvertibleTo(Json::objectValue));
 
-   // Conversions that don't depend on the value.
-   JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(Json::Value(17).isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(!Json::Value(17).isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!Json::Value(17).isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(!Json::Value(17U).isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!Json::Value(17U).isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(Json::Value(17U).isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(!Json::Value(17U).isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!Json::Value(17U).isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(!Json::Value(17.0).isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!Json::Value(17.0).isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(Json::Value(17.0).isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(!Json::Value(17.0).isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!Json::Value(17.0).isConvertibleTo(Json::objectValue));
+  // Default int
+  val = Json::Value(Json::intValue);
 
-   // Default int
-   val = Json::Value(Json::intValue);
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
+  // Default uint
+  val = Json::Value(Json::uintValue);
 
-   // Default uint
-   val = Json::Value(Json::uintValue);
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
+  // Default real
+  val = Json::Value(Json::realValue);
 
-   // Default real
-   val = Json::Value(Json::realValue);
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0.0", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0.0", val.asString());
+  // Zero (signed constructor arg)
+  val = Json::Value(0);
 
-   // Zero (signed constructor arg)
-   val = Json::Value(0);
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
+  // Zero (unsigned constructor arg)
+  val = Json::Value(0u);
 
-   // Zero (unsigned constructor arg)
-   val = Json::Value(0u);
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0", val.asString());
+  // Zero (floating-point constructor arg)
+  val = Json::Value(0.0);
 
-   // Zero (floating-point constructor arg)
-   val = Json::Value(0.0);
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
-
-   JSONTEST_ASSERT_EQUAL(0, val.asInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(false, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("0.0", val.asString());
+  JSONTEST_ASSERT_EQUAL(0, val.asInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(0, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(0.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(false, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("0.0", val.asString());
 
-   // 2^20 (signed constructor arg)
-   val = Json::Value(1 << 20);
+  // 2^20 (signed constructor arg)
+  val = Json::Value(1 << 20);
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1048576", val.asString());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("1048576", val.asString());
 
-   // 2^20 (unsigned constructor arg)
-   val = Json::Value(Json::UInt(1 << 20));
+  // 2^20 (unsigned constructor arg)
+  val = Json::Value(Json::UInt(1 << 20));
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1048576", val.asString());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("1048576", val.asString());
 
-   // 2^20 (floating-point constructor arg)
-   val = Json::Value((1 << 20) / 1.0);
+  // 2^20 (floating-point constructor arg)
+  val = Json::Value((1 << 20) / 1.0);
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1048576.0", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((1 << 20), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1048576.0",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   // -2^20
-   val = Json::Value(-(1 << 20));
+  // -2^20
+  val = Json::Value(-(1 << 20));
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asInt());
-   JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-1048576", val.asString());
+  JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asInt());
+  JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(-(1 << 20), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("-1048576", val.asString());
 
-   // int32 max
-   val = Json::Value(kint32max);
+  // int32 max
+  val = Json::Value(kint32max);
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(kint32max, val.asInt());
-   JSONTEST_ASSERT_EQUAL(kint32max, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(kint32max, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(kint32max, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(kint32max, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(kfint32max, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("2147483647", val.asString());
+  JSONTEST_ASSERT_EQUAL(kint32max, val.asInt());
+  JSONTEST_ASSERT_EQUAL(kint32max, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(kint32max, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(kint32max, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(kint32max, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(kfint32max, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("2147483647", val.asString());
 
-   // int32 min
-   val = Json::Value(kint32min);
+  // int32 min
+  val = Json::Value(kint32min);
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt_ = true;
-   checks.isInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt_ = true;
+  checks.isInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(kint32min, val.asInt());
-   JSONTEST_ASSERT_EQUAL(kint32min, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(kint32min, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(kint32min, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-2147483648", val.asString());
+  JSONTEST_ASSERT_EQUAL(kint32min, val.asInt());
+  JSONTEST_ASSERT_EQUAL(kint32min, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(kint32min, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(kint32min, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("-2147483648", val.asString());
 
-   // uint32 max
-   val = Json::Value(kuint32max);
+  // uint32 max
+  val = Json::Value(kuint32max);
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isUInt_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isUInt_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
 
 #ifndef JSON_NO_INT64
-   JSONTEST_ASSERT_EQUAL(kuint32max, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(kuint32max, val.asLargestInt());
 #endif
-   JSONTEST_ASSERT_EQUAL(kuint32max, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(kuint32max, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(kuint32max, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(kfuint32max, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("4294967295", val.asString());
+  JSONTEST_ASSERT_EQUAL(kuint32max, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(kuint32max, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(kuint32max, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(kfuint32max, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("4294967295", val.asString());
 
 #ifdef JSON_NO_INT64
-   // int64 max
-   val = Json::Value(double(kint64max));
+  // int64 max
+  val = Json::Value(double(kint64max));
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(double(kint64max), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(kint64max), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("9.22337e+18", val.asString());
+  JSONTEST_ASSERT_EQUAL(double(kint64max), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(kint64max), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("9.22337e+18", val.asString());
 
-   // int64 min
-   val = Json::Value(double(kint64min));
+  // int64 min
+  val = Json::Value(double(kint64min));
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(double(kint64min), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(kint64min), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-9.22337e+18", val.asString());
+  JSONTEST_ASSERT_EQUAL(double(kint64min), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(kint64min), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("-9.22337e+18", val.asString());
 
-   // uint64 max
-   val = Json::Value(double(kuint64max));
+  // uint64 max
+  val = Json::Value(double(kuint64max));
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(double(kuint64max), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(kuint64max), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1.84467e+19", val.asString());
-#else  // ifdef JSON_NO_INT64
-   // 2^40 (signed constructor arg)
-   val = Json::Value(Json::Int64(1) << 40);
+  JSONTEST_ASSERT_EQUAL(double(kuint64max), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(kuint64max), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("1.84467e+19", val.asString());
+#else // ifdef JSON_NO_INT64
+  // 2^40 (signed constructor arg)
+  val = Json::Value(Json::Int64(1) << 40);
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1099511627776", val.asString());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("1099511627776", val.asString());
 
-   // 2^40 (unsigned constructor arg)
-   val = Json::Value(Json::UInt64(1) << 40);
+  // 2^40 (unsigned constructor arg)
+  val = Json::Value(Json::UInt64(1) << 40);
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1099511627776", val.asString());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("1099511627776", val.asString());
 
-   // 2^40 (floating-point constructor arg)
-   val = Json::Value((Json::Int64(1) << 40) / 1.0);
+  // 2^40 (floating-point constructor arg)
+  val = Json::Value((Json::Int64(1) << 40) / 1.0);
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1099511627776.0", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asUInt64());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asDouble());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 40), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1099511627776.0",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   // -2^40
-   val = Json::Value(-(Json::Int64(1) << 40));
+  // -2^40
+  val = Json::Value(-(Json::Int64(1) << 40));
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asInt64());
-   JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-1099511627776", val.asString());
+  JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asInt64());
+  JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 40), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("-1099511627776", val.asString());
 
-   // int64 max
-   val = Json::Value(Json::Int64(kint64max));
+  // int64 max
+  val = Json::Value(Json::Int64(kint64max));
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(kint64max, val.asInt64());
-   JSONTEST_ASSERT_EQUAL(kint64max, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(kint64max, val.asUInt64());
-   JSONTEST_ASSERT_EQUAL(kint64max, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(double(kint64max), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(kint64max), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("9223372036854775807", val.asString());
+  JSONTEST_ASSERT_EQUAL(kint64max, val.asInt64());
+  JSONTEST_ASSERT_EQUAL(kint64max, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(kint64max, val.asUInt64());
+  JSONTEST_ASSERT_EQUAL(kint64max, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(double(kint64max), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(kint64max), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("9223372036854775807", val.asString());
 
-   // int64 max (floating point constructor). Note that kint64max is not exactly
-   // representable as a double, and will be rounded up to be higher.
-   val = Json::Value(double(kint64max));
+  // int64 max (floating point constructor). Note that kint64max is not exactly
+  // representable as a double, and will be rounded up to be higher.
+  val = Json::Value(double(kint64max));
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   JSONTEST_ASSERT_EQUAL(Json::UInt64(1) << 63, val.asUInt64());
-   JSONTEST_ASSERT_EQUAL(Json::UInt64(1) << 63, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(uint64ToDouble(Json::UInt64(1) << 63), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(uint64ToDouble(Json::UInt64(1) << 63)), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("9.223372036854776e+18", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL(Json::UInt64(1) << 63, val.asUInt64());
+  JSONTEST_ASSERT_EQUAL(Json::UInt64(1) << 63, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(uint64ToDouble(Json::UInt64(1) << 63), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(Json::UInt64(1) << 63), val.asFloat());
 
-   // int64 min
-   val = Json::Value(Json::Int64(kint64min));
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "9.2233720368547758e+18",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
+  // int64 min
+  val = Json::Value(Json::Int64(kint64min));
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::intValue, val.type());
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT_EQUAL(kint64min, val.asInt64());
-   JSONTEST_ASSERT_EQUAL(kint64min, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(double(kint64min), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(kint64min), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-9223372036854775808", val.asString());
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   // int64 min (floating point constructor). Note that kint64min *is* exactly
-   // representable as a double.
-   val = Json::Value(double(kint64min));
+  JSONTEST_ASSERT_EQUAL(kint64min, val.asInt64());
+  JSONTEST_ASSERT_EQUAL(kint64min, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(double(kint64min), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(kint64min), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("-9223372036854775808", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  // int64 min (floating point constructor). Note that kint64min *is* exactly
+  // representable as a double.
+  val = Json::Value(double(kint64min));
 
-   checks = IsCheck();
-   checks.isInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  checks = IsCheck();
+  checks.isInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT_EQUAL(kint64min, val.asInt64());
-   JSONTEST_ASSERT_EQUAL(kint64min, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(-9223372036854775808.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(-9223372036854775808.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("-9.223372036854776e+18", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   // uint64 max
-   val = Json::Value(Json::UInt64(kuint64max));
+  JSONTEST_ASSERT_EQUAL(kint64min, val.asInt64());
+  JSONTEST_ASSERT_EQUAL(kint64min, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(-9223372036854775808.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(-9223372036854775808.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "-9.2233720368547758e+18",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+  // 10^19
+  const auto ten_to_19 = static_cast<Json::UInt64>(1e19);
+  val = Json::Value(Json::UInt64(ten_to_19));
 
-   checks = IsCheck();
-   checks.isUInt64_ = true;
-   checks.isIntegral_ = true;
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  checks = IsCheck();
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT_EQUAL(kuint64max, val.asUInt64());
-   JSONTEST_ASSERT_EQUAL(kuint64max, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(uint64ToDouble(kuint64max), val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(uint64ToDouble(kuint64max)), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("18446744073709551615", val.asString());
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
 
-   // uint64 max (floating point constructor). Note that kuint64max is not
-   // exactly representable as a double, and will be rounded up to be higher.
-   val = Json::Value(uint64ToDouble(kuint64max));
+  JSONTEST_ASSERT_EQUAL(ten_to_19, val.asUInt64());
+  JSONTEST_ASSERT_EQUAL(ten_to_19, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(uint64ToDouble(ten_to_19), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(uint64ToDouble(ten_to_19)), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("10000000000000000000", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  // 10^19 (double constructor). Note that 10^19 is not exactly representable
+  // as a double.
+  val = Json::Value(uint64ToDouble(ten_to_19));
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  checks = IsCheck();
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT_EQUAL(18446744073709551616.0, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(18446744073709551616.0, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_STRING_EQUAL("1.844674407370955e+19", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+
+  JSONTEST_ASSERT_EQUAL(1e19, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(1e19, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1e+19",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
+
+  // uint64 max
+  val = Json::Value(Json::UInt64(kuint64max));
+
+  JSONTEST_ASSERT_EQUAL(Json::uintValue, val.type());
+
+  checks = IsCheck();
+  checks.isUInt64_ = true;
+  checks.isIntegral_ = true;
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
+
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+
+  JSONTEST_ASSERT_EQUAL(kuint64max, val.asUInt64());
+  JSONTEST_ASSERT_EQUAL(kuint64max, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(uint64ToDouble(kuint64max), val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(uint64ToDouble(kuint64max)), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL("18446744073709551615", val.asString());
+
+  // uint64 max (floating point constructor). Note that kuint64max is not
+  // exactly representable as a double, and will be rounded up to be higher.
+  val = Json::Value(uint64ToDouble(kuint64max));
+
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
+
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+
+  JSONTEST_ASSERT_EQUAL(18446744073709551616.0, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(18446744073709551616.0, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1.8446744073709552e+19",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 #endif
 }
 
+JSONTEST_FIXTURE(ValueTest, nonIntegers) {
+  IsCheck checks;
+  Json::Value val;
 
-JSONTEST_FIXTURE( ValueTest, nonIntegers )
-{
-   IsCheck checks;
-   Json::Value val;
+  // Small positive number
+  val = Json::Value(1.5);
 
-   // Small positive number
-   val = Json::Value(1.5);
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT_EQUAL(1.5, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(1.5, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(1, val.asInt());
+  JSONTEST_ASSERT_EQUAL(1, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(1, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(1, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_EQUAL("1.5", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(1.5, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(1.5, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(1, val.asInt());
-   JSONTEST_ASSERT_EQUAL(1, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(1, val.asUInt());
-   JSONTEST_ASSERT_EQUAL(1, val.asLargestUInt());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_EQUAL("1.50", val.asString());
+  // Small negative number
+  val = Json::Value(-1.5);
 
-   // Small negative number
-   val = Json::Value(-1.5);
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT_EQUAL(-1.5, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(-1.5, val.asFloat());
+  JSONTEST_ASSERT_EQUAL(-1, val.asInt());
+  JSONTEST_ASSERT_EQUAL(-1, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_EQUAL("-1.5", val.asString());
 
-   JSONTEST_ASSERT_EQUAL(-1.5, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(-1.5, val.asFloat());
-   JSONTEST_ASSERT_EQUAL(-1, val.asInt());
-   JSONTEST_ASSERT_EQUAL(-1, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_EQUAL("-1.50", val.asString());
+  // A bit over int32 max
+  val = Json::Value(kint32max + 0.5);
 
-   // A bit over int32 max
-   val = Json::Value(kint32max + 0.5);
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
-
-   JSONTEST_ASSERT_EQUAL(2147483647.5, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(2147483647.5), val.asFloat());
-   JSONTEST_ASSERT_EQUAL(2147483647U, val.asUInt());
+  JSONTEST_ASSERT_EQUAL(2147483647.5, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(2147483647.5), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(2147483647U, val.asUInt());
 #ifdef JSON_HAS_INT64
-   JSONTEST_ASSERT_EQUAL(2147483647L, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL(2147483647U, val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL(2147483647L, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(2147483647U, val.asLargestUInt());
 #endif
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_EQUAL("2147483647.50", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_EQUAL(
+      "2147483647.5",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   // A bit under int32 min
-   val = Json::Value(kint32min - 0.5);
+  // A bit under int32 min
+  val = Json::Value(kint32min - 0.5);
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT_EQUAL(-2147483648.5, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(-2147483648.5), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(-2147483648.5, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(-2147483648.5), val.asFloat());
 #ifdef JSON_HAS_INT64
-   JSONTEST_ASSERT_EQUAL(-Json::Int64(1)<< 31, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL(-(Json::Int64(1) << 31), val.asLargestInt());
 #endif
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_EQUAL("-2147483648.50", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_EQUAL(
+      "-2147483648.5",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   // A bit over uint32 max
-   val = Json::Value(kuint32max + 0.5);
+  // A bit over uint32 max
+  val = Json::Value(kuint32max + 0.5);
 
-   JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
+  JSONTEST_ASSERT_EQUAL(Json::realValue, val.type());
 
-   checks = IsCheck();
-   checks.isDouble_ = true;
-   checks.isNumeric_ = true;
-   JSONTEST_ASSERT_PRED( checkIs( val, checks ) );
+  checks = IsCheck();
+  checks.isDouble_ = true;
+  checks.isNumeric_ = true;
+  JSONTEST_ASSERT_PRED(checkIs(val, checks));
 
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
-   JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
-   JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::realValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::booleanValue));
+  JSONTEST_ASSERT(val.isConvertibleTo(Json::stringValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::nullValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::intValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::uintValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::arrayValue));
+  JSONTEST_ASSERT(!val.isConvertibleTo(Json::objectValue));
 
-   JSONTEST_ASSERT_EQUAL(4294967295.5, val.asDouble());
-   JSONTEST_ASSERT_EQUAL(float(4294967295.5), val.asFloat());
+  JSONTEST_ASSERT_EQUAL(4294967295.5, val.asDouble());
+  JSONTEST_ASSERT_EQUAL(float(4294967295.5), val.asFloat());
 #ifdef JSON_HAS_INT64
-   JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 32)-1, val.asLargestInt());
-   JSONTEST_ASSERT_EQUAL((Json::UInt64(1) << 32)-Json::UInt64(1), val.asLargestUInt());
+  JSONTEST_ASSERT_EQUAL((Json::Int64(1) << 32) - 1, val.asLargestInt());
+  JSONTEST_ASSERT_EQUAL((Json::UInt64(1) << 32) - Json::UInt64(1),
+                        val.asLargestUInt());
 #endif
-   JSONTEST_ASSERT_EQUAL(true, val.asBool());
-   JSONTEST_ASSERT_EQUAL("4294967295.50", normalizeFloatingPointStr(val.asString()));
+  JSONTEST_ASSERT_EQUAL(true, val.asBool());
+  JSONTEST_ASSERT_EQUAL(
+      "4294967295.5",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 
-   val = Json::Value(1.2345678901234);
-   JSONTEST_ASSERT_STRING_EQUAL( "1.23456789012340", normalizeFloatingPointStr(val.asString()));
+  val = Json::Value(1.2345678901234);
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1.2345678901234001",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
+
+  // A 16-digit floating point number.
+  val = Json::Value(2199023255552000.0f);
+  JSONTEST_ASSERT_EQUAL(float(2199023255552000.0f), val.asFloat());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "2199023255552000.0",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
+
+  // A very large floating point number.
+  val = Json::Value(3.402823466385289e38);
+  JSONTEST_ASSERT_EQUAL(float(3.402823466385289e38), val.asFloat());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "3.402823466385289e+38",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
+
+  // An even larger floating point number.
+  val = Json::Value(1.2345678e300);
+  JSONTEST_ASSERT_EQUAL(double(1.2345678e300), val.asDouble());
+  JSONTEST_ASSERT_STRING_EQUAL(
+      "1.2345678e+300",
+      normalizeFloatingPointStr(JsonTest::ToJsonString(val.asString())));
 }
 
-
-void
-ValueTest::checkConstMemberCount( const Json::Value &value, unsigned int expectedCount )
-{
-   unsigned int count = 0;
-   Json::Value::const_iterator itEnd = value.end();
-   for ( Json::Value::const_iterator it = value.begin(); it != itEnd; ++it )
-   {
-      ++count;
-   }
-   JSONTEST_ASSERT_EQUAL( expectedCount, count ) << "Json::Value::const_iterator";
+void ValueTest::checkConstMemberCount(const Json::Value& value,
+                                      unsigned int expectedCount) {
+  unsigned int count = 0;
+  Json::Value::const_iterator itEnd = value.end();
+  for (Json::Value::const_iterator it = value.begin(); it != itEnd; ++it) {
+    ++count;
+  }
+  JSONTEST_ASSERT_EQUAL(expectedCount, count) << "Json::Value::const_iterator";
 }
 
-void
-ValueTest::checkMemberCount( Json::Value &value, unsigned int expectedCount )
-{
-   JSONTEST_ASSERT_EQUAL(expectedCount, value.size() );
+void ValueTest::checkMemberCount(Json::Value& value,
+                                 unsigned int expectedCount) {
+  JSONTEST_ASSERT_EQUAL(expectedCount, value.size());
 
-   unsigned int count = 0;
-   Json::Value::iterator itEnd = value.end();
-   for ( Json::Value::iterator it = value.begin(); it != itEnd; ++it )
-   {
-      ++count;
-   }
-   JSONTEST_ASSERT_EQUAL( expectedCount, count ) << "Json::Value::iterator";
+  unsigned int count = 0;
+  Json::Value::iterator itEnd = value.end();
+  for (Json::Value::iterator it = value.begin(); it != itEnd; ++it) {
+    ++count;
+  }
+  JSONTEST_ASSERT_EQUAL(expectedCount, count) << "Json::Value::iterator";
 
-   JSONTEST_ASSERT_PRED( checkConstMemberCount(value, expectedCount) );
+  JSONTEST_ASSERT_PRED(checkConstMemberCount(value, expectedCount));
 }
 
-
 ValueTest::IsCheck::IsCheck()
-   : isObject_( false )
-   , isArray_( false )
-   , isBool_( false )
-   , isString_( false )
-   , isNull_( false )
-   , isInt_( false )
-   , isInt64_( false )
-   , isUInt_( false )
-   , isUInt64_( false )
-   , isIntegral_( false )
-   , isDouble_( false )
-   , isNumeric_( false )
-{
-}
 
+    = default;
 
-void 
-ValueTest::checkIs( const Json::Value &value, const IsCheck &check )
-{
-   JSONTEST_ASSERT_EQUAL(check.isObject_, value.isObject() );
-   JSONTEST_ASSERT_EQUAL(check.isArray_, value.isArray() );
-   JSONTEST_ASSERT_EQUAL(check.isBool_, value.isBool() );
-   JSONTEST_ASSERT_EQUAL(check.isDouble_, value.isDouble() );
-   JSONTEST_ASSERT_EQUAL(check.isInt_, value.isInt() );
-   JSONTEST_ASSERT_EQUAL(check.isUInt_, value.isUInt() );
-   JSONTEST_ASSERT_EQUAL(check.isIntegral_, value.isIntegral() );
-   JSONTEST_ASSERT_EQUAL(check.isNumeric_, value.isNumeric() );
-   JSONTEST_ASSERT_EQUAL(check.isString_, value.isString() );
-   JSONTEST_ASSERT_EQUAL(check.isNull_, value.isNull() );
+void ValueTest::checkIs(const Json::Value& value, const IsCheck& check) {
+  JSONTEST_ASSERT_EQUAL(check.isObject_, value.isObject());
+  JSONTEST_ASSERT_EQUAL(check.isArray_, value.isArray());
+  JSONTEST_ASSERT_EQUAL(check.isBool_, value.isBool());
+  JSONTEST_ASSERT_EQUAL(check.isDouble_, value.isDouble());
+  JSONTEST_ASSERT_EQUAL(check.isInt_, value.isInt());
+  JSONTEST_ASSERT_EQUAL(check.isUInt_, value.isUInt());
+  JSONTEST_ASSERT_EQUAL(check.isIntegral_, value.isIntegral());
+  JSONTEST_ASSERT_EQUAL(check.isNumeric_, value.isNumeric());
+  JSONTEST_ASSERT_EQUAL(check.isString_, value.isString());
+  JSONTEST_ASSERT_EQUAL(check.isNull_, value.isNull());
 
 #ifdef JSON_HAS_INT64
-   JSONTEST_ASSERT_EQUAL(check.isInt64_, value.isInt64() );
-   JSONTEST_ASSERT_EQUAL(check.isUInt64_, value.isUInt64() );
+  JSONTEST_ASSERT_EQUAL(check.isInt64_, value.isInt64());
+  JSONTEST_ASSERT_EQUAL(check.isUInt64_, value.isUInt64());
 #else
-   JSONTEST_ASSERT_EQUAL(false, value.isInt64() );
-   JSONTEST_ASSERT_EQUAL(false, value.isUInt64() );
+  JSONTEST_ASSERT_EQUAL(false, value.isInt64());
+  JSONTEST_ASSERT_EQUAL(false, value.isUInt64());
 #endif
 }
 
-JSONTEST_FIXTURE( ValueTest, compareNull )
-{
-    JSONTEST_ASSERT_PRED( checkIsEqual( Json::Value(), Json::Value() ) );
+JSONTEST_FIXTURE(ValueTest, compareNull) {
+  JSONTEST_ASSERT_PRED(checkIsEqual(Json::Value(), Json::Value()));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareInt )
-{
-    JSONTEST_ASSERT_PRED( checkIsLess( 0, 10 ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( 10, 10 ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( -10, -10 ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( -10, 0 ) );
+JSONTEST_FIXTURE(ValueTest, compareInt) {
+  JSONTEST_ASSERT_PRED(checkIsLess(0, 10));
+  JSONTEST_ASSERT_PRED(checkIsEqual(10, 10));
+  JSONTEST_ASSERT_PRED(checkIsEqual(-10, -10));
+  JSONTEST_ASSERT_PRED(checkIsLess(-10, 0));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareUInt )
-{
-    JSONTEST_ASSERT_PRED( checkIsLess( 0u, 10u ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( 0u, Json::Value::maxUInt ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( 10u, 10u ) );
+JSONTEST_FIXTURE(ValueTest, compareUInt) {
+  JSONTEST_ASSERT_PRED(checkIsLess(0u, 10u));
+  JSONTEST_ASSERT_PRED(checkIsLess(0u, Json::Value::maxUInt));
+  JSONTEST_ASSERT_PRED(checkIsEqual(10u, 10u));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareDouble )
-{
-    JSONTEST_ASSERT_PRED( checkIsLess( 0.0, 10.0 ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( 10.0, 10.0 ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( -10.0, -10.0 ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( -10.0, 0.0 ) );
+JSONTEST_FIXTURE(ValueTest, compareDouble) {
+  JSONTEST_ASSERT_PRED(checkIsLess(0.0, 10.0));
+  JSONTEST_ASSERT_PRED(checkIsEqual(10.0, 10.0));
+  JSONTEST_ASSERT_PRED(checkIsEqual(-10.0, -10.0));
+  JSONTEST_ASSERT_PRED(checkIsLess(-10.0, 0.0));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareString )
-{
-    JSONTEST_ASSERT_PRED( checkIsLess( "", " " ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( "", "a" ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( "abcd", "zyui" ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( "abc", "abcd" ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( "abcd", "abcd" ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( " ", " " ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( "ABCD", "abcd" ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( "ABCD", "ABCD" ) );
+JSONTEST_FIXTURE(ValueTest, compareString) {
+  JSONTEST_ASSERT_PRED(checkIsLess("", " "));
+  JSONTEST_ASSERT_PRED(checkIsLess("", "a"));
+  JSONTEST_ASSERT_PRED(checkIsLess("abcd", "zyui"));
+  JSONTEST_ASSERT_PRED(checkIsLess("abc", "abcd"));
+  JSONTEST_ASSERT_PRED(checkIsEqual("abcd", "abcd"));
+  JSONTEST_ASSERT_PRED(checkIsEqual(" ", " "));
+  JSONTEST_ASSERT_PRED(checkIsLess("ABCD", "abcd"));
+  JSONTEST_ASSERT_PRED(checkIsEqual("ABCD", "ABCD"));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareBoolean )
-{
-    JSONTEST_ASSERT_PRED( checkIsLess( false, true ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( false, false ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( true, true ) );
+JSONTEST_FIXTURE(ValueTest, compareBoolean) {
+  JSONTEST_ASSERT_PRED(checkIsLess(false, true));
+  JSONTEST_ASSERT_PRED(checkIsEqual(false, false));
+  JSONTEST_ASSERT_PRED(checkIsEqual(true, true));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareArray )
-{
-    // array compare size then content
-    Json::Value emptyArray(Json::arrayValue);
-    Json::Value l1aArray;
-    l1aArray.append( 0 );
-    Json::Value l1bArray;
-    l1bArray.append( 10 );
-    Json::Value l2aArray;
-    l2aArray.append( 0 );
-    l2aArray.append( 0 );
-    Json::Value l2bArray;
-    l2bArray.append( 0 );
-    l2bArray.append( 10 );
-    JSONTEST_ASSERT_PRED( checkIsLess( emptyArray, l1aArray ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( emptyArray, l2aArray ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( l1aArray, l2aArray ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( l2aArray, l2bArray ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( emptyArray, Json::Value( emptyArray ) ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( l1aArray, Json::Value( l1aArray) ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( l2bArray, Json::Value( l2bArray) ) );
+JSONTEST_FIXTURE(ValueTest, compareArray) {
+  // array compare size then content
+  Json::Value emptyArray(Json::arrayValue);
+  Json::Value l1aArray;
+  l1aArray.append(0);
+  Json::Value l1bArray;
+  l1bArray.append(10);
+  Json::Value l2aArray;
+  l2aArray.append(0);
+  l2aArray.append(0);
+  Json::Value l2bArray;
+  l2bArray.append(0);
+  l2bArray.append(10);
+  JSONTEST_ASSERT_PRED(checkIsLess(emptyArray, l1aArray));
+  JSONTEST_ASSERT_PRED(checkIsLess(emptyArray, l2aArray));
+  JSONTEST_ASSERT_PRED(checkIsLess(l1aArray, l2aArray));
+  JSONTEST_ASSERT_PRED(checkIsLess(l2aArray, l2bArray));
+  JSONTEST_ASSERT_PRED(checkIsEqual(emptyArray, Json::Value(emptyArray)));
+  JSONTEST_ASSERT_PRED(checkIsEqual(l1aArray, Json::Value(l1aArray)));
+  JSONTEST_ASSERT_PRED(checkIsEqual(l2bArray, Json::Value(l2bArray)));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareObject )
-{
-    // object compare size then content
-    Json::Value emptyObject(Json::objectValue);
-    Json::Value l1aObject;
-    l1aObject["key1"] = 0;
-    Json::Value l1bObject;
-    l1aObject["key1"] = 10;
-    Json::Value l2aObject;
-    l2aObject["key1"] = 0;
-    l2aObject["key2"] = 0;
-    JSONTEST_ASSERT_PRED( checkIsLess( emptyObject, l1aObject ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( emptyObject, l2aObject ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( l1aObject, l2aObject ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( emptyObject, Json::Value( emptyObject ) ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( l1aObject, Json::Value( l1aObject ) ) );
-    JSONTEST_ASSERT_PRED( checkIsEqual( l2aObject, Json::Value( l2aObject ) ) );
+JSONTEST_FIXTURE(ValueTest, compareObject) {
+  // object compare size then content
+  Json::Value emptyObject(Json::objectValue);
+  Json::Value l1aObject;
+  l1aObject["key1"] = 0;
+  Json::Value l1bObject;
+  l1aObject["key1"] = 10;
+  Json::Value l2aObject;
+  l2aObject["key1"] = 0;
+  l2aObject["key2"] = 0;
+  JSONTEST_ASSERT_PRED(checkIsLess(emptyObject, l1aObject));
+  JSONTEST_ASSERT_PRED(checkIsLess(emptyObject, l2aObject));
+  JSONTEST_ASSERT_PRED(checkIsLess(l1aObject, l2aObject));
+  JSONTEST_ASSERT_PRED(checkIsEqual(emptyObject, Json::Value(emptyObject)));
+  JSONTEST_ASSERT_PRED(checkIsEqual(l1aObject, Json::Value(l1aObject)));
+  JSONTEST_ASSERT_PRED(checkIsEqual(l2aObject, Json::Value(l2aObject)));
 }
 
-
-JSONTEST_FIXTURE( ValueTest, compareType )
-{
-    // object of different type are ordered according to their type
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(), Json::Value(1) ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(1), Json::Value(1u) ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(1u), Json::Value(1.0) ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(1.0), Json::Value("a") ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value("a"), Json::Value(true) ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(true), Json::Value(Json::arrayValue) ) );
-    JSONTEST_ASSERT_PRED( checkIsLess( Json::Value(Json::arrayValue), Json::Value(Json::objectValue) ) );
+JSONTEST_FIXTURE(ValueTest, compareType) {
+  // object of different type are ordered according to their type
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value(), Json::Value(1)));
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value(1), Json::Value(1u)));
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value(1u), Json::Value(1.0)));
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value(1.0), Json::Value("a")));
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value("a"), Json::Value(true)));
+  JSONTEST_ASSERT_PRED(
+      checkIsLess(Json::Value(true), Json::Value(Json::arrayValue)));
+  JSONTEST_ASSERT_PRED(checkIsLess(Json::Value(Json::arrayValue),
+                                   Json::Value(Json::objectValue)));
 }
 
-
-void 
-ValueTest::checkIsLess( const Json::Value &x, const Json::Value &y )
-{
-    JSONTEST_ASSERT( x < y );
-    JSONTEST_ASSERT( y > x );
-    JSONTEST_ASSERT( x <= y );
-    JSONTEST_ASSERT( y >= x );
-    JSONTEST_ASSERT( !(x == y) );
-    JSONTEST_ASSERT( !(y == x) );
-    JSONTEST_ASSERT( !(x >= y) );
-    JSONTEST_ASSERT( !(y <= x) );
-    JSONTEST_ASSERT( !(x > y) );
-    JSONTEST_ASSERT( !(y < x) );
-    JSONTEST_ASSERT( x.compare( y ) < 0 );
-    JSONTEST_ASSERT( y.compare( x ) >= 0 );
+JSONTEST_FIXTURE(ValueTest, CopyObject) {
+  Json::Value arrayVal;
+  arrayVal.append("val1");
+  arrayVal.append("val2");
+  arrayVal.append("val3");
+  Json::Value stringVal("string value");
+  Json::Value copy1, copy2;
+  {
+    Json::Value arrayCopy, stringCopy;
+    arrayCopy.copy(arrayVal);
+    stringCopy.copy(stringVal);
+    JSONTEST_ASSERT_PRED(checkIsEqual(arrayCopy, arrayVal));
+    JSONTEST_ASSERT_PRED(checkIsEqual(stringCopy, stringVal));
+    arrayCopy.append("val4");
+    JSONTEST_ASSERT(arrayCopy.size() == 4);
+    arrayVal.append("new4");
+    arrayVal.append("new5");
+    JSONTEST_ASSERT(arrayVal.size() == 5);
+    JSONTEST_ASSERT(!(arrayCopy == arrayVal));
+    stringCopy = "another string";
+    JSONTEST_ASSERT(!(stringCopy == stringVal));
+    copy1.copy(arrayCopy);
+    copy2.copy(stringCopy);
+  }
+  JSONTEST_ASSERT(arrayVal.size() == 5);
+  JSONTEST_ASSERT(stringVal == "string value");
+  JSONTEST_ASSERT(copy1.size() == 4);
+  JSONTEST_ASSERT(copy2 == "another string");
+  copy1.copy(stringVal);
+  JSONTEST_ASSERT(copy1 == "string value");
+  copy2.copy(arrayVal);
+  JSONTEST_ASSERT(copy2.size() == 5);
 }
 
-
-void 
-ValueTest::checkIsEqual( const Json::Value &x, const Json::Value &y )
-{
-    JSONTEST_ASSERT( x == y );
-    JSONTEST_ASSERT( y == x );
-    JSONTEST_ASSERT( x <= y );
-    JSONTEST_ASSERT( y <= x );
-    JSONTEST_ASSERT( x >= y );
-    JSONTEST_ASSERT( y >= x );
-    JSONTEST_ASSERT( !(x < y) );
-    JSONTEST_ASSERT( !(y < x) );
-    JSONTEST_ASSERT( !(x > y) );
-    JSONTEST_ASSERT( !(y > x) );
-    JSONTEST_ASSERT( x.compare( y ) == 0 );
-    JSONTEST_ASSERT( y.compare( x ) == 0 );
+void ValueTest::checkIsLess(const Json::Value& x, const Json::Value& y) {
+  JSONTEST_ASSERT(x < y);
+  JSONTEST_ASSERT(y > x);
+  JSONTEST_ASSERT(x <= y);
+  JSONTEST_ASSERT(y >= x);
+  JSONTEST_ASSERT(!(x == y));
+  JSONTEST_ASSERT(!(y == x));
+  JSONTEST_ASSERT(!(x >= y));
+  JSONTEST_ASSERT(!(y <= x));
+  JSONTEST_ASSERT(!(x > y));
+  JSONTEST_ASSERT(!(y < x));
+  JSONTEST_ASSERT(x.compare(y) < 0);
+  JSONTEST_ASSERT(y.compare(x) >= 0);
 }
 
-int main( int argc, const char *argv[] )
-{
-   JsonTest::Runner runner;
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, checkNormalizeFloatingPointStr );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, memberCount );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, objects );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, arrays );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, null );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, strings );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, bools );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, integers );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, nonIntegers );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareNull );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareInt );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareUInt );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareDouble );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareString );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareBoolean );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareArray );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareObject );
-   JSONTEST_REGISTER_FIXTURE( runner, ValueTest, compareType );
-   return runner.runCommandLine( argc, argv );
+void ValueTest::checkIsEqual(const Json::Value& x, const Json::Value& y) {
+  JSONTEST_ASSERT(x == y);
+  JSONTEST_ASSERT(y == x);
+  JSONTEST_ASSERT(x <= y);
+  JSONTEST_ASSERT(y <= x);
+  JSONTEST_ASSERT(x >= y);
+  JSONTEST_ASSERT(y >= x);
+  JSONTEST_ASSERT(!(x < y));
+  JSONTEST_ASSERT(!(y < x));
+  JSONTEST_ASSERT(!(x > y));
+  JSONTEST_ASSERT(!(y > x));
+  JSONTEST_ASSERT(x.compare(y) == 0);
+  JSONTEST_ASSERT(y.compare(x) == 0);
 }
+
+JSONTEST_FIXTURE(ValueTest, typeChecksThrowExceptions) {
+#if JSON_USE_EXCEPTION
+
+  Json::Value intVal(1);
+  Json::Value strVal("Test");
+  Json::Value objVal(Json::objectValue);
+  Json::Value arrVal(Json::arrayValue);
+
+  JSONTEST_ASSERT_THROWS(intVal["test"]);
+  JSONTEST_ASSERT_THROWS(strVal["test"]);
+  JSONTEST_ASSERT_THROWS(arrVal["test"]);
+
+  JSONTEST_ASSERT_THROWS(intVal.removeMember("test"));
+  JSONTEST_ASSERT_THROWS(strVal.removeMember("test"));
+  JSONTEST_ASSERT_THROWS(arrVal.removeMember("test"));
+
+  JSONTEST_ASSERT_THROWS(intVal.getMemberNames());
+  JSONTEST_ASSERT_THROWS(strVal.getMemberNames());
+  JSONTEST_ASSERT_THROWS(arrVal.getMemberNames());
+
+  JSONTEST_ASSERT_THROWS(intVal[0]);
+  JSONTEST_ASSERT_THROWS(objVal[0]);
+  JSONTEST_ASSERT_THROWS(strVal[0]);
+
+  JSONTEST_ASSERT_THROWS(intVal.clear());
+
+  JSONTEST_ASSERT_THROWS(intVal.resize(1));
+  JSONTEST_ASSERT_THROWS(strVal.resize(1));
+  JSONTEST_ASSERT_THROWS(objVal.resize(1));
+
+  JSONTEST_ASSERT_THROWS(intVal.asCString());
+
+  JSONTEST_ASSERT_THROWS(objVal.asString());
+  JSONTEST_ASSERT_THROWS(arrVal.asString());
+
+  JSONTEST_ASSERT_THROWS(strVal.asInt());
+  JSONTEST_ASSERT_THROWS(objVal.asInt());
+  JSONTEST_ASSERT_THROWS(arrVal.asInt());
+
+  JSONTEST_ASSERT_THROWS(strVal.asUInt());
+  JSONTEST_ASSERT_THROWS(objVal.asUInt());
+  JSONTEST_ASSERT_THROWS(arrVal.asUInt());
+
+  JSONTEST_ASSERT_THROWS(strVal.asInt64());
+  JSONTEST_ASSERT_THROWS(objVal.asInt64());
+  JSONTEST_ASSERT_THROWS(arrVal.asInt64());
+
+  JSONTEST_ASSERT_THROWS(strVal.asUInt64());
+  JSONTEST_ASSERT_THROWS(objVal.asUInt64());
+  JSONTEST_ASSERT_THROWS(arrVal.asUInt64());
+
+  JSONTEST_ASSERT_THROWS(strVal.asDouble());
+  JSONTEST_ASSERT_THROWS(objVal.asDouble());
+  JSONTEST_ASSERT_THROWS(arrVal.asDouble());
+
+  JSONTEST_ASSERT_THROWS(strVal.asFloat());
+  JSONTEST_ASSERT_THROWS(objVal.asFloat());
+  JSONTEST_ASSERT_THROWS(arrVal.asFloat());
+
+  JSONTEST_ASSERT_THROWS(strVal.asBool());
+  JSONTEST_ASSERT_THROWS(objVal.asBool());
+  JSONTEST_ASSERT_THROWS(arrVal.asBool());
+
+#endif
+}
+
+JSONTEST_FIXTURE(ValueTest, offsetAccessors) {
+  Json::Value x;
+  JSONTEST_ASSERT(x.getOffsetStart() == 0);
+  JSONTEST_ASSERT(x.getOffsetLimit() == 0);
+  x.setOffsetStart(10);
+  x.setOffsetLimit(20);
+  JSONTEST_ASSERT(x.getOffsetStart() == 10);
+  JSONTEST_ASSERT(x.getOffsetLimit() == 20);
+  Json::Value y(x);
+  JSONTEST_ASSERT(y.getOffsetStart() == 10);
+  JSONTEST_ASSERT(y.getOffsetLimit() == 20);
+  Json::Value z;
+  z.swap(y);
+  JSONTEST_ASSERT(z.getOffsetStart() == 10);
+  JSONTEST_ASSERT(z.getOffsetLimit() == 20);
+  JSONTEST_ASSERT(y.getOffsetStart() == 0);
+  JSONTEST_ASSERT(y.getOffsetLimit() == 0);
+}
+
+JSONTEST_FIXTURE(ValueTest, StaticString) {
+  char mutant[] = "hello";
+  Json::StaticString ss(mutant);
+  Json::String regular(mutant);
+  mutant[1] = 'a';
+  JSONTEST_ASSERT_STRING_EQUAL("hallo", ss.c_str());
+  JSONTEST_ASSERT_STRING_EQUAL("hello", regular.c_str());
+  {
+    Json::Value root;
+    root["top"] = ss;
+    JSONTEST_ASSERT_STRING_EQUAL("hallo", root["top"].asString());
+    mutant[1] = 'u';
+    JSONTEST_ASSERT_STRING_EQUAL("hullo", root["top"].asString());
+  }
+  {
+    Json::Value root;
+    root["top"] = regular;
+    JSONTEST_ASSERT_STRING_EQUAL("hello", root["top"].asString());
+    mutant[1] = 'u';
+    JSONTEST_ASSERT_STRING_EQUAL("hello", root["top"].asString());
+  }
+}
+
+JSONTEST_FIXTURE(ValueTest, WideString) {
+  // https://github.com/open-source-parsers/jsoncpp/issues/756
+  const std::string uni = u8"式,进"; // "\u5f0f\uff0c\u8fdb"
+  std::string styled;
+  {
+    Json::Value v;
+    v["abc"] = uni;
+    styled = v.toStyledString();
+  }
+  Json::Value root;
+  {
+    JSONCPP_STRING errs;
+    std::istringstream iss(styled);
+    bool ok = parseFromStream(Json::CharReaderBuilder(), iss, &root, &errs);
+    JSONTEST_ASSERT(ok);
+    if (!ok) {
+      std::cerr << "errs: " << errs << std::endl;
+    }
+  }
+  JSONTEST_ASSERT_STRING_EQUAL(root["abc"].asString(), uni);
+}
+
+JSONTEST_FIXTURE(ValueTest, CommentBefore) {
+  Json::Value val; // fill val
+  val.setComment(Json::String("// this comment should appear before"),
+                 Json::commentBefore);
+  Json::StreamWriterBuilder wbuilder;
+  wbuilder.settings_["commentStyle"] = "All";
+  {
+    char const expected[] = "// this comment should appear before\nnull";
+    Json::String result = Json::writeString(wbuilder, val);
+    JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+    Json::String res2 = val.toStyledString();
+    Json::String exp2 = "\n";
+    exp2 += expected;
+    exp2 += "\n";
+    JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+  }
+  Json::Value other = "hello";
+  val.swapPayload(other);
+  {
+    char const expected[] = "// this comment should appear before\n\"hello\"";
+    Json::String result = Json::writeString(wbuilder, val);
+    JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+    Json::String res2 = val.toStyledString();
+    Json::String exp2 = "\n";
+    exp2 += expected;
+    exp2 += "\n";
+    JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+    JSONTEST_ASSERT_STRING_EQUAL("null\n", other.toStyledString());
+  }
+  val = "hello";
+  // val.setComment("// this comment should appear before",
+  // Json::CommentPlacement::commentBefore); Assignment over-writes comments.
+  {
+    char const expected[] = "\"hello\"";
+    Json::String result = Json::writeString(wbuilder, val);
+    JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+    Json::String res2 = val.toStyledString();
+    Json::String exp2 = "";
+    exp2 += expected;
+    exp2 += "\n";
+    JSONTEST_ASSERT_STRING_EQUAL(exp2, res2);
+  }
+}
+
+JSONTEST_FIXTURE(ValueTest, zeroes) {
+  char const cstr[] = "h\0i";
+  Json::String binary(cstr, sizeof(cstr)); // include trailing 0
+  JSONTEST_ASSERT_EQUAL(4U, binary.length());
+  Json::StreamWriterBuilder b;
+  {
+    Json::Value root;
+    root = binary;
+    JSONTEST_ASSERT_STRING_EQUAL(binary, root.asString());
+  }
+  {
+    char const top[] = "top";
+    Json::Value root;
+    root[top] = binary;
+    JSONTEST_ASSERT_STRING_EQUAL(binary, root[top].asString());
+    Json::Value removed;
+    bool did;
+    did = root.removeMember(top, top + sizeof(top) - 1U, &removed);
+    JSONTEST_ASSERT(did);
+    JSONTEST_ASSERT_STRING_EQUAL(binary, removed.asString());
+    did = root.removeMember(top, top + sizeof(top) - 1U, &removed);
+    JSONTEST_ASSERT(!did);
+    JSONTEST_ASSERT_STRING_EQUAL(binary, removed.asString()); // still
+  }
+}
+
+JSONTEST_FIXTURE(ValueTest, zeroesInKeys) {
+  char const cstr[] = "h\0i";
+  Json::String binary(cstr, sizeof(cstr)); // include trailing 0
+  JSONTEST_ASSERT_EQUAL(4U, binary.length());
+  {
+    Json::Value root;
+    root[binary] = "there";
+    JSONTEST_ASSERT_STRING_EQUAL("there", root[binary].asString());
+    JSONTEST_ASSERT(!root.isMember("h"));
+    JSONTEST_ASSERT(root.isMember(binary));
+    JSONTEST_ASSERT_STRING_EQUAL(
+        "there", root.get(binary, Json::Value::nullRef).asString());
+    Json::Value removed;
+    bool did;
+    did = root.removeMember(binary.data(), binary.data() + binary.length(),
+                            &removed);
+    JSONTEST_ASSERT(did);
+    JSONTEST_ASSERT_STRING_EQUAL("there", removed.asString());
+    did = root.removeMember(binary.data(), binary.data() + binary.length(),
+                            &removed);
+    JSONTEST_ASSERT(!did);
+    JSONTEST_ASSERT_STRING_EQUAL("there", removed.asString()); // still
+    JSONTEST_ASSERT(!root.isMember(binary));
+    JSONTEST_ASSERT_STRING_EQUAL(
+        "", root.get(binary, Json::Value::nullRef).asString());
+  }
+}
+
+JSONTEST_FIXTURE(ValueTest, specialFloats) {
+  Json::StreamWriterBuilder b;
+  b.settings_["useSpecialFloats"] = true;
+
+  Json::Value v = std::numeric_limits<double>::quiet_NaN();
+  Json::String expected = "NaN";
+  Json::String result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  v = std::numeric_limits<double>::infinity();
+  expected = "Infinity";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  v = -std::numeric_limits<double>::infinity();
+  expected = "-Infinity";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+}
+
+JSONTEST_FIXTURE(ValueTest, precision) {
+  Json::StreamWriterBuilder b;
+  b.settings_["precision"] = 5;
+
+  Json::Value v = 100.0 / 3;
+  Json::String expected = "33.333";
+  Json::String result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  v = 0.25000000;
+  expected = "0.25";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  v = 0.2563456;
+  expected = "0.25635";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 1;
+  expected = "0.3";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 17;
+  v = 1234857476305.256345694873740545068;
+  expected = "1234857476305.2563";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 24;
+  v = 0.256345694873740545068;
+  expected = "0.25634569487374054";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 5;
+  b.settings_["precisionType"] = "decimal";
+  v = 0.256345694873740545068;
+  expected = "0.25635";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 1;
+  b.settings_["precisionType"] = "decimal";
+  v = 0.256345694873740545068;
+  expected = "0.3";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+
+  b.settings_["precision"] = 10;
+  b.settings_["precisionType"] = "decimal";
+  v = 0.23300000;
+  expected = "0.233";
+  result = Json::writeString(b, v);
+  JSONTEST_ASSERT_STRING_EQUAL(expected, result);
+}
+
+struct WriterTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(WriterTest, dropNullPlaceholders) {
+  Json::FastWriter writer;
+  Json::Value nullValue;
+  JSONTEST_ASSERT(writer.write(nullValue) == "null\n");
+
+  writer.dropNullPlaceholders();
+  JSONTEST_ASSERT(writer.write(nullValue) == "\n");
+}
+
+struct StreamWriterTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(StreamWriterTest, dropNullPlaceholders) {
+  Json::StreamWriterBuilder b;
+  Json::Value nullValue;
+  b.settings_["dropNullPlaceholders"] = false;
+  JSONTEST_ASSERT(Json::writeString(b, nullValue) == "null");
+  b.settings_["dropNullPlaceholders"] = true;
+  JSONTEST_ASSERT(Json::writeString(b, nullValue).empty());
+}
+
+JSONTEST_FIXTURE(StreamWriterTest, writeZeroes) {
+  Json::String binary("hi", 3); // include trailing 0
+  JSONTEST_ASSERT_EQUAL(3, binary.length());
+  Json::String expected("\"hi\\u0000\""); // unicoded zero
+  Json::StreamWriterBuilder b;
+  {
+    Json::Value root;
+    root = binary;
+    JSONTEST_ASSERT_STRING_EQUAL(binary, root.asString());
+    Json::String out = Json::writeString(b, root);
+    JSONTEST_ASSERT_EQUAL(expected.size(), out.size());
+    JSONTEST_ASSERT_STRING_EQUAL(expected, out);
+  }
+  {
+    Json::Value root;
+    root["top"] = binary;
+    JSONTEST_ASSERT_STRING_EQUAL(binary, root["top"].asString());
+    Json::String out = Json::writeString(b, root["top"]);
+    JSONTEST_ASSERT_STRING_EQUAL(expected, out);
+  }
+}
+
+struct ReaderTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(ReaderTest, parseWithNoErrors) {
+  Json::Reader reader;
+  Json::Value root;
+  bool ok = reader.parse("{ \"property\" : \"value\" }", root);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT(reader.getFormattedErrorMessages().empty());
+  JSONTEST_ASSERT(reader.getStructuredErrors().empty());
+}
+
+JSONTEST_FIXTURE(ReaderTest, parseWithNoErrorsTestingOffsets) {
+  Json::Reader reader;
+  Json::Value root;
+  bool ok = reader.parse("{ \"property\" : [\"value\", \"value2\"], \"obj\" : "
+                         "{ \"nested\" : 123, \"bool\" : true}, \"null\" : "
+                         "null, \"false\" : false }",
+                         root);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT(reader.getFormattedErrorMessages().empty());
+  JSONTEST_ASSERT(reader.getStructuredErrors().empty());
+  JSONTEST_ASSERT(root["property"].getOffsetStart() == 15);
+  JSONTEST_ASSERT(root["property"].getOffsetLimit() == 34);
+  JSONTEST_ASSERT(root["property"][0].getOffsetStart() == 16);
+  JSONTEST_ASSERT(root["property"][0].getOffsetLimit() == 23);
+  JSONTEST_ASSERT(root["property"][1].getOffsetStart() == 25);
+  JSONTEST_ASSERT(root["property"][1].getOffsetLimit() == 33);
+  JSONTEST_ASSERT(root["obj"].getOffsetStart() == 44);
+  JSONTEST_ASSERT(root["obj"].getOffsetLimit() == 76);
+  JSONTEST_ASSERT(root["obj"]["nested"].getOffsetStart() == 57);
+  JSONTEST_ASSERT(root["obj"]["nested"].getOffsetLimit() == 60);
+  JSONTEST_ASSERT(root["obj"]["bool"].getOffsetStart() == 71);
+  JSONTEST_ASSERT(root["obj"]["bool"].getOffsetLimit() == 75);
+  JSONTEST_ASSERT(root["null"].getOffsetStart() == 87);
+  JSONTEST_ASSERT(root["null"].getOffsetLimit() == 91);
+  JSONTEST_ASSERT(root["false"].getOffsetStart() == 103);
+  JSONTEST_ASSERT(root["false"].getOffsetLimit() == 108);
+  JSONTEST_ASSERT(root.getOffsetStart() == 0);
+  JSONTEST_ASSERT(root.getOffsetLimit() == 110);
+}
+
+JSONTEST_FIXTURE(ReaderTest, parseWithOneError) {
+  Json::Reader reader;
+  Json::Value root;
+  bool ok = reader.parse("{ \"property\" :: \"value\" }", root);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(reader.getFormattedErrorMessages() ==
+                  "* Line 1, Column 15\n  Syntax error: value, object or array "
+                  "expected.\n");
+  std::vector<Json::Reader::StructuredError> errors =
+      reader.getStructuredErrors();
+  JSONTEST_ASSERT(errors.size() == 1);
+  JSONTEST_ASSERT(errors.at(0).offset_start == 14);
+  JSONTEST_ASSERT(errors.at(0).offset_limit == 15);
+  JSONTEST_ASSERT(errors.at(0).message ==
+                  "Syntax error: value, object or array expected.");
+}
+
+JSONTEST_FIXTURE(ReaderTest, parseChineseWithOneError) {
+  Json::Reader reader;
+  Json::Value root;
+  bool ok = reader.parse("{ \"pr佐藤erty\" :: \"value\" }", root);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(reader.getFormattedErrorMessages() ==
+                  "* Line 1, Column 19\n  Syntax error: value, object or array "
+                  "expected.\n");
+  std::vector<Json::Reader::StructuredError> errors =
+      reader.getStructuredErrors();
+  JSONTEST_ASSERT(errors.size() == 1);
+  JSONTEST_ASSERT(errors.at(0).offset_start == 18);
+  JSONTEST_ASSERT(errors.at(0).offset_limit == 19);
+  JSONTEST_ASSERT(errors.at(0).message ==
+                  "Syntax error: value, object or array expected.");
+}
+
+JSONTEST_FIXTURE(ReaderTest, parseWithDetailError) {
+  Json::Reader reader;
+  Json::Value root;
+  bool ok = reader.parse("{ \"property\" : \"v\\alue\" }", root);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(reader.getFormattedErrorMessages() ==
+                  "* Line 1, Column 16\n  Bad escape sequence in string\nSee "
+                  "Line 1, Column 20 for detail.\n");
+  std::vector<Json::Reader::StructuredError> errors =
+      reader.getStructuredErrors();
+  JSONTEST_ASSERT(errors.size() == 1);
+  JSONTEST_ASSERT(errors.at(0).offset_start == 15);
+  JSONTEST_ASSERT(errors.at(0).offset_limit == 23);
+  JSONTEST_ASSERT(errors.at(0).message == "Bad escape sequence in string");
+}
+
+struct CharReaderTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithNoErrors) {
+  Json::CharReaderBuilder b;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  Json::Value root;
+  char const doc[] = "{ \"property\" : \"value\" }";
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT(errs.empty());
+  delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithNoErrorsTestingOffsets) {
+  Json::CharReaderBuilder b;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  Json::Value root;
+  char const doc[] = "{ \"property\" : [\"value\", \"value2\"], \"obj\" : "
+                     "{ \"nested\" : 123, \"bool\" : true}, \"null\" : "
+                     "null, \"false\" : false }";
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT(errs.empty());
+  delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithOneError) {
+  Json::CharReaderBuilder b;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  Json::Value root;
+  char const doc[] = "{ \"property\" :: \"value\" }";
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(errs ==
+                  "* Line 1, Column 15\n  Syntax error: value, object or array "
+                  "expected.\n");
+  delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseChineseWithOneError) {
+  Json::CharReaderBuilder b;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  Json::Value root;
+  char const doc[] = "{ \"pr佐藤erty\" :: \"value\" }";
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(errs ==
+                  "* Line 1, Column 19\n  Syntax error: value, object or array "
+                  "expected.\n");
+  delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithDetailError) {
+  Json::CharReaderBuilder b;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  Json::Value root;
+  char const doc[] = "{ \"property\" : \"v\\alue\" }";
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT(errs ==
+                  "* Line 1, Column 16\n  Bad escape sequence in string\nSee "
+                  "Line 1, Column 20 for detail.\n");
+  delete reader;
+}
+
+JSONTEST_FIXTURE(CharReaderTest, parseWithStackLimit) {
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] = "{ \"property\" : \"value\" }";
+  {
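+    // A stackLimit of 2 covers this document (root object plus one nested
+    // value), so parsing is expected to succeed.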
+    b.settings_["stackLimit"] = 2;
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL("value", root["property"]);
+    delete reader;
+  }
+  {
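+    // A stackLimit of 1 is exceeded by the nested value, so the parser is
+    // expected to throw.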
+    b.settings_["stackLimit"] = 1;
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    JSONTEST_ASSERT_THROWS(
+        reader->parse(doc, doc + std::strlen(doc), &root, &errs));
+    delete reader;
+  }
+}
+
+struct CharReaderStrictModeTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderStrictModeTest, dupKeys) {
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] =
+      "{ \"property\" : \"value\", \"key\" : \"val1\", \"key\" : \"val2\" }";
+  {
+    b.strictMode(&b.settings_);
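+    // strictMode rejects duplicate keys: parsing stops at the second "key",
+    // leaving the first value in root.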
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(!ok);
+    JSONTEST_ASSERT_STRING_EQUAL("* Line 1, Column 41\n"
+                                 "  Duplicate key: 'key'\n",
+                                 errs);
+    JSONTEST_ASSERT_EQUAL("val1", root["key"]); // so far
+    delete reader;
+  }
+}
+struct CharReaderFailIfExtraTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, issue164) {
+  // This is interpreted as a string value followed by a colon.
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] = " \"property\" : \"value\" }";
+  {
+    b.settings_["failIfExtra"] = false;
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL("property", root);
+    delete reader;
+  }
+  {
+    b.settings_["failIfExtra"] = true;
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(!ok);
+    JSONTEST_ASSERT_STRING_EQUAL(errs,
+                                 "* Line 1, Column 13\n"
+                                 "  Extra non-whitespace after JSON value.\n");
+    JSONTEST_ASSERT_EQUAL("property", root);
+    delete reader;
+  }
+  {
+    b.settings_["failIfExtra"] = false;
+    b.strictMode(&b.settings_);
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(!ok);
+    JSONTEST_ASSERT_STRING_EQUAL(errs,
+                                 "* Line 1, Column 13\n"
+                                 "  Extra non-whitespace after JSON value.\n");
+    JSONTEST_ASSERT_EQUAL("property", root);
+    delete reader;
+  }
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, issue107) {
+  // This is interpreted as an int value followed by a colon.
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] = "1:2:3";
+  b.settings_["failIfExtra"] = true;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(!ok);
+  JSONTEST_ASSERT_STRING_EQUAL("* Line 1, Column 2\n"
+                               "  Extra non-whitespace after JSON value.\n",
+                               errs);
+  JSONTEST_ASSERT_EQUAL(1, root.asInt());
+  delete reader;
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterObject) {
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  {
+    char const doc[] = "{ \"property\" : \"value\" } //trailing\n//comment\n";
+    b.settings_["failIfExtra"] = true;
+    Json::CharReader* reader(b.newCharReader());
+    Json::String errs;
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL("value", root["property"]);
+    delete reader;
+  }
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterArray) {
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] = "[ \"property\" , \"value\" ] //trailing\n//comment\n";
+  b.settings_["failIfExtra"] = true;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT_STRING_EQUAL("", errs);
+  JSONTEST_ASSERT_EQUAL("value", root[1u]);
+  delete reader;
+}
+JSONTEST_FIXTURE(CharReaderFailIfExtraTest, commentAfterBool) {
+  Json::CharReaderBuilder b;
+  Json::Value root;
+  char const doc[] = " true /*trailing\ncomment*/";
+  b.settings_["failIfExtra"] = true;
+  Json::CharReader* reader(b.newCharReader());
+  Json::String errs;
+  bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+  JSONTEST_ASSERT(ok);
+  JSONTEST_ASSERT_STRING_EQUAL("", errs);
+  JSONTEST_ASSERT_EQUAL(true, root.asBool());
+  delete reader;
+}
+struct CharReaderAllowDropNullTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowDropNullTest, issue178) {
+  Json::CharReaderBuilder b;
+  b.settings_["allowDroppedNullPlaceholders"] = true;
+  Json::Value root;
+  Json::String errs;
+  Json::CharReader* reader(b.newCharReader());
+  {
+    char const doc[] = "{\"a\":,\"b\":true}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::nullValue, root.get("a", true));
+  }
+  {
+    char const doc[] = "{\"a\":}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(1u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::nullValue, root.get("a", true));
+  }
+  {
+    char const doc[] = "[]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL(0u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::arrayValue, root);
+  }
+  {
+    char const doc[] = "[null]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL(1u, root.size());
+  }
+  {
+    char const doc[] = "[,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+  }
+  {
+    char const doc[] = "[,,,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(4u, root.size());
+  }
+  {
+    char const doc[] = "[null,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+  }
+  {
+    char const doc[] = "[,null]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+  }
+  {
+    char const doc[] = "[,,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(3u, root.size());
+  }
+  {
+    char const doc[] = "[null,,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(3u, root.size());
+  }
+  {
+    char const doc[] = "[,null,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(3u, root.size());
+  }
+  {
+    char const doc[] = "[,,null]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL(3u, root.size());
+  }
+  {
+    char const doc[] = "[[],,,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(4u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[0u]);
+  }
+  {
+    char const doc[] = "[,[],,]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(4u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[1u]);
+  }
+  {
+    char const doc[] = "[,,,[]]";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT(errs.empty());
+    JSONTEST_ASSERT_EQUAL(4u, root.size());
+    JSONTEST_ASSERT_EQUAL(Json::arrayValue, root[3u]);
+  }
+  delete reader;
+}
+
+struct CharReaderAllowSingleQuotesTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowSingleQuotesTest, issue182) {
+  Json::CharReaderBuilder b;
+  b.settings_["allowSingleQuotes"] = true;
+  Json::Value root;
+  Json::String errs;
+  Json::CharReader* reader(b.newCharReader());
+  {
+    char const doc[] = "{'a':true,\"b\":true}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_EQUAL(true, root.get("a", false));
+    JSONTEST_ASSERT_EQUAL(true, root.get("b", false));
+  }
+  {
+    char const doc[] = "{'a': 'x', \"b\":'y'}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_STRING_EQUAL("x", root["a"].asString());
+    JSONTEST_ASSERT_STRING_EQUAL("y", root["b"].asString());
+  }
+  delete reader;
+}
+
+struct CharReaderAllowZeroesTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowZeroesTest, issue176) {
+  Json::CharReaderBuilder b;
+  b.settings_["allowSingleQuotes"] = true;
+  Json::Value root;
+  Json::String errs;
+  Json::CharReader* reader(b.newCharReader());
+  {
+    char const doc[] = "{'a':true,\"b\":true}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_EQUAL(true, root.get("a", false));
+    JSONTEST_ASSERT_EQUAL(true, root.get("b", false));
+  }
+  {
+    char const doc[] = "{'a': 'x', \"b\":'y'}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_STRING_EQUAL("x", root["a"].asString());
+    JSONTEST_ASSERT_STRING_EQUAL("y", root["b"].asString());
+  }
+  delete reader;
+}
+
+struct CharReaderAllowSpecialFloatsTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(CharReaderAllowSpecialFloatsTest, issue209) {
+  Json::CharReaderBuilder b;
+  b.settings_["allowSpecialFloats"] = true;
+  Json::Value root;
+  Json::String errs;
+  Json::CharReader* reader(b.newCharReader());
+  {
+    char const doc[] = "{\"a\":NaN,\"b\":Infinity,\"c\":-Infinity}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(3u, root.size());
+    double n = root["a"].asDouble();
+    JSONTEST_ASSERT(std::isnan(n));
+    JSONTEST_ASSERT_EQUAL(std::numeric_limits<double>::infinity(),
+                          root.get("b", 0.0));
+    JSONTEST_ASSERT_EQUAL(-std::numeric_limits<double>::infinity(),
+                          root.get("c", 0.0));
+  }
+
+  struct TestData {
+    int line;
+    bool ok;
+    Json::String in;
+  };
+  const TestData test_data[] = {
+      {__LINE__, true, "{\"a\":9}"},          //
+      {__LINE__, false, "{\"a\":0Infinity}"}, //
+      {__LINE__, false, "{\"a\":1Infinity}"}, //
+      {__LINE__, false, "{\"a\":9Infinity}"}, //
+      {__LINE__, false, "{\"a\":0nfinity}"},  //
+      {__LINE__, false, "{\"a\":1nfinity}"},  //
+      {__LINE__, false, "{\"a\":9nfinity}"},  //
+      {__LINE__, false, "{\"a\":nfinity}"},   //
+      {__LINE__, false, "{\"a\":.nfinity}"},  //
+      {__LINE__, false, "{\"a\":9nfinity}"},  //
+      {__LINE__, false, "{\"a\":-nfinity}"},  //
+      {__LINE__, true, "{\"a\":Infinity}"},   //
+      {__LINE__, false, "{\"a\":.Infinity}"}, //
+      {__LINE__, false, "{\"a\":_Infinity}"}, //
+      {__LINE__, false, "{\"a\":_nfinity}"},  //
+      {__LINE__, true, "{\"a\":-Infinity}"}   //
+  };
+  for (const auto& td : test_data) {
+    bool ok = reader->parse(&*td.in.begin(), &*td.in.begin() + td.in.size(),
+                            &root, &errs);
+    JSONTEST_ASSERT(td.ok == ok) << "line:" << td.line << "\n"
+                                 << "  expected: {"
+                                 << "ok:" << td.ok << ", in:\'" << td.in << "\'"
+                                 << "}\n"
+                                 << "  actual: {"
+                                 << "ok:" << ok << "}\n";
+  }
+
+  {
+    char const doc[] = "{\"posInf\": Infinity, \"NegInf\": -Infinity}";
+    bool ok = reader->parse(doc, doc + std::strlen(doc), &root, &errs);
+    JSONTEST_ASSERT(ok);
+    JSONTEST_ASSERT_STRING_EQUAL("", errs);
+    JSONTEST_ASSERT_EQUAL(2u, root.size());
+    JSONTEST_ASSERT_EQUAL(std::numeric_limits<double>::infinity(),
+                          root["posInf"].asDouble());
+    JSONTEST_ASSERT_EQUAL(-std::numeric_limits<double>::infinity(),
+                          root["NegInf"].asDouble());
+  }
+  delete reader;
+}
+
+struct BuilderTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(BuilderTest, settings) {
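+  // validate() succeeds only while settings_ contains recognized keys; adding
+  // the unknown "foo" entry makes it fail for both builders.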
+  {
+    Json::Value errs;
+    Json::CharReaderBuilder rb;
+    JSONTEST_ASSERT_EQUAL(false, rb.settings_.isMember("foo"));
+    JSONTEST_ASSERT_EQUAL(true, rb.validate(&errs));
+    rb["foo"] = "bar";
+    JSONTEST_ASSERT_EQUAL(true, rb.settings_.isMember("foo"));
+    JSONTEST_ASSERT_EQUAL(false, rb.validate(&errs));
+  }
+  {
+    Json::Value errs;
+    Json::StreamWriterBuilder wb;
+    JSONTEST_ASSERT_EQUAL(false, wb.settings_.isMember("foo"));
+    JSONTEST_ASSERT_EQUAL(true, wb.validate(&errs));
+    wb["foo"] = "bar";
+    JSONTEST_ASSERT_EQUAL(true, wb.settings_.isMember("foo"));
+    JSONTEST_ASSERT_EQUAL(false, wb.validate(&errs));
+  }
+}
+
+struct IteratorTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(IteratorTest, distance) {
+  Json::Value json;
+  json["k1"] = "a";
+  json["k2"] = "b";
+  int dist = 0;
+  Json::String str;
+  for (Json::ValueIterator it = json.begin(); it != json.end(); ++it) {
+    dist = it - json.begin();
+    str = it->asString().c_str();
+  }
+  JSONTEST_ASSERT_EQUAL(1, dist);
+  JSONTEST_ASSERT_STRING_EQUAL("b", str);
+}
+
+JSONTEST_FIXTURE(IteratorTest, names) {
+  Json::Value json;
+  json["k1"] = "a";
+  json["k2"] = "b";
+  Json::ValueIterator it = json.begin();
+  JSONTEST_ASSERT(it != json.end());
+  JSONTEST_ASSERT_EQUAL(Json::Value("k1"), it.key());
+  JSONTEST_ASSERT_STRING_EQUAL("k1", it.name());
+  JSONTEST_ASSERT_EQUAL(-1, it.index());
+  ++it;
+  JSONTEST_ASSERT(it != json.end());
+  JSONTEST_ASSERT_EQUAL(Json::Value("k2"), it.key());
+  JSONTEST_ASSERT_STRING_EQUAL("k2", it.name());
+  JSONTEST_ASSERT_EQUAL(-1, it.index());
+  ++it;
+  JSONTEST_ASSERT(it == json.end());
+}
+
+JSONTEST_FIXTURE(IteratorTest, indexes) {
+  Json::Value json;
+  json[0] = "a";
+  json[1] = "b";
+  Json::ValueIterator it = json.begin();
+  JSONTEST_ASSERT(it != json.end());
+  JSONTEST_ASSERT_EQUAL(Json::Value(Json::ArrayIndex(0)), it.key());
+  JSONTEST_ASSERT_STRING_EQUAL("", it.name());
+  JSONTEST_ASSERT_EQUAL(0, it.index());
+  ++it;
+  JSONTEST_ASSERT(it != json.end());
+  JSONTEST_ASSERT_EQUAL(Json::Value(Json::ArrayIndex(1)), it.key());
+  JSONTEST_ASSERT_STRING_EQUAL("", it.name());
+  JSONTEST_ASSERT_EQUAL(1, it.index());
+  ++it;
+  JSONTEST_ASSERT(it == json.end());
+}
+
+JSONTEST_FIXTURE(IteratorTest, const) {
+  Json::Value const v;
+  JSONTEST_ASSERT_THROWS(
+      Json::Value::iterator it(v.begin()) // Compile, but throw.
+  );
+
+  Json::Value value;
+
+  for (int i = 9; i < 12; ++i) {
+    Json::OStringStream out;
+    out << std::setw(2) << i;
+    Json::String str = out.str();
+    value[str] = str;
+  }
+
+  Json::OStringStream out;
+  // In older versions of jsoncpp, this line did not compile.
+  Json::Value::const_iterator iter = value.begin();
+  for (; iter != value.end(); ++iter) {
+    out << *iter << ',';
+  }
+  Json::String expected = "\" 9\",\"10\",\"11\",";
+  JSONTEST_ASSERT_STRING_EQUAL(expected, out.str());
+}
+
+struct RValueTest : JsonTest::TestCase {};
+
+JSONTEST_FIXTURE(RValueTest, moveConstruction) {
+  Json::Value json;
+  json["key"] = "value";
+  Json::Value moved = std::move(json);
+  JSONTEST_ASSERT(moved != json); // Possibly not nullValue; definitely not
+                                  // equal.
+  JSONTEST_ASSERT_EQUAL(Json::objectValue, moved.type());
+  JSONTEST_ASSERT_EQUAL(Json::stringValue, moved["key"].type());
+}
+
+struct FuzzTest : JsonTest::TestCase {};
+
+// Build and run the fuzz test without any fuzzer, so that it's guaranteed not
+// to go out of date, even if it's never run as an actual fuzz test.
+JSONTEST_FIXTURE(FuzzTest, fuzzDoesntCrash) {
+  const std::string example = "{}";
+  JSONTEST_ASSERT_EQUAL(
+      0,
+      LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t*>(example.c_str()),
+                             example.size()));
+}
+
+int main(int argc, const char* argv[]) {
+  JsonTest::Runner runner;
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, checkNormalizeFloatingPointStr);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, memberCount);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, objects);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, arrays);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, arrayIssue252);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, null);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, strings);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, bools);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, integers);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, nonIntegers);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareNull);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareInt);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareUInt);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareDouble);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareString);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareBoolean);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareArray);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareObject);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, compareType);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, CopyObject);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, offsetAccessors);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, typeChecksThrowExceptions);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, StaticString);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, WideString);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, CommentBefore);
+  // JSONTEST_REGISTER_FIXTURE(runner, ValueTest, nulls);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, zeroes);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, zeroesInKeys);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, specialFloats);
+  JSONTEST_REGISTER_FIXTURE(runner, ValueTest, precision);
+
+  JSONTEST_REGISTER_FIXTURE(runner, WriterTest, dropNullPlaceholders);
+  JSONTEST_REGISTER_FIXTURE(runner, StreamWriterTest, dropNullPlaceholders);
+  JSONTEST_REGISTER_FIXTURE(runner, StreamWriterTest, writeZeroes);
+
+  JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseWithNoErrors);
+  JSONTEST_REGISTER_FIXTURE(runner, ReaderTest,
+                            parseWithNoErrorsTestingOffsets);
+  JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseWithOneError);
+  JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseChineseWithOneError);
+  JSONTEST_REGISTER_FIXTURE(runner, ReaderTest, parseWithDetailError);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithNoErrors);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest,
+                            parseWithNoErrorsTestingOffsets);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithOneError);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseChineseWithOneError);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithDetailError);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderTest, parseWithStackLimit);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderStrictModeTest, dupKeys);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, issue164);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest, issue107);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest,
+                            commentAfterObject);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest,
+                            commentAfterArray);
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderFailIfExtraTest,
+                            commentAfterBool);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowDropNullTest, issue178);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowSingleQuotesTest, issue182);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowZeroesTest, issue176);
+
+  JSONTEST_REGISTER_FIXTURE(runner, CharReaderAllowSpecialFloatsTest, issue209);
+
+  JSONTEST_REGISTER_FIXTURE(runner, BuilderTest, settings);
+
+  JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, distance);
+  JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, names);
+  JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, indexes);
+  JSONTEST_REGISTER_FIXTURE(runner, IteratorTest, const);
+
+  JSONTEST_REGISTER_FIXTURE(runner, RValueTest, moveConstruction);
+
+  JSONTEST_REGISTER_FIXTURE(runner, FuzzTest, fuzzDoesntCrash);
+
+  return runner.runCommandLine(argc, argv);
+}
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/sconscript b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/sconscript
deleted file mode 100644
index 915fd01..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/src/test_lib_json/sconscript
+++ /dev/null
@@ -1,10 +0,0 @@
-Import( 'env_testing buildUnitTests' )
-
-buildUnitTests( env_testing, Split( """
-    main.cpp
-    jsontest.cpp
-     """ ),
-    'test_lib_json' )
-
-# For 'check' to work, 'libs' must be built first.
-env_testing.Depends('test_lib_json', '#libs')
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/cleantests.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/cleantests.py
index c38fd8f..36d5b9b 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/cleantests.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/cleantests.py
@@ -1,10 +1,16 @@
-# removes all files created during testing
+# Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+"""Removes all files created during testing."""
+
 import glob
 import os
 
 paths = []
 for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
-    paths += glob.glob( 'data/' + pattern )
+    paths += glob.glob('data/' + pattern)
 
 for path in paths:
-    os.unlink( path )
+    os.unlink(path)
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/fail_test_stack_limit.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/fail_test_stack_limit.json
new file mode 100644
index 0000000..7524e0b
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/fail_test_stack_limit.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_08.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_08.expected
index c8db822..caf5352 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_08.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_08.expected
@@ -1,2 +1,3 @@
+// C++ style comment
 .=null
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_09.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_09.expected
index c8db822..8b129da4 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_09.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_basic_09.expected
@@ -1,2 +1,4 @@
+/* C style comment
+ */
 .=null
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.expected
new file mode 100644
index 0000000..284a797
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.expected
@@ -0,0 +1,4 @@
+// Comment for array
+.=[]
+// Comment within array
+.[0]="one-element"
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.json
new file mode 100644
index 0000000..4df577a
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_00.json
@@ -0,0 +1,5 @@
+// Comment for array
+[
+   // Comment within array
+   "one-element"
+]
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.expected
index 2a7f00c..1ed01ba 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.expected
@@ -1,5 +1,7 @@
 .={}
+// Comment for array
 .test=[]
+// Comment within array
 .test[0]={}
 .test[0].a="aaa"
 .test[1]={}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.json
index 7363490..6defe40 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.json
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_01.json
@@ -1,6 +1,8 @@
 {
     "test":
+    // Comment for array
     [
+       // Comment within array
        { "a" : "aaa" }, // Comment for a
        { "b" : "bbb" }, // Comment for b
        { "c" : "ccc" } // Comment for c
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.expected
new file mode 100644
index 0000000..8986dba
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.expected
@@ -0,0 +1,23 @@
+.={}
+/* C-style comment
+
+    C-style-2 comment */
+.c-test={}
+.c-test.a=1
+/* Internal comment c-style */
+.c-test.b=2
+// C++-style comment
+.cpp-test={}
+// Multiline comment cpp-style
+// Second line
+.cpp-test.c=3
+// Comment before double
+.cpp-test.d=4.1
+// Comment before string
+.cpp-test.e="e-string"
+// Comment before true
+.cpp-test.f=true
+// Comment before false
+.cpp-test.g=false
+// Comment before null
+.cpp-test.h=null
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.json b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.json
new file mode 100644
index 0000000..f5042e0
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_comment_02.json
@@ -0,0 +1,26 @@
+{
+   /* C-style comment
+
+    C-style-2 comment */
+   "c-test" : {
+      "a" : 1,
+      /* Internal comment c-style */
+      "b" : 2
+   },
+   // C++-style comment
+   "cpp-test" : {
+      // Multiline comment cpp-style
+      // Second line
+      "c" : 3,
+      // Comment before double
+      "d" : 4.1,
+      // Comment before string
+      "e" : "e-string",
+      // Comment before true
+      "f" : true,
+      // Comment before false
+      "g" : false,
+      // Comment before null
+      "h" : null
+   }
+}
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_01.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_01.expected
index 593f1db..463e149 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_01.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_01.expected
@@ -1 +1,2 @@
+// Max signed integer
 .=2147483647
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_02.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_02.expected
index 4b83bd7..0773e08 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_02.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_02.expected
@@ -1 +1,2 @@
+// Min signed integer
 .=-2147483648
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_03.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_03.expected
index 37c1cb1..c7efff7 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_03.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_03.expected
@@ -1 +1,2 @@
+// Max unsigned integer
 .=4294967295
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_04.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_04.expected
index b7b548e..39f8567 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_04.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_integer_04.expected
@@ -1,2 +1,3 @@
+// Min unsigned integer
 .=0
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_preserve_comment_01.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_preserve_comment_01.expected
index 8d88041..2797aa7 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_preserve_comment_01.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_preserve_comment_01.expected
@@ -1,3 +1,11 @@
+/* A comment
+   at the beginning of the file.
+ */
 .={}
 .first=1
+/* Comment before 'second'
+ */
 .second=2
+/* A comment at 
+   the end of the file.
+ */
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_01.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_01.expected
index ae23572..9514827 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_01.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_01.expected
@@ -1,2 +1,3 @@
+// 2^33 => out of integer range, switch to double
 .=8589934592
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_02.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_02.expected
index df8de42..b80c004 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_02.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_02.expected
@@ -1,2 +1,3 @@
+// -2^32 => out of signed integer range, switch to double
 .=-4294967295
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_03.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_03.expected
index df8de42..b80c004 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_03.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_03.expected
@@ -1,2 +1,3 @@
+// -2^32 => out of signed integer range, switch to double
 .=-4294967295
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_04.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_04.expected
index d726abe..ff71a23 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_04.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_04.expected
@@ -1,2 +1,3 @@
+// 1.2345678
 .=1.2345678
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_05.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_05.expected
index 949fd8f..7a46093 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_05.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_05.expected
@@ -1,3 +1,4 @@
+// 1234567.8
 .=1234567.8
 
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_06.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_06.expected
index 03b7d7f..a4a004d 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_06.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_06.expected
@@ -1,3 +1,4 @@
+// -1.2345678
 .=-1.2345678
 
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_07.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_07.expected
index 12025a4..dc02a89 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_07.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_07.expected
@@ -1,3 +1,4 @@
+// -1234567.8
 .=-1234567.8
 
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_08.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_08.expected
index 9a5f062..b1deef9 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_08.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_08.expected
@@ -1 +1,4 @@
+// Out of 32-bit integer range, switch to double in 32-bit mode. Length the
+// same as UINT_MAX in base 10 and digit less than UINT_MAX's last digit in
+// order to catch a bug in the parsing code.
 .=4300000001
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_09.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_09.expected
index 6da815e..aa2dbb2 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_09.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_09.expected
@@ -1 +1,4 @@
+// Out of 64-bit integer range, switch to double in all modes. Length the same
+// as ULONG_MAX in base 10 and digit less than ULONG_MAX's last digit in order
+// to catch a bug in the parsing code.
 .=1.9e+19
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_10.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_10.expected
index 01126bf..d28a430 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_10.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_10.expected
@@ -1 +1,4 @@
+// Out of 32-bit signed integer range, switch to double in all modes. Length
+// the same as INT_MIN in base 10 and digit less than INT_MIN's last digit in
+// order to catch a bug in the parsing code.
 .=-2200000001
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_11.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_11.expected
index 17f4187..2551946 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_11.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_11.expected
@@ -1 +1,4 @@
+// Out of 64-bit signed integer range, switch to double in all modes. Length
+// the same as LONG_MIN in base 10 and digit less than LONG_MIN's last digit in
+// order to catch a bug in the parsing code.
 .=-9.3e+18
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_12.expected b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_12.expected
index a000319..93e2417 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_12.expected
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/data/test_real_12.expected
@@ -1 +1,2 @@
+// 2^64 -> switch to double.
 .=1.844674407370955e+19
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/generate_expected.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/generate_expected.py
index 5b215c4..e049ab5 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/generate_expected.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/generate_expected.py
@@ -1,11 +1,17 @@
+# Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+from __future__ import print_function
 import glob
 import os.path
-for path in glob.glob( '*.json' ):
+for path in glob.glob('*.json'):
     text = file(path,'rt').read()
     target = os.path.splitext(path)[0] + '.expected'
-    if os.path.exists( target ):
-        print 'skipping:', target
+    if os.path.exists(target):
+        print('skipping:', target)
     else:
-        print 'creating:', target
+        print('creating:', target)
         file(target,'wt').write(text)
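
The updated generate_expected.py imports print_function but still calls the Python-2-only file() builtin, so it remains Python-2 only. For comparison, a hypothetical Python 3 equivalent of the same loop (not part of this patch), with open() in place of file():

    # Hypothetical Python 3 version of generate_expected.py: same behaviour,
    # with open() replacing the Python-2-only file() builtin.
    import glob
    import os.path

    for path in glob.glob('*.json'):
        with open(path, 'rt') as f:
            text = f.read()
        target = os.path.splitext(path)[0] + '.expected'
        if os.path.exists(target):
            print('skipping:', target)
        else:
            print('creating:', target)
            with open(target, 'wt') as f:
                f.write(text)
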
 
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/pyjsontestrunner.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/pyjsontestrunner.py
index 504f3db..bd749b5 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/pyjsontestrunner.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/pyjsontestrunner.py
@@ -1,12 +1,19 @@
-# Simple implementation of a json test runner to run the test against json-py.
+# Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
 
+"""Simple implementation of a json test runner to run the test against
+json-py."""
+
+from __future__ import print_function
 import sys
 import os.path
 import json
 import types
 
 if len(sys.argv) != 2:
-    print "Usage: %s input-json-file", sys.argv[0]
+    print("Usage: %s input-json-file", sys.argv[0])
     sys.exit(3)
     
 input_path = sys.argv[1]
@@ -15,50 +22,50 @@
 rewrite_path = base_path + '.rewrite'
 rewrite_actual_path = base_path + '.actual-rewrite'
 
-def valueTreeToString( fout, value, path = '.' ):
+def valueTreeToString(fout, value, path = '.'):
     ty = type(value) 
     if ty  is types.DictType:
-        fout.write( '%s={}\n' % path )
+        fout.write('%s={}\n' % path)
         suffix = path[-1] != '.' and '.' or ''
         names = value.keys()
         names.sort()
         for name in names:
-            valueTreeToString( fout, value[name], path + suffix + name )
+            valueTreeToString(fout, value[name], path + suffix + name)
     elif ty is types.ListType:
-        fout.write( '%s=[]\n' % path )
-        for index, childValue in zip( xrange(0,len(value)), value ):
-            valueTreeToString( fout, childValue, path + '[%d]' % index )
+        fout.write('%s=[]\n' % path)
+        for index, childValue in zip(xrange(0,len(value)), value):
+            valueTreeToString(fout, childValue, path + '[%d]' % index)
     elif ty is types.StringType:
-        fout.write( '%s="%s"\n' % (path,value) )
+        fout.write('%s="%s"\n' % (path,value))
     elif ty is types.IntType:
-        fout.write( '%s=%d\n' % (path,value) )
+        fout.write('%s=%d\n' % (path,value))
     elif ty is types.FloatType:
-        fout.write( '%s=%.16g\n' % (path,value) )
+        fout.write('%s=%.16g\n' % (path,value))
     elif value is True:
-        fout.write( '%s=true\n' % path )
+        fout.write('%s=true\n' % path)
     elif value is False:
-        fout.write( '%s=false\n' % path )
+        fout.write('%s=false\n' % path)
     elif value is None:
-        fout.write( '%s=null\n' % path )
+        fout.write('%s=null\n' % path)
     else:
         assert False and "Unexpected value type"
         
-def parseAndSaveValueTree( input, actual_path ):
-    root = json.loads( input )
-    fout = file( actual_path, 'wt' )
-    valueTreeToString( fout, root )
+def parseAndSaveValueTree(input, actual_path):
+    root = json.loads(input)
+    fout = file(actual_path, 'wt')
+    valueTreeToString(fout, root)
     fout.close()
     return root
 
-def rewriteValueTree( value, rewrite_path ):
-    rewrite = json.dumps( value )
+def rewriteValueTree(value, rewrite_path):
+    rewrite = json.dumps(value)
     #rewrite = rewrite[1:-1]  # Somehow the string is quoted ! jsonpy bug ?
-    file( rewrite_path, 'wt').write( rewrite + '\n' )
+    file(rewrite_path, 'wt').write(rewrite + '\n')
     return rewrite
     
-input = file( input_path, 'rt' ).read()
-root = parseAndSaveValueTree( input, actual_path )
-rewrite = rewriteValueTree( json.write( root ), rewrite_path )
-rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
+input = file(input_path, 'rt').read()
+root = parseAndSaveValueTree(input, actual_path)
+rewrite = rewriteValueTree(json.write(root), rewrite_path)
+rewrite_root = parseAndSaveValueTree(rewrite, rewrite_actual_path)
 
-sys.exit( 0 )
+sys.exit(0)
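
pyjsontestrunner.py likewise keeps several Python-2-only constructs (types.DictType, xrange(), file(), json.write()). As a point of comparison, a minimal Python 3 sketch of the valueTreeToString() flattening using isinstance() checks; the names are illustrative and this code is not part of the patch:

    # Illustrative Python 3 counterpart of valueTreeToString(): isinstance()
    # replaces the types.*Type constants, and bool is checked before int
    # because bool is a subclass of int in Python.
    def value_tree_to_string(fout, value, path='.'):
        if isinstance(value, dict):
            fout.write('%s={}\n' % path)
            suffix = '' if path.endswith('.') else '.'
            for name in sorted(value):
                value_tree_to_string(fout, value[name], path + suffix + name)
        elif isinstance(value, list):
            fout.write('%s=[]\n' % path)
            for index, child in enumerate(value):
                value_tree_to_string(fout, child, path + '[%d]' % index)
        elif isinstance(value, bool):
            fout.write('%s=%s\n' % (path, 'true' if value else 'false'))
        elif isinstance(value, str):
            fout.write('%s="%s"\n' % (path, value))
        elif isinstance(value, int):
            fout.write('%s=%d\n' % (path, value))
        elif isinstance(value, float):
            fout.write('%s=%.16g\n' % (path, value))
        elif value is None:
            fout.write('%s=null\n' % path)
        else:
            raise AssertionError('Unexpected value type: %r' % type(value))
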
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/runjsontests.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/runjsontests.py
index ffe8bd5..dfdeca3 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/runjsontests.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/runjsontests.py
@@ -1,17 +1,42 @@
+# Copyright 2007 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from io import open
+from glob import glob
 import sys
 import os
 import os.path
-from glob import glob
 import optparse
 
 VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes '
 
-def compareOutputs( expected, actual, message ):
+def getStatusOutput(cmd):
+    """
+    Return int, unicode (for both Python 2 and 3).
+    Note: os.popen().close() would return None for 0.
+    """
+    print(cmd, file=sys.stderr)
+    pipe = os.popen(cmd)
+    process_output = pipe.read()
+    try:
+        # We have been using os.popen(). When we read() the result
+        # we get 'str' (bytes) in py2, and 'str' (unicode) in py3.
+        # Ugh! There must be a better way to handle this.
+        process_output = process_output.decode('utf-8')
+    except AttributeError:
+        pass  # python3
+    status = pipe.close()
+    return status, process_output
+def compareOutputs(expected, actual, message):
     expected = expected.strip().replace('\r','').split('\n')
     actual = actual.strip().replace('\r','').split('\n')
     diff_line = 0
-    max_line_to_compare = min( len(expected), len(actual) )
-    for index in xrange(0,max_line_to_compare):
+    max_line_to_compare = min(len(expected), len(actual))
+    for index in range(0,max_line_to_compare):
         if expected[index].strip() != actual[index].strip():
             diff_line = index + 1
             break
@@ -19,7 +44,7 @@
         diff_line = max_line_to_compare+1
     if diff_line == 0:
         return None
-    def safeGetLine( lines, index ):
+    def safeGetLine(lines, index):
         index += -1
         if index >= len(lines):
             return ''
@@ -29,85 +54,121 @@
   Actual:   '%s'
 """ % (message, diff_line,
        safeGetLine(expected,diff_line),
-       safeGetLine(actual,diff_line) )
-        
-def safeReadFile( path ):
+       safeGetLine(actual,diff_line))
+
+def safeReadFile(path):
     try:
-        return file( path, 'rt' ).read()
-    except IOError, e:
+        return open(path, 'rt', encoding = 'utf-8').read()
+    except IOError as e:
         return '<File "%s" is missing: %s>' % (path,e)
 
-def runAllTests( jsontest_executable_path, input_dir = None,
-                 use_valgrind=False, with_json_checker=False ):
+def runAllTests(jsontest_executable_path, input_dir = None,
+                 use_valgrind=False, with_json_checker=False,
+                 writerClass='StyledWriter'):
     if not input_dir:
-        input_dir = os.path.join( os.getcwd(), 'data' )
-    tests = glob( os.path.join( input_dir, '*.json' ) )
+        input_dir = os.path.join(os.getcwd(), 'data')
+    tests = glob(os.path.join(input_dir, '*.json'))
     if with_json_checker:
-        test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
+        all_test_jsonchecker = glob(os.path.join(input_dir, '../jsonchecker', '*.json'))
+        # These tests fail with strict json support, but pass with jsoncpp's extra leniency
+        """
+        Failure details:
+        * Test ../jsonchecker/fail25.json
+        Parsing should have failed:
+        ["	tab	character	in	string	"]
+
+        * Test ../jsonchecker/fail13.json
+        Parsing should have failed:
+        {"Numbers cannot have leading zeroes": 013}
+
+        * Test ../jsonchecker/fail18.json
+        Parsing should have failed:
+        [[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]
+
+        * Test ../jsonchecker/fail8.json
+        Parsing should have failed:
+        ["Extra close"]]
+
+        * Test ../jsonchecker/fail7.json
+        Parsing should have failed:
+        ["Comma after the close"],
+
+        * Test ../jsonchecker/fail10.json
+        Parsing should have failed:
+        {"Extra value after close": true} "misplaced quoted value"
+
+        * Test ../jsonchecker/fail27.json
+        Parsing should have failed:
+        ["line
+        break"]
+        """
+        known_differences_withjsonchecker = [ "fail25.json", "fail13.json", "fail18.json", "fail8.json",
+                                              "fail7.json", "fail10.json", "fail27.json" ]
+        test_jsonchecker = [ test for test in all_test_jsonchecker if os.path.basename(test) not in known_differences_withjsonchecker ]
+
     else:
         test_jsonchecker = []
     failed_tests = []
     valgrind_path = use_valgrind and VALGRIND_CMD or ''
     for input_path in tests + test_jsonchecker:
-        expect_failure = os.path.basename( input_path ).startswith( 'fail' )
+        expect_failure = os.path.basename(input_path).startswith('fail')
         is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
-        print 'TESTING:', input_path,
+        print('TESTING:', input_path, end=' ')
         options = is_json_checker_test and '--json-checker' or ''
-        pipe = os.popen( "%s%s %s %s" % (
-            valgrind_path, jsontest_executable_path, options,
-            input_path) )
-        process_output = pipe.read()
-        status = pipe.close()
+        options += ' --json-writer %s' % writerClass
+        cmd = '%s%s %s "%s"' % (valgrind_path, jsontest_executable_path, options,
+            input_path)
+        status, process_output = getStatusOutput(cmd)
         if is_json_checker_test:
             if expect_failure:
-                if status is None:
-                    print 'FAILED'
-                    failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
-                                          safeReadFile(input_path)) )
+                if not status:
+                    print('FAILED')
+                    failed_tests.append((input_path, 'Parsing should have failed:\n%s' %
+                                          safeReadFile(input_path)))
                 else:
-                    print 'OK'
+                    print('OK')
             else:
-                if status is not None:
-                    print 'FAILED'
-                    failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+                if status:
+                    print('FAILED')
+                    failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
                 else:
-                    print 'OK'
+                    print('OK')
         else:
             base_path = os.path.splitext(input_path)[0]
-            actual_output = safeReadFile( base_path + '.actual' )
-            actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
-            file(base_path + '.process-output','wt').write( process_output )
+            actual_output = safeReadFile(base_path + '.actual')
+            actual_rewrite_output = safeReadFile(base_path + '.actual-rewrite')
+            open(base_path + '.process-output', 'wt', encoding = 'utf-8').write(process_output)
             if status:
-                print 'parsing failed'
-                failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+                print('parsing failed')
+                failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
             else:
                 expected_output_path = os.path.splitext(input_path)[0] + '.expected'
-                expected_output = file( expected_output_path, 'rt' ).read()
-                detail = ( compareOutputs( expected_output, actual_output, 'input' )
-                            or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
+                expected_output = open(expected_output_path, 'rt', encoding = 'utf-8').read()
+                detail = (compareOutputs(expected_output, actual_output, 'input')
+                            or compareOutputs(expected_output, actual_rewrite_output, 'rewrite'))
                 if detail:
-                    print 'FAILED'
-                    failed_tests.append( (input_path, detail) )
+                    print('FAILED')
+                    failed_tests.append((input_path, detail))
                 else:
-                    print 'OK'
+                    print('OK')
 
     if failed_tests:
-        print
-        print 'Failure details:'
+        print()
+        print('Failure details:')
         for failed_test in failed_tests:
-            print '* Test', failed_test[0]
-            print failed_test[1]
-            print
-        print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
-                                                       len(failed_tests) )
+            print('* Test', failed_test[0])
+            print(failed_test[1])
+            print()
+        print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
+                                                       len(failed_tests)))
         return 1
     else:
-        print 'All %d tests passed.' % len(tests)
+        print('All %d tests passed.' % len(tests))
         return 0
 
 def main():
     from optparse import OptionParser
-    parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
+    parser = OptionParser(usage="%prog [options] <path to jsontestrunner.exe> [test case directory]")
     parser.add_option("--valgrind",
                   action="store_true", dest="valgrind", default=False,
                   help="run all the tests using valgrind to detect memory leaks")
@@ -118,17 +179,32 @@
     options, args = parser.parse_args()
 
     if len(args) < 1 or len(args) > 2:
-        parser.error( 'Must provides at least path to jsontestrunner executable.' )
-        sys.exit( 1 )
+        parser.error('Must provide at least the path to the jsontestrunner executable.')
+        sys.exit(1)
 
-    jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
+    jsontest_executable_path = os.path.normpath(os.path.abspath(args[0]))
     if len(args) > 1:
-        input_path = os.path.normpath( os.path.abspath( args[1] ) )
+        input_path = os.path.normpath(os.path.abspath(args[1]))
     else:
         input_path = None
-    status = runAllTests( jsontest_executable_path, input_path,
-                          use_valgrind=options.valgrind, with_json_checker=options.with_json_checker )
-    sys.exit( status )
+    status = runAllTests(jsontest_executable_path, input_path,
+                         use_valgrind=options.valgrind,
+                         with_json_checker=options.with_json_checker,
+                         writerClass='StyledWriter')
+    if status:
+        sys.exit(status)
+    status = runAllTests(jsontest_executable_path, input_path,
+                         use_valgrind=options.valgrind,
+                         with_json_checker=options.with_json_checker,
+                         writerClass='StyledStreamWriter')
+    if status:
+        sys.exit(status)
+    status = runAllTests(jsontest_executable_path, input_path,
+                         use_valgrind=options.valgrind,
+                         with_json_checker=options.with_json_checker,
+                         writerClass='BuiltStyledStreamWriter')
+    if status:
+        sys.exit(status)
 
 if __name__ == '__main__':
     main()
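
The new getStatusOutput() helper wraps os.popen() and decodes its output so the runner behaves the same under Python 2 and 3, and main() now invokes runAllTests() once per writer class (StyledWriter, StyledStreamWriter, BuiltStyledStreamWriter). A hypothetical alternative using subprocess, shown only to illustrate the status/output contract the runner relies on (a falsy status means success); it is not part of this patch:

    # Hypothetical drop-in for getStatusOutput(): subprocess returns the exit
    # status and the raw output directly, avoiding the bytes-vs-unicode
    # handling noted in the helper's comment.
    import subprocess
    import sys

    def get_status_output(cmd):
        print(cmd, file=sys.stderr)
        completed = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        return completed.returncode, completed.stdout.decode('utf-8')
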
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/rununittests.py b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/rununittests.py
index 366184c..6634e72 100644
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/rununittests.py
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/test/rununittests.py
@@ -1,61 +1,72 @@
+# Copyright 2009 Baptiste Lepilleur and The JsonCpp Authors
+# Distributed under MIT license, or public domain if desired and
+# recognized in your jurisdiction.
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from io import open
+from glob import glob
 import sys
 import os
 import os.path
 import subprocess
-from glob import glob
 import optparse
 
 VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
 
 class TestProxy(object):
-    def __init__( self, test_exe_path, use_valgrind=False ):
-        self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
+    def __init__(self, test_exe_path, use_valgrind=False):
+        self.test_exe_path = os.path.normpath(os.path.abspath(test_exe_path))
         self.use_valgrind = use_valgrind
 
-    def run( self, options ):
+    def run(self, options):
         if self.use_valgrind:
             cmd = VALGRIND_CMD.split()
         else:
             cmd = []
-        cmd.extend( [self.test_exe_path, '--test-auto'] + options )
-        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+        cmd.extend([self.test_exe_path, '--test-auto'] + options)
+        try:
+            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        except:
+            print(cmd)
+            raise
         stdout = process.communicate()[0]
         if process.returncode:
             return False, stdout
         return True, stdout
 
-def runAllTests( exe_path, use_valgrind=False ):
-    test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
-    status, test_names = test_proxy.run( ['--list-tests'] )
+def runAllTests(exe_path, use_valgrind=False):
+    test_proxy = TestProxy(exe_path, use_valgrind=use_valgrind)
+    status, test_names = test_proxy.run(['--list-tests'])
     if not status:
-        print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
+        print("Failed to obtain unit tests list:\n" + test_names, file=sys.stderr)
         return 1
-    test_names = [name.strip() for name in test_names.strip().split('\n')]
+    test_names = [name.strip() for name in test_names.decode('utf-8').strip().split('\n')]
     failures = []
     for name in test_names:
-        print 'TESTING %s:' % name,
-        succeed, result = test_proxy.run( ['--test', name] )
+        print('TESTING %s:' % name, end=' ')
+        succeed, result = test_proxy.run(['--test', name])
         if succeed:
-            print 'OK'
+            print('OK')
         else:
-            failures.append( (name, result) )
-            print 'FAILED'
+            failures.append((name, result))
+            print('FAILED')
     failed_count = len(failures)
     pass_count = len(test_names) - failed_count
     if failed_count:
-        print
+        print()
         for name, result in failures:
-            print result
-        print '%d/%d tests passed (%d failure(s))' % (
-            pass_count, len(test_names), failed_count)
+            print(result)
+        print('%d/%d tests passed (%d failure(s))' % (pass_count, len(test_names), failed_count))
         return 1
     else:
-        print 'All %d tests passed' % len(test_names)
+        print('All %d tests passed' % len(test_names))
         return 0
 
 def main():
     from optparse import OptionParser
-    parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
+    parser = OptionParser(usage="%prog [options] <path to test_lib_json.exe>")
     parser.add_option("--valgrind",
                   action="store_true", dest="valgrind", default=False,
                   help="run all the tests using valgrind to detect memory leaks")
@@ -63,11 +74,11 @@
     options, args = parser.parse_args()
 
     if len(args) != 1:
-        parser.error( 'Must provides at least path to test_lib_json executable.' )
-        sys.exit( 1 )
+        parser.error('Must provide the path to the test_lib_json executable.')
+        sys.exit(1)
 
-    exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
-    sys.exit( exit_code )
+    exit_code = runAllTests(args[0], use_valgrind=options.valgrind)
+    sys.exit(exit_code)
 
 if __name__ == '__main__':
     main()
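
rununittests.py now decodes the --list-tests output for Python 3, while the per-test output is still passed through as returned by subprocess. A compact sketch (not part of this patch) of the same drive loop, assuming a test_lib_json binary that understands --test-auto, --list-tests and --test <name>, as the script itself does:

    # Compact sketch of the loop driven by rununittests.py.
    import subprocess

    def run(exe_path, extra):
        proc = subprocess.run([exe_path, '--test-auto'] + extra,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return proc.returncode == 0, proc.stdout.decode('utf-8')

    def run_all(exe_path):
        ok, listing = run(exe_path, ['--list-tests'])
        if not ok:
            print('Failed to obtain unit tests list:\n' + listing)
            return 1
        names = [n.strip() for n in listing.strip().split('\n')]
        failures = []
        for name in names:
            ok, output = run(exe_path, ['--test', name])
            print('TESTING %s:' % name, 'OK' if ok else 'FAILED')
            if not ok:
                failures.append((name, output))
        print('%d/%d tests passed' % (len(names) - len(failures), len(names)))
        return 1 if failures else 0
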
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version
deleted file mode 100644
index 7defe1e..0000000
--- a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version
+++ /dev/null
@@ -1 +0,0 @@
-0.6.0-dev
\ No newline at end of file
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.in b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.in
new file mode 100644
index 0000000..bfc03f7
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.in
@@ -0,0 +1 @@
+@JSONCPP_VERSION@
diff --git a/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.txt b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.txt
new file mode 100644
index 0000000..bfa363e
--- /dev/null
+++ b/Source/ThirdParty/libwebrtc/Source/third_party/jsoncpp/source/version.txt
@@ -0,0 +1 @@
+1.8.4
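
The hand-maintained version file is replaced by version.in, whose @JSONCPP_VERSION@ placeholder is presumably expanded by the build system from the value shipped in version.txt. A hypothetical helper (not part of this patch) showing the equivalent substitution:

    # Hypothetical sketch of the version.in -> version expansion using the
    # value from version.txt; the real expansion is done by the build system.
    def expand_version(template_path='version.in', version_path='version.txt'):
        with open(version_path) as f:
            version = f.read().strip()          # e.g. '1.8.4'
        with open(template_path) as f:
            template = f.read()
        return template.replace('@JSONCPP_VERSION@', version)
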