From 15833516a4bad0a4ae7786293b22df4bf650aa80 Mon Sep 17 00:00:00 2001
From: Matthew Bauer
Date: Tue, 5 Feb 2019 16:42:45 -0500
Subject: Add armv6l-linux & armv7l-linux as cross jobs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is a cheap way to get 32-bit ARM working. We don’t support it
officially, but lots of people have Raspberry Pis and similar hardware
onto which they want to install the Nix package manager.
---
 release.nix | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/release.nix b/release.nix
index 271645067..0ec742906 100644
--- a/release.nix
+++ b/release.nix
@@ -2,6 +2,7 @@
 , nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.09"; }
 , officialRelease ? false
 , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
+, crossSystems ? [ "armv6l-linux" "armv7l-linux" ]
 }:

 let
@@ -53,11 +54,12 @@ let
     };


-  build = pkgs.lib.genAttrs systems (system:
+  build = pkgs.lib.genAttrs (systems ++ crossSystems) (system:

-    let pkgs = import nixpkgs { inherit system; }; in
-
-    with pkgs;
+    let pkgs = if builtins.elem system systems
+               then import nixpkgs { inherit system; }
+               else import nixpkgs { crossSystem = { inherit system; }; };
+    in with pkgs;

     with import ./release-common.nix { inherit pkgs; };

@@ -89,9 +91,12 @@ let
     });


-  perlBindings = pkgs.lib.genAttrs systems (system:
+  perlBindings = pkgs.lib.genAttrs (systems ++ crossSystems) (system:

-    let pkgs = import nixpkgs { inherit system; }; in with pkgs;
+    let pkgs = if builtins.elem system systems
+               then import nixpkgs { inherit system; }
+               else import nixpkgs { crossSystem = { inherit system; }; };
+    in with pkgs;

     releaseTools.nixBuild {
       name = "nix-perl";
@@ -112,9 +117,12 @@
     });


-  binaryTarball = pkgs.lib.genAttrs systems (system:
+  binaryTarball = pkgs.lib.genAttrs (systems ++ crossSystems) (system:

-    with import nixpkgs { inherit system; };
+    let pkgs = if builtins.elem system systems
+               then import nixpkgs { inherit system; }
+               else import nixpkgs { crossSystem = { inherit system; }; };
+    in with pkgs;

     let
       toplevel = builtins.getAttr system jobs.build;
--
cgit v1.2.3


From 1996af425ac8ddea1e8a591650e7d0caba2aa201 Mon Sep 17 00:00:00 2001
From: Matthew Bauer
Date: Wed, 6 Feb 2019 21:43:47 -0500
Subject: Use buildPackages for native dependencies

Unfortunately, releaseTools.nixBuild does not separate native and
non-native build inputs.
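
For context: in a nixpkgs cross set, buildPackages holds packages
compiled to run on the build machine, while the unqualified attributes
are compiled for the host platform being targeted. A minimal sketch of
the distinction, in Nix (illustrative only; it assumes an x86_64-linux
build machine, and the attribute names are made up):

    let
      pkgs = import <nixpkgs> { crossSystem = { system = "armv7l-linux"; }; };
    in {
      # runs during the build, so it must be an x86_64-linux binary
      toolForBuild = pkgs.buildPackages.git;
      # gets linked into the output, so it must be an armv7l-linux build
      libForHost = pkgs.openssl;
    }
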
As an alternative, we can just use buildPackages to get the native version of some packages like: - pkgconfig - git - curl - utillinux --- release-common.nix | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/release-common.nix b/release-common.nix index 4c5565985..f2aa57c2c 100644 --- a/release-common.nix +++ b/release-common.nix @@ -50,14 +50,16 @@ rec { buildDeps = [ curl bzip2 xz brotli editline - openssl pkgconfig sqlite boehmgc + openssl sqlite boehmgc boost + buildPackages.pkgconfig + # Tests - git - mercurial + buildPackages.git + buildPackages.mercurial ] - ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] + ++ lib.optionals stdenv.isLinux [libseccomp buildPackages.utillinuxMinimal] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ((aws-sdk-cpp.override { -- cgit v1.2.3 From e9072ded9749ab00cc397980e8a26f83d341efc0 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 6 Feb 2019 22:43:28 -0500 Subject: Use nativeBuildInputs --- release-common.nix | 16 +++++++++------- release.nix | 3 +++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/release-common.nix b/release-common.nix index f2aa57c2c..707d36f95 100644 --- a/release-common.nix +++ b/release-common.nix @@ -47,19 +47,21 @@ rec { autoreconfHook ]; + nativeBuildDeps = + [ buildPackages.pkgconfig + + # Tests + buildPackages.git + buildPackages.mercurial + ] ++ lib.optional stdenv.isLinux buildPackages.utillinuxMinimal; + buildDeps = [ curl bzip2 xz brotli editline openssl sqlite boehmgc boost - - buildPackages.pkgconfig - - # Tests - buildPackages.git - buildPackages.mercurial ] - ++ lib.optionals stdenv.isLinux [libseccomp buildPackages.utillinuxMinimal] + ++ lib.optional stdenv.isLinux libseccomp ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ((aws-sdk-cpp.override { diff --git a/release.nix b/release.nix index 0ec742906..9843efa29 100644 --- a/release.nix +++ b/release.nix @@ -24,6 +24,7 @@ let src = nix; inherit officialRelease; + nativeBuildInputs = nativeBuildDeps; buildInputs = tarballDeps ++ buildDeps; configureFlags = "--enable-gc"; @@ -67,6 +68,7 @@ let name = "nix"; src = tarball; + nativeBuildInputs = nativeBuildDeps; buildInputs = buildDeps; preConfigure = @@ -199,6 +201,7 @@ let name = "nix-build"; src = tarball; + nativeBuildInputs = nativeBuildDeps; buildInputs = buildDeps; dontInstall = false; -- cgit v1.2.3 From f6ea56dfac1f4df45a5fa9e2801bc632dee9eff7 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 6 Feb 2019 23:04:40 -0500 Subject: Get shellcheck from buildPackages --- release.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.nix b/release.nix index 9843efa29..06db7bdc0 100644 --- a/release.nix +++ b/release.nix @@ -133,7 +133,7 @@ let in runCommand "nix-binary-tarball-${version}" - { nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; + { nativeBuildInputs = lib.optional (system != "aarch64-linux") buildPackages.shellcheck; meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; } '' -- cgit v1.2.3 From 4fefe26717fa70828e3f524e43c76e3f7b7a09b0 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 5 Feb 2021 18:22:34 -0600 Subject: Re-enable armv6l support This fixes the libatomic detection. 
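
For context: ARMv6 has no native 64-bit atomic instructions, so GCC
lowers __atomic builtins on 64-bit values into calls to libatomic. The
configure probe visible in the hunk below links a program roughly like
the following sketch (not the literal probe) and sets
GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes when it only links with the
extra library:

    #include <stdint.h>

    uint64_t v;

    int main() {
        /* inlined on x86_64; becomes a libatomic call on armv6l */
        return (int) __atomic_load_n(&v, __ATOMIC_ACQUIRE);
    }

Moving -latomic from LIBS to LDFLAGS presumably lets the flag reach
Nix's own non-autoconf Makefiles, which pick up LDFLAGS.
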
--- configure.ac | 2 +- flake.nix | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.ac b/configure.ac index 2047ed8d2..685c471c5 100644 --- a/configure.ac +++ b/configure.ac @@ -152,7 +152,7 @@ int main() { }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes) AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC) if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then - LIBS="-latomic $LIBS" + LDFLAGS="$LDFLAGS -latomic" fi PKG_PROG_PKG_CONFIG diff --git a/flake.nix b/flake.nix index 869b92cb7..7e02fd70d 100644 --- a/flake.nix +++ b/flake.nix @@ -20,7 +20,7 @@ linuxSystems = linux64BitSystems ++ [ "i686-linux" ]; systems = linuxSystems ++ [ "x86_64-darwin" ]; - crossSystems = [ "armv7l-linux" ]; + crossSystems = [ "armv6l-linux" "armv7l-linux" ]; forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); -- cgit v1.2.3 From 8a0c00b85600991cdb9aa05902defec6ac44b777 Mon Sep 17 00:00:00 2001 From: Yorick van Pelt Date: Tue, 10 Dec 2019 15:47:38 +0700 Subject: Use libarchive for all compression --- src/libstore/filetransfer.cc | 2 +- src/libutil/compression.cc | 402 ++++++++++--------------------------------- src/libutil/compression.hh | 10 +- src/libutil/serialise.cc | 56 +++++- src/libutil/serialise.hh | 8 + src/libutil/tarfile.cc | 104 +++++------ src/libutil/tarfile.hh | 19 ++ 7 files changed, 232 insertions(+), 369 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 8ea5cdc9d..514ab3bf9 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -148,7 +148,7 @@ struct curlFileTransfer : public FileTransfer } LambdaSink finalSink; - std::shared_ptr decompressionSink; + std::shared_ptr decompressionSink; std::optional errorSink; std::exception_ptr writeException; diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 986ba2976..8ba536000 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -1,10 +1,13 @@ #include "compression.hh" +#include "tarfile.hh" #include "util.hh" #include "finally.hh" #include "logging.hh" #include #include +#include +#include #include #include @@ -35,177 +38,86 @@ struct ChunkedCompressionSink : CompressionSink virtual void writeInternal(std::string_view data) = 0; }; -struct NoneSink : CompressionSink -{ - Sink & nextSink; - NoneSink(Sink & nextSink) : nextSink(nextSink) { } - void finish() override { flush(); } - void write(std::string_view data) override { nextSink(data); } -}; - -struct GzipDecompressionSink : CompressionSink +struct ArchiveDecompressionSource : Source { - Sink & nextSink; - z_stream strm; - bool finished = false; - uint8_t outbuf[BUFSIZ]; - - GzipDecompressionSink(Sink & nextSink) : nextSink(nextSink) - { - strm.zalloc = Z_NULL; - strm.zfree = Z_NULL; - strm.opaque = Z_NULL; - strm.avail_in = 0; - strm.next_in = Z_NULL; - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - - // Enable gzip and zlib decoding (+32) with 15 windowBits - int ret = inflateInit2(&strm,15+32); - if (ret != Z_OK) - throw CompressionError("unable to initialise gzip encoder"); - } - - ~GzipDecompressionSink() - { - inflateEnd(&strm); - } - - void finish() override - { - CompressionSink::flush(); - write({}); - } - - void write(std::string_view data) override - { - assert(data.size() <= std::numeric_limits::max()); - - strm.next_in = (Bytef *) data.data(); - strm.avail_in = data.size(); - - while (!finished && (!data.data() || strm.avail_in)) { - checkInterrupt(); - - int ret = 
inflate(&strm,Z_SYNC_FLUSH); - if (ret != Z_OK && ret != Z_STREAM_END) - throw CompressionError("error while decompressing gzip file: %d (%d, %d)", - zError(ret), data.size(), strm.avail_in); - - finished = ret == Z_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out}); - strm.next_out = (Bytef *) outbuf; - strm.avail_out = sizeof(outbuf); + std::unique_ptr archive = 0; + Source & src; + ArchiveDecompressionSource(Source & src) : src(src) {} + ~ArchiveDecompressionSource() override {} + size_t read(char * data, size_t len) override { + struct archive_entry* ae; + if (!archive) { + archive = std::make_unique(src, true); + this->archive->check(archive_read_next_header(this->archive->archive, &ae), "Failed to read header (%s)"); + if (archive_filter_count(this->archive->archive) < 2) { + throw CompressionError("Input compression not recognized."); } } + ssize_t result = archive_read_data(this->archive->archive, data, len); + if (result > 0) return result; + if (result == 0) { + throw EndOfFile("reached end of compressed file"); + } + this->archive->check(result, "Failed to read compressed data (%s)"); + return result; } }; - -struct XzDecompressionSink : CompressionSink +struct ArchiveCompressionSink : CompressionSink { Sink & nextSink; - uint8_t outbuf[BUFSIZ]; - lzma_stream strm = LZMA_STREAM_INIT; - bool finished = false; - - XzDecompressionSink(Sink & nextSink) : nextSink(nextSink) - { - lzma_ret ret = lzma_stream_decoder( - &strm, UINT64_MAX, LZMA_CONCATENATED); - if (ret != LZMA_OK) - throw CompressionError("unable to initialise lzma decoder"); - - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + struct archive* archive; + ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel) : nextSink(nextSink) { + archive = archive_write_new(); + if (!archive) throw Error("failed to initialize libarchive"); + check(archive_write_add_filter_by_name(archive, format.c_str()), "Couldn't initialize compression (%s)"); + check(archive_write_set_format_raw(archive)); + if (format == "xz" && parallel) { + check(archive_write_set_filter_option(archive, format.c_str(), "threads", "0")); + } + // disable internal buffering + check(archive_write_set_bytes_per_block(archive, 0)); + // disable output padding + check(archive_write_set_bytes_in_last_block(archive, 1)); + open(); } - - ~XzDecompressionSink() - { - lzma_end(&strm); + ~ArchiveCompressionSink() override { + if (archive) archive_write_free(archive); } - - void finish() override - { - CompressionSink::flush(); - write({}); + void finish() override { + flush(); + check(archive_write_close(archive)); } - - void write(std::string_view data) override - { - strm.next_in = (const unsigned char *) data.data(); - strm.avail_in = data.size(); - - while (!finished && (!data.data() || strm.avail_in)) { - checkInterrupt(); - - lzma_ret ret = lzma_code(&strm, data.data() ? 
LZMA_RUN : LZMA_FINISH); - if (ret != LZMA_OK && ret != LZMA_STREAM_END) - throw CompressionError("error %d while decompressing xz file", ret); - - finished = ret == LZMA_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out}); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - } + void check(int err, const char *reason="Failed to compress (%s)") { + if (err == ARCHIVE_EOF) + throw EndOfFile("reached end of archive"); + else if (err != ARCHIVE_OK) + throw Error(reason, archive_error_string(this->archive)); } -}; - -struct BzipDecompressionSink : ChunkedCompressionSink -{ - Sink & nextSink; - bz_stream strm; - bool finished = false; - - BzipDecompressionSink(Sink & nextSink) : nextSink(nextSink) - { - memset(&strm, 0, sizeof(strm)); - int ret = BZ2_bzDecompressInit(&strm, 0, 0); - if (ret != BZ_OK) - throw CompressionError("unable to initialise bzip2 decoder"); - - strm.next_out = (char *) outbuf; - strm.avail_out = sizeof(outbuf); + void write(std::string_view data) override { + ssize_t result = archive_write_data(archive, data.data(), data.length()); + if (result <= 0) check(result); } - - ~BzipDecompressionSink() - { - BZ2_bzDecompressEnd(&strm); +private: + void open() { + check(archive_write_open(archive, this, NULL, ArchiveCompressionSink::callback_write, NULL)); + struct archive_entry *ae = archive_entry_new(); + archive_entry_set_filetype(ae, AE_IFREG); + check(archive_write_header(archive, ae)); + archive_entry_free(ae); } - - void finish() override - { - flush(); - write({}); + static ssize_t callback_write(struct archive *archive, void *_self, const void *buffer, size_t length) { + ArchiveCompressionSink *self = (ArchiveCompressionSink *)_self; + self->nextSink({(const char*)buffer, length}); + return length; } +}; - void writeInternal(std::string_view data) override - { - assert(data.size() <= std::numeric_limits::max()); - - strm.next_in = (char *) data.data(); - strm.avail_in = data.size(); - - while (strm.avail_in) { - checkInterrupt(); - - int ret = BZ2_bzDecompress(&strm); - if (ret != BZ_OK && ret != BZ_STREAM_END) - throw CompressionError("error while decompressing bzip2 file"); - - finished = ret == BZ_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out}); - strm.next_out = (char *) outbuf; - strm.avail_out = sizeof(outbuf); - } - } - } +struct NoneSink : CompressionSink +{ + Sink & nextSink; + NoneSink(Sink & nextSink) : nextSink(nextSink) { } + void finish() override { flush(); } + void write(std::string_view data) override { nextSink(data); } }; struct BrotliDecompressionSink : ChunkedCompressionSink @@ -261,161 +173,32 @@ struct BrotliDecompressionSink : ChunkedCompressionSink ref decompress(const std::string & method, const std::string & in) { - StringSink ssink; - auto sink = makeDecompressionSink(method, ssink); - (*sink)(in); - sink->finish(); - return ssink.s; + if (method == "br") { + StringSink ssink; + auto sink = makeDecompressionSink(method, ssink); + (*sink)(in); + sink->finish(); + return ssink.s; + } else { + StringSource ssrc(in); + auto src = makeDecompressionSource(ssrc); + return make_ref(src->drain()); + } } -ref makeDecompressionSink(const std::string & method, Sink & nextSink) +std::unique_ptr makeDecompressionSink(const std::string & method, Sink & nextSink) { if (method == "none" || method == "") - return make_ref(nextSink); - else if (method == "xz") - return 
make_ref(nextSink); - else if (method == "bzip2") - return make_ref(nextSink); - else if (method == "gzip") - return make_ref(nextSink); + return std::make_unique(nextSink); else if (method == "br") - return make_ref(nextSink); + return std::make_unique(nextSink); else - throw UnknownCompressionMethod("unknown compression method '%s'", method); + return sourceToSink([&](Source & source) { + auto decompressionSource = makeDecompressionSource(source); + decompressionSource->drainInto(nextSink); + }); } -struct XzCompressionSink : CompressionSink -{ - Sink & nextSink; - uint8_t outbuf[BUFSIZ]; - lzma_stream strm = LZMA_STREAM_INIT; - bool finished = false; - - XzCompressionSink(Sink & nextSink, bool parallel) : nextSink(nextSink) - { - lzma_ret ret; - bool done = false; - - if (parallel) { -#ifdef HAVE_LZMA_MT - lzma_mt mt_options = {}; - mt_options.flags = 0; - mt_options.timeout = 300; // Using the same setting as the xz cmd line - mt_options.preset = LZMA_PRESET_DEFAULT; - mt_options.filters = NULL; - mt_options.check = LZMA_CHECK_CRC64; - mt_options.threads = lzma_cputhreads(); - mt_options.block_size = 0; - if (mt_options.threads == 0) - mt_options.threads = 1; - // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the - // number of threads. - ret = lzma_stream_encoder_mt(&strm, &mt_options); - done = true; -#else - printMsg(lvlError, "warning: parallel XZ compression requested but not supported, falling back to single-threaded compression"); -#endif - } - - if (!done) - ret = lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); - - if (ret != LZMA_OK) - throw CompressionError("unable to initialise lzma encoder"); - - // FIXME: apply the x86 BCJ filter? - - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - - ~XzCompressionSink() - { - lzma_end(&strm); - } - - void finish() override - { - CompressionSink::flush(); - write({}); - } - - void write(std::string_view data) override - { - strm.next_in = (const unsigned char *) data.data(); - strm.avail_in = data.size(); - - while (!finished && (!data.data() || strm.avail_in)) { - checkInterrupt(); - - lzma_ret ret = lzma_code(&strm, data.data() ? LZMA_RUN : LZMA_FINISH); - if (ret != LZMA_OK && ret != LZMA_STREAM_END) - throw CompressionError("error %d while compressing xz file", ret); - - finished = ret == LZMA_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink({(const char *) outbuf, sizeof(outbuf) - strm.avail_out}); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - } - } -}; - -struct BzipCompressionSink : ChunkedCompressionSink -{ - Sink & nextSink; - bz_stream strm; - bool finished = false; - - BzipCompressionSink(Sink & nextSink) : nextSink(nextSink) - { - memset(&strm, 0, sizeof(strm)); - int ret = BZ2_bzCompressInit(&strm, 9, 0, 30); - if (ret != BZ_OK) - throw CompressionError("unable to initialise bzip2 encoder"); - - strm.next_out = (char *) outbuf; - strm.avail_out = sizeof(outbuf); - } - - ~BzipCompressionSink() - { - BZ2_bzCompressEnd(&strm); - } - - void finish() override - { - flush(); - writeInternal({}); - } - - void writeInternal(std::string_view data) override - { - assert(data.size() <= std::numeric_limits::max()); - - strm.next_in = (char *) data.data(); - strm.avail_in = data.size(); - - while (!finished && (!data.data() || strm.avail_in)) { - checkInterrupt(); - - int ret = BZ2_bzCompress(&strm, data.data() ? 
BZ_RUN : BZ_FINISH); - if (ret != BZ_RUN_OK && ret != BZ_FINISH_OK && ret != BZ_STREAM_END) - throw CompressionError("error %d while compressing bzip2 file", ret); - - finished = ret == BZ_STREAM_END; - - if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { - nextSink({(const char *) outbuf, sizeof(outbuf) - strm.avail_out}); - strm.next_out = (char *) outbuf; - strm.avail_out = sizeof(outbuf); - } - } - } -}; - struct BrotliCompressionSink : ChunkedCompressionSink { Sink & nextSink; @@ -468,15 +251,20 @@ struct BrotliCompressionSink : ChunkedCompressionSink } } }; +std::unique_ptr makeDecompressionSource(Source & prev) { + return std::unique_ptr(new ArchiveDecompressionSource(prev)); +} ref makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel) { + std::vector la_supports = { + "bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", "lzip", "lzma", "lzop", "xz", "zstd" + }; + if (std::find(la_supports.begin(), la_supports.end(), method) != la_supports.end()) { + return make_ref(nextSink, method, parallel); + } if (method == "none") return make_ref(nextSink); - else if (method == "xz") - return make_ref(nextSink, parallel); - else if (method == "bzip2") - return make_ref(nextSink); else if (method == "br") return make_ref(nextSink); else diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index dd666a4e1..192cb3e91 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -8,14 +8,18 @@ namespace nix { -struct CompressionSink : BufferedSink +struct CompressionSink : BufferedSink, FinishSink { - virtual void finish() = 0; + using BufferedSink::operator (); + using BufferedSink::write; + using FinishSink::finish; }; +std::unique_ptr makeDecompressionSource(Source & prev); + ref decompress(const std::string & method, const std::string & in); -ref makeDecompressionSink(const std::string & method, Sink & nextSink); +std::unique_ptr makeDecompressionSink(const std::string & method, Sink & nextSink); ref compress(const std::string & method, const std::string & in, const bool parallel = false); diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index d1a16b6ba..374b48d79 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -201,6 +201,61 @@ static DefaultStackAllocator defaultAllocatorSingleton; StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton; +std::unique_ptr sourceToSink(std::function fun) +{ + struct SourceToSink : FinishSink + { + typedef boost::coroutines2::coroutine coro_t; + + std::function fun; + std::optional coro; + + SourceToSink(std::function fun) : fun(fun) + { + } + + std::string_view cur; + + void operator () (std::string_view in) override + { + if (in.empty()) return; + cur = in; + + if (!coro) + coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) { + LambdaSource source([&](char *out, size_t out_len) { + if (cur.empty()) { + yield(); + if (yield.get()) { + return (size_t)0; + } + } + + size_t n = std::min(cur.size(), out_len); + memcpy(out, cur.data(), n); + cur.remove_prefix(n); + return n; + }); + fun(source); + }); + + if (!*coro) { abort(); } + + if (!cur.empty()) (*coro)(false); + } + + void finish() { + if (!coro) return; + if (!*coro) abort(); + (*coro)(true); + if (*coro) abort(); + } + }; + + return std::make_unique(fun); +} + + std::unique_ptr sinkToSource( std::function fun, std::function eof) @@ -212,7 +267,6 @@ std::unique_ptr sinkToSource( std::function fun; std::function eof; std::optional coro; - bool 
started = false; SinkToSource(std::function fun, std::function eof) : fun(fun), eof(eof) diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 5bbbc7ce3..0fe6e8332 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -25,6 +25,13 @@ struct NullSink : Sink { } }; + +struct FinishSink : virtual Sink +{ + virtual void finish() = 0; +}; + + /* A buffered abstract sink. Warning: a BufferedSink should not be used from multiple threads concurrently. */ struct BufferedSink : virtual Sink @@ -281,6 +288,7 @@ struct ChainSource : Source size_t read(char * data, size_t len) override; }; +std::unique_ptr sourceToSink(std::function fun); /* Convert a function that feeds data into a Sink into a Source. The Source executes the function as a coroutine. */ diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 2da169ba7..b5e1cb4c0 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -2,83 +2,73 @@ #include #include "serialise.hh" +#include "tarfile.hh" namespace nix { +static int callback_open(struct archive *, void *self) { + return ARCHIVE_OK; +} + +static ssize_t callback_read(struct archive * archive, void * _self, const void * * buffer) { + TarArchive *self = (TarArchive *)_self; + *buffer = self->buffer.data(); -struct TarArchive { - struct archive * archive; - Source * source; - std::vector buffer; + try { + return self->source->read((char *) self->buffer.data(), 4096); + } catch (EndOfFile &) { + return 0; + } catch (std::exception &err) { + archive_set_error(archive, EIO, "Source threw exception: %s", err.what()); + + return -1; + } +} + +static int callback_close(struct archive *, void *self) { + return ARCHIVE_OK; +} - void check(int err, const char * reason = "failed to extract archive: %s") - { +void TarArchive::check(int err, const char *reason) +{ if (err == ARCHIVE_EOF) throw EndOfFile("reached end of archive"); else if (err != ARCHIVE_OK) throw Error(reason, archive_error_string(this->archive)); } - TarArchive(Source & source) : buffer(4096) - { - this->archive = archive_read_new(); - this->source = &source; +TarArchive::TarArchive(Source& source, bool raw) : buffer(4096) +{ + this->archive = archive_read_new(); + this->source = &source; + if (!raw) { archive_read_support_filter_all(archive); archive_read_support_format_all(archive); - check(archive_read_open(archive, - (void *)this, - TarArchive::callback_open, - TarArchive::callback_read, - TarArchive::callback_close), - "failed to open archive: %s"); - } - - TarArchive(const Path & path) - { - this->archive = archive_read_new(); - + } else { archive_read_support_filter_all(archive); - archive_read_support_format_all(archive); - check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s"); - } - - TarArchive(const TarArchive &) = delete; - - void close() - { - check(archive_read_close(archive), "failed to close archive: %s"); + archive_read_support_format_raw(archive); + archive_read_support_format_empty(archive); } + check(archive_read_open(archive, (void *)this, callback_open, callback_read, callback_close), "Failed to open archive (%s)"); +} - ~TarArchive() - { - if (this->archive) archive_read_free(this->archive); - } -private: +TarArchive::TarArchive(const Path &path) +{ + this->archive = archive_read_new(); - static int callback_open(struct archive *, void * self) { - return ARCHIVE_OK; - } + archive_read_support_filter_all(archive); + archive_read_support_format_all(archive); + check(archive_read_open_filename(archive, path.c_str(), 
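+        /* the third argument is libarchive's read block size in bytes: */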
16384), "failed to open archive: %s"); +} - static ssize_t callback_read(struct archive * archive, void * _self, const void * * buffer) - { - auto self = (TarArchive *)_self; - *buffer = self->buffer.data(); - - try { - return self->source->read((char *) self->buffer.data(), 4096); - } catch (EndOfFile &) { - return 0; - } catch (std::exception & err) { - archive_set_error(archive, EIO, "source threw exception: %s", err.what()); - return -1; - } - } +void TarArchive::close() { + check(archive_read_close(this->archive), "Failed to close archive (%s)"); +} - static int callback_close(struct archive *, void * self) { - return ARCHIVE_OK; - } -}; +TarArchive::~TarArchive() { + if (this->archive) archive_read_free(this->archive); +} static void extract_archive(TarArchive & archive, const Path & destDir) { diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh index 89a024f1d..18adf3490 100644 --- a/src/libutil/tarfile.hh +++ b/src/libutil/tarfile.hh @@ -1,7 +1,26 @@ #include "serialise.hh" +#include namespace nix { +struct TarArchive { + struct archive *archive; + Source *source; + std::vector buffer; + + void check(int err, const char *reason = "Failed to extract archive (%s)"); + + TarArchive(Source& source, bool raw = false); + + TarArchive(const Path &path); + + // disable copy constructor + TarArchive(const TarArchive&) = delete; + + void close(); + + ~TarArchive(); +}; void unpackTarfile(Source & source, const Path & destDir); void unpackTarfile(const Path & tarFile, const Path & destDir); -- cgit v1.2.3 From f3f228700a52857fe6e8632df4e935551ea219ff Mon Sep 17 00:00:00 2001 From: Mykola Orliuk Date: Wed, 31 Mar 2021 04:20:41 +0200 Subject: canonPath in one pass --- src/libutil/util.cc | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index dea9c74b7..c092076f3 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -143,16 +143,18 @@ Path canonPath(const Path & path, bool resolveSymlinks) s += '/'; while (i != end && *i != '/') s += *i++; - /* If s points to a symlink, resolve it and restart (since - the symlink target might contain new symlinks). 
*/ + /* If s points to a symlink, resolve it and continue from there */ if (resolveSymlinks && isLink(s)) { if (++followCount >= maxFollow) throw Error("infinite symlink recursion in path '%1%'", path); - temp = absPath(readLink(s), dirOf(s)) - + string(i, end); - i = temp.begin(); /* restart */ + temp = readLink(s) + string(i, end); + i = temp.begin(); end = temp.end(); - s = ""; + if (!temp.empty() && temp[0] == '/') { + s.clear(); /* restart for symlinks pointing to absolute path */ + } else { + s = dirOf(s); + } } } } -- cgit v1.2.3 From f66fb5fb5b1478a5da39d0e9cc0f835272199c5d Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 23 Mar 2021 12:06:43 +0100 Subject: flake.nix: Build nix with strictDeps = true --- flake.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flake.nix b/flake.nix index 58dc5019d..adb796a05 100644 --- a/flake.nix +++ b/flake.nix @@ -233,6 +233,8 @@ separateDebugInfo = true; + strictDeps = true; + passthru.perl-bindings = with final; stdenv.mkDerivation { name = "nix-perl-${version}"; @@ -517,6 +519,8 @@ installCheckFlags = "sysconfdir=$(out)/etc"; stripAllList = ["bin"]; + + strictDeps = true; }; }); -- cgit v1.2.3 From c3090bc6fdf6e052cd4c56fce6aeb11ddeb5dd6f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 24 Mar 2021 14:44:20 +0100 Subject: tests/*: show when tests are skipped --- tests/build-remote.sh | 4 ++-- tests/gc-runtime.sh | 2 +- tests/linux-sandbox.sh | 4 ++-- tests/recursive.sh | 2 +- tests/shell.sh | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/build-remote.sh b/tests/build-remote.sh index 04848e4b5..70f82e939 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -1,5 +1,5 @@ -if ! canUseSandbox; then exit; fi -if ! [[ $busybox =~ busybox ]]; then exit; fi +if ! canUseSandbox; then exit 99; fi +if ! [[ $busybox =~ busybox ]]; then exit 99; fi unset NIX_STORE_DIR unset NIX_STATE_DIR diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh index 4c5028005..6094959cb 100644 --- a/tests/gc-runtime.sh +++ b/tests/gc-runtime.sh @@ -4,7 +4,7 @@ case $system in *linux*) ;; *) - exit 0; + exit 99; esac set -m # enable job control, needed for kill diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh index 70a90a907..eac62d461 100644 --- a/tests/linux-sandbox.sh +++ b/tests/linux-sandbox.sh @@ -2,13 +2,13 @@ source common.sh clearStore -if ! canUseSandbox; then exit; fi +if ! canUseSandbox; then exit 99; fi # Note: we need to bind-mount $SHELL into the chroot. Currently we # only support the case where $SHELL is in the Nix store, because # otherwise things get complicated (e.g. if it's in /bin, do we need # /lib as well?). -if [[ ! $SHELL =~ /nix/store ]]; then exit; fi +if [[ ! $SHELL =~ /nix/store ]]; then exit 99; fi chmod -R u+w $TEST_ROOT/store0 || true rm -rf $TEST_ROOT/store0 diff --git a/tests/recursive.sh b/tests/recursive.sh index b020ec710..a55b061b5 100644 --- a/tests/recursive.sh +++ b/tests/recursive.sh @@ -1,7 +1,7 @@ source common.sh # FIXME -if [[ $(uname) != Linux ]]; then exit; fi +if [[ $(uname) != Linux ]]; then exit 99; fi clearStore diff --git a/tests/shell.sh b/tests/shell.sh index 7a9ee8ab0..2b85bb337 100644 --- a/tests/shell.sh +++ b/tests/shell.sh @@ -6,7 +6,7 @@ clearCache nix shell -f shell-hello.nix hello -c hello | grep 'Hello World' nix shell -f shell-hello.nix hello -c hello NixOS | grep 'Hello NixOS' -if ! canUseSandbox; then exit; fi +if ! 
canUseSandbox; then exit 99; fi chmod -R u+w $TEST_ROOT/store0 || true rm -rf $TEST_ROOT/store0 -- cgit v1.2.3 From ff1a2143aa1338ccba0e2bc5ccd66bd3df8baa31 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 24 Mar 2021 14:50:15 +0100 Subject: flake.nix: Make the sandbox tests work again --- flake.nix | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index adb796a05..9a758eafa 100644 --- a/flake.nix +++ b/flake.nix @@ -78,7 +78,8 @@ buildPackages.git buildPackages.mercurial buildPackages.jq - ]; + ] + ++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)]; buildDeps = [ curl @@ -90,7 +91,7 @@ lowdown gmock ] - ++ lib.optionals stdenv.isLinux [libseccomp (pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)] + ++ lib.optionals stdenv.isLinux [libseccomp] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional stdenv.isx86_64 libcpuid; -- cgit v1.2.3 From 9f28dd97ae6afc68f0574a251325336c12d60c6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gohla?= Date: Mon, 5 Apr 2021 21:24:55 +0100 Subject: Revert "Use upstream nlohmann_json" This reverts commit 4145cd2da002e1bd8affa0392c80118eabe58e3c. --- src/nlohmann/json.hpp | 20406 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 20406 insertions(+) create mode 100644 src/nlohmann/json.hpp diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp new file mode 100644 index 000000000..c9af0bed3 --- /dev/null +++ b/src/nlohmann/json.hpp @@ -0,0 +1,20406 @@ +/* + __ _____ _____ _____ + __| | __| | | | JSON for Modern C++ +| | |__ | | | | | | version 3.5.0 +|_____|_____|_____|_|___| https://github.com/nlohmann/json + +Licensed under the MIT License . +SPDX-License-Identifier: MIT +Copyright (c) 2013-2018 Niels Lohmann . + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +#ifndef NLOHMANN_JSON_HPP +#define NLOHMANN_JSON_HPP + +#define NLOHMANN_JSON_VERSION_MAJOR 3 +#define NLOHMANN_JSON_VERSION_MINOR 5 +#define NLOHMANN_JSON_VERSION_PATCH 0 + +#include // all_of, find, for_each +#include // assert +#include // and, not, or +#include // nullptr_t, ptrdiff_t, size_t +#include // hash, less +#include // initializer_list +#include // istream, ostream +#include // random_access_iterator_tag +#include // accumulate +#include // string, stoi, to_string +#include // declval, forward, move, pair, swap + +// #include +#ifndef NLOHMANN_JSON_FWD_HPP +#define NLOHMANN_JSON_FWD_HPP + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template +struct adl_serializer; + +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer> +class basic_json; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +template +class json_pointer; + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. 
+ +@since version 1.0.0 +*/ +using json = basic_json<>; +} // namespace nlohmann + +#endif + +// #include + + +// This file contains all internal macro definitions +// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them + +// exclude unsupported compilers +#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) + #if defined(__clang__) + #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 + #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 + #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #endif +#endif + +// disable float-equal warnings on GCC/clang +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + +// disable documentation warnings on clang +#if defined(__clang__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdocumentation" +#endif + +// allow for portable deprecation warnings +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #define JSON_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) + #define JSON_DEPRECATED __declspec(deprecated) +#else + #define JSON_DEPRECATED +#endif + +// allow to disable exceptions +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) + #define JSON_THROW(exception) throw exception + #define JSON_TRY try + #define JSON_CATCH(exception) catch(exception) + #define JSON_INTERNAL_CATCH(exception) catch(exception) +#else + #define JSON_THROW(exception) std::abort() + #define JSON_TRY if(true) + #define JSON_CATCH(exception) if(false) + #define JSON_INTERNAL_CATCH(exception) if(false) +#endif + +// override exception macros +#if defined(JSON_THROW_USER) + #undef JSON_THROW + #define JSON_THROW JSON_THROW_USER +#endif +#if defined(JSON_TRY_USER) + #undef JSON_TRY + #define JSON_TRY JSON_TRY_USER +#endif +#if defined(JSON_CATCH_USER) + #undef JSON_CATCH + #define JSON_CATCH JSON_CATCH_USER + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_CATCH_USER +#endif +#if defined(JSON_INTERNAL_CATCH_USER) + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER +#endif + +// manual branch prediction +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #define JSON_LIKELY(x) __builtin_expect(!!(x), 1) + #define JSON_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else + #define JSON_LIKELY(x) x + #define JSON_UNLIKELY(x) x +#endif + +// C++ language standard detection +#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 +#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #define JSON_HAS_CPP_14 +#endif + +/*! +@brief macro to briefly define a mapping between an enum and JSON +@def NLOHMANN_JSON_SERIALIZE_ENUM +@since version 3.4.0 +*/ +#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) 
\ + template \ + inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [e](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.first == e; \ + }); \ + j = ((it != std::end(m)) ? it : std::begin(m))->second; \ + } \ + template \ + inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [j](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.second == j; \ + }); \ + e = ((it != std::end(m)) ? it : std::begin(m))->first; \ + } + +// Ugly macros to avoid uglier copy-paste when specializing basic_json. They +// may be removed in the future once the class is split. + +#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ + template class ObjectType, \ + template class ArrayType, \ + class StringType, class BooleanType, class NumberIntegerType, \ + class NumberUnsignedType, class NumberFloatType, \ + template class AllocatorType, \ + template class JSONSerializer> + +#define NLOHMANN_BASIC_JSON_TPL \ + basic_json + +// #include + + +#include // not +#include // size_t +#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type + +namespace nlohmann +{ +namespace detail +{ +// alias templates to reduce boilerplate +template +using enable_if_t = typename std::enable_if::type; + +template +using uncvref_t = typename std::remove_cv::type>::type; + +// implementation of C++14 index_sequence and affiliates +// source: https://stackoverflow.com/a/32223343 +template +struct index_sequence +{ + using type = index_sequence; + using value_type = std::size_t; + static constexpr std::size_t size() noexcept + { + return sizeof...(Ints); + } +}; + +template +struct merge_and_renumber; + +template +struct merge_and_renumber, index_sequence> + : index_sequence < I1..., (sizeof...(I1) + I2)... 
> {}; + +template +struct make_index_sequence + : merge_and_renumber < typename make_index_sequence < N / 2 >::type, + typename make_index_sequence < N - N / 2 >::type > {}; + +template<> struct make_index_sequence<0> : index_sequence<> {}; +template<> struct make_index_sequence<1> : index_sequence<0> {}; + +template +using index_sequence_for = make_index_sequence; + +// dispatch utility (taken from ranges-v3) +template struct priority_tag : priority_tag < N - 1 > {}; +template<> struct priority_tag<0> {}; + +// taken from ranges-v3 +template +struct static_const +{ + static constexpr T value{}; +}; + +template +constexpr T static_const::value; +} // namespace detail +} // namespace nlohmann + +// #include + + +#include // not +#include // numeric_limits +#include // false_type, is_constructible, is_integral, is_same, true_type +#include // declval + +// #include + +// #include + + +#include // random_access_iterator_tag + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template struct make_void +{ + using type = void; +}; +template using void_t = typename make_void::type; +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +struct iterator_types {}; + +template +struct iterator_types < + It, + void_t> +{ + using difference_type = typename It::difference_type; + using value_type = typename It::value_type; + using pointer = typename It::pointer; + using reference = typename It::reference; + using iterator_category = typename It::iterator_category; +}; + +// This is required as some compilers implement std::iterator_traits in a way that +// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. +template +struct iterator_traits +{ +}; + +template +struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> + : iterator_types +{ +}; + +template +struct iterator_traits::value>> +{ + using iterator_category = std::random_access_iterator_tag; + using value_type = T; + using difference_type = ptrdiff_t; + using pointer = T*; + using reference = T&; +}; +} +} + +// #include + +// #include + + +#include + +// #include + + +// http://en.cppreference.com/w/cpp/experimental/is_detected +namespace nlohmann +{ +namespace detail +{ +struct nonesuch +{ + nonesuch() = delete; + ~nonesuch() = delete; + nonesuch(nonesuch const&) = delete; + void operator=(nonesuch const&) = delete; +}; + +template class Op, + class... Args> +struct detector +{ + using value_t = std::false_type; + using type = Default; +}; + +template class Op, class... Args> +struct detector>, Op, Args...> +{ + using value_t = std::true_type; + using type = Op; +}; + +template