From ff107d9d03246ea8c15bbea7647e60bba65e9643 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 26 Apr 2024 16:05:03 +0200 Subject: [PATCH 01/77] Input::fetchToStore(): Don't try to substitute Having a narHash doesn't mean that we have the other attributes returned by the fetcher (such as lastModified or rev). For instance, $ nix flake metadata github:NixOS/patchelf/7c2f768bf9601268a4e71c2ebe91e2011918a70f Last modified: 2024-01-15 10:51:22 but $ nix flake metadata github:NixOS/patchelf/7c2f768bf9601268a4e71c2ebe91e2011918a70f?narHash=sha256-PPXqKY2hJng4DBVE0I4xshv/vGLUskL7jl53roB8UdU%3D (does not print a "Last modified") The latter only happens if the store path already exists or is substitutable, which made this impure behaviour unpredictable. Fixes #10601. --- src/libfetchers/fetchers.cc | 18 ------------------ tests/functional/tarball.sh | 3 --- 2 files changed, 21 deletions(-) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 0577b8d9d5ad..a92b1e5ed3ef 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -167,24 +167,6 @@ std::pair Input::fetchToStore(ref store) const if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - /* The tree may already be in the Nix store, or it could be - substituted (which is often faster than fetching from the - original source). So check that. 
*/ - if (getNarHash()) { - try { - auto storePath = computeStorePath(*store); - - store->ensurePath(storePath); - - debug("using substituted/cached input '%s' in '%s'", - to_string(), store->printStorePath(storePath)); - - return {std::move(storePath), *this}; - } catch (Error & e) { - debug("substitution of input '%s' failed: %s", to_string(), e.what()); - } - } - auto [storePath, input] = [&]() -> std::pair { try { auto [accessor, final] = getAccessorUnchecked(store); diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 062f27ad61b4..158a73f555b8 100644 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -33,9 +33,6 @@ test_tarball() { nix-build -o $TEST_ROOT/result -E "import (fetchTree file://$tarball)" nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; })" nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })" - # Do not re-fetch paths already present - nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file:///does-not-exist/must-remain-unused/$tarball; narHash = \"$hash\"; })" - expectStderr 102 nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"sha256-xdKv2pq/IiwLSnBBJXW8hNowI4MrdZfW+SYqDQs7Tzc=\"; })" | grep 'NAR hash mismatch in input' [[ $(nix eval --impure --expr "(fetchTree file://$tarball).lastModified") = 1000000000 ]] From df36ff0d1e60f59eb3e0442fa335252421ec8057 Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Tue, 2 Jul 2024 21:02:45 -0500 Subject: [PATCH 02/77] install-darwin: fix _nixbld uids for macOS sequoia Starting in macOS 15 Sequoia, macOS daemon UIDs are encroaching on our default UIDs of 301-332. This commit relocates our range up to avoid clashing with the current UIDs of 301-304 and buy us a little time while still leaving headroom for people installing more than 32 users. 
--- scripts/bigsur-nixbld-user-migration.sh | 2 +- scripts/install-darwin-multi-user.sh | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh index 0eb312e07cd9..bc42e02e6b23 100755 --- a/scripts/bigsur-nixbld-user-migration.sh +++ b/scripts/bigsur-nixbld-user-migration.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -((NEW_NIX_FIRST_BUILD_UID=301)) +((NEW_NIX_FIRST_BUILD_UID=350)) id_available(){ dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index 24c9052f91cb..bd1a54ad8738 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -4,7 +4,17 @@ set -eu set -o pipefail # System specific settings -export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-301}" +# Notes: +# - up to macOS Big Sur we used the same GID/UIDs as Linux (30000:30001-32) +# - we changed UID to 301 because Big Sur updates failed into recovery mode +# we're targeting the 200-400 UID range for role users mentioned in the +# usage note for sysadminctl +# - we changed UID to 350 because Sequoia now uses UIDs 300-304 for its own +# daemon users +# - we changed GID to 350 alongside above just because it hides the nixbld +# group from the Users & Groups settings panel :) +export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-350}" +export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}" export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist From 75567423fb6163559575c38867cda09b754364d7 Mon Sep 17 00:00:00 2001 From: "Travis A. 
Everett" Date: Tue, 2 Jul 2024 21:22:35 -0500 Subject: [PATCH 03/77] install-darwin: move nixbld gid to match first UID --- scripts/install-multi-user.sh | 6 ++---- scripts/install-systemd-multi-user.sh | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 6aee073e3f93..a487d459f404 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -23,10 +23,10 @@ readonly RED='\033[31m' # installer allows overriding build user count to speed up installation # as creating each user takes non-trivial amount of time on macos readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} -readonly NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}" readonly NIX_BUILD_GROUP_NAME="nixbld" # each system specific installer must set these: # NIX_FIRST_BUILD_UID +# NIX_BUILD_GROUP_ID # NIX_BUILD_USER_NAME_TEMPLATE # Please don't change this. We don't support it, because the # default shell profile that comes with Nix doesn't support it. @@ -530,9 +530,7 @@ It seems the build group $NIX_BUILD_GROUP_NAME already exists, but with the UID $primary_group_id. This script can't really handle that right now, so I'm going to give up. -You can fix this by editing this script and changing the -NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID -to $primary_group_id and re-run. +You can export NIX_BUILD_GROUP_ID=$primary_group_id and re-run. 
EOF else row " Exists" "Yes" diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh index a62ed7e3aa44..a79a699906af 100755 --- a/scripts/install-systemd-multi-user.sh +++ b/scripts/install-systemd-multi-user.sh @@ -5,6 +5,7 @@ set -o pipefail # System specific settings export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-30001}" +export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}" export NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d" readonly SERVICE_SRC=/lib/systemd/system/nix-daemon.service From 11cf29b15c8ea144035eb6a9d9f31bb05eee2048 Mon Sep 17 00:00:00 2001 From: Emily Date: Mon, 26 Aug 2024 17:59:58 +0100 Subject: [PATCH 04/77] install-darwin: increment base UID by 1 (#15) --- scripts/bigsur-nixbld-user-migration.sh | 2 +- scripts/install-darwin-multi-user.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh index bc42e02e6b23..57f65da7212b 100755 --- a/scripts/bigsur-nixbld-user-migration.sh +++ b/scripts/bigsur-nixbld-user-migration.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -((NEW_NIX_FIRST_BUILD_UID=350)) +((NEW_NIX_FIRST_BUILD_UID=351)) id_available(){ dscl . 
list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index bd1a54ad8738..89c66b8f41cd 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -9,11 +9,11 @@ set -o pipefail # - we changed UID to 301 because Big Sur updates failed into recovery mode # we're targeting the 200-400 UID range for role users mentioned in the # usage note for sysadminctl -# - we changed UID to 350 because Sequoia now uses UIDs 300-304 for its own +# - we changed UID to 351 because Sequoia now uses UIDs 300-304 for its own # daemon users # - we changed GID to 350 alongside above just because it hides the nixbld # group from the Users & Groups settings panel :) -export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-350}" +export NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-351}" export NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-350}" export NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" From 5dd6c4f062ec392b965a5fcfc18fd3f75e2a79cb Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 19 Aug 2024 13:21:44 +0200 Subject: [PATCH 05/77] libgit2, GitRepo: Write thin packfiles libgit2 didn't write thin ones, hence the patch. This should improve performance on systems with weak I/O in ~/.cache, especially in terms of operations per second, or where system calls are slower. (macOS, VMs?) 
--- packaging/dependencies.nix | 2 + .../libgit2-mempack-thin-packfile.patch | 282 ++++++++++++++++++ src/libfetchers/git-utils.cc | 57 ++++ src/libfetchers/git-utils.hh | 2 + 4 files changed, 343 insertions(+) create mode 100644 packaging/patches/libgit2-mempack-thin-packfile.patch diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 21c48e5cc829..74b5cbc05c6f 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -115,6 +115,8 @@ scope: { version = inputs.libgit2.lastModifiedDate; cmakeFlags = attrs.cmakeFlags or [] ++ [ "-DUSE_SSH=exec" ]; + patches = attrs.patches or [] + ++ [ ./patches/libgit2-mempack-thin-packfile.patch ]; }); busybox-sandbox-shell = pkgs.busybox-sandbox-shell or (pkgs.busybox.override { diff --git a/packaging/patches/libgit2-mempack-thin-packfile.patch b/packaging/patches/libgit2-mempack-thin-packfile.patch new file mode 100644 index 000000000000..fb74b1683136 --- /dev/null +++ b/packaging/patches/libgit2-mempack-thin-packfile.patch @@ -0,0 +1,282 @@ +commit 9bacade4a3ef4b6b26e2c02f549eef0e9eb9eaa2 +Author: Robert Hensing +Date: Sun Aug 18 20:20:36 2024 +0200 + + Add unoptimized git_mempack_write_thin_pack + +diff --git a/include/git2/sys/mempack.h b/include/git2/sys/mempack.h +index 17da590a3..3688bdd50 100644 +--- a/include/git2/sys/mempack.h ++++ b/include/git2/sys/mempack.h +@@ -44,6 +44,29 @@ GIT_BEGIN_DECL + */ + GIT_EXTERN(int) git_mempack_new(git_odb_backend **out); + ++/** ++ * Write a thin packfile with the objects in the memory store. ++ * ++ * A thin packfile is a packfile that does not contain its transitive closure of ++ * references. This is useful for efficiently distributing additions to a ++ * repository over the network, but also finds use in the efficient bulk ++ * addition of objects to a repository, locally. 
++ * ++ * This operation performs the (shallow) insert operations into the ++ * `git_packbuilder`, but does not write the packfile to disk; ++ * see `git_packbuilder_write_buf`. ++ * ++ * It also does not reset the memory store; see `git_mempack_reset`. ++ * ++ * @note This function may or may not write trees and blobs that are not ++ * referenced by commits. Currently everything is written, but this ++ * behavior may change in the future as the packer is optimized. ++ * ++ * @param backend The mempack backend ++ * @param pb The packbuilder to use to write the packfile ++ */ ++GIT_EXTERN(int) git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb); ++ + /** + * Dump all the queued in-memory writes to a packfile. + * +diff --git a/src/libgit2/odb_mempack.c b/src/libgit2/odb_mempack.c +index 6f27f45f8..0b61e2b66 100644 +--- a/src/libgit2/odb_mempack.c ++++ b/src/libgit2/odb_mempack.c +@@ -132,6 +132,35 @@ cleanup: + return err; + } + ++int git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb) ++{ ++ struct memory_packer_db *db = (struct memory_packer_db *)backend; ++ const git_oid *oid; ++ size_t iter = 0; ++ int err = -1; ++ ++ /* TODO: Implement the recency heuristics. ++ For this it probably makes sense to only write what's referenced ++ through commits, an option I've carved out for you in the docs. ++ wrt heuristics: ask your favorite LLM to translate https://git-scm.com/docs/pack-heuristics/en ++ to actual normal reference documentation. 
*/ ++ while (true) { ++ err = git_oidmap_iterate(NULL, db->objects, &iter, &oid); ++ if (err == GIT_ITEROVER) { ++ err = 0; ++ break; ++ } ++ if (err != 0) ++ return err; ++ ++ err = git_packbuilder_insert(pb, oid, NULL); ++ if (err != 0) ++ return err; ++ } ++ ++ return 0; ++} ++ + int git_mempack_dump( + git_buf *pack, + git_repository *repo, +diff --git a/tests/libgit2/mempack/thinpack.c b/tests/libgit2/mempack/thinpack.c +new file mode 100644 +index 000000000..604a4dda2 +--- /dev/null ++++ b/tests/libgit2/mempack/thinpack.c +@@ -0,0 +1,196 @@ ++#include "clar_libgit2.h" ++#include "git2/indexer.h" ++#include "git2/odb_backend.h" ++#include "git2/tree.h" ++#include "git2/types.h" ++#include "git2/sys/mempack.h" ++#include "git2/sys/odb_backend.h" ++#include "util.h" ++ ++static git_repository *_repo; ++static git_odb_backend * _mempack_backend; ++ ++void test_mempack_thinpack__initialize(void) ++{ ++ git_odb *odb; ++ ++ _repo = cl_git_sandbox_init_new("mempack_thinpack_repo"); ++ ++ cl_git_pass(git_mempack_new(&_mempack_backend)); ++ cl_git_pass(git_repository_odb(&odb, _repo)); ++ cl_git_pass(git_odb_add_backend(odb, _mempack_backend, 999)); ++ git_odb_free(odb); ++} ++ ++void _mempack_thinpack__cleanup(void) ++{ ++ cl_git_sandbox_cleanup(); ++} ++ ++/* ++ Generating a packfile for an unchanged repo works and produces an empty packfile. ++ Even if we allow this scenario to be detected, it shouldn't misbehave if the ++ application is unaware of it. 
++*/ ++void test_mempack_thinpack__empty(void) ++{ ++ git_packbuilder *pb; ++ int version; ++ int n; ++ git_buf buf = GIT_BUF_INIT; ++ ++ git_packbuilder_new(&pb, _repo); ++ ++ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); ++ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); ++ cl_assert_in_range(12, buf.size, 1024 /* empty packfile is >0 bytes, but certainly not that big */); ++ cl_assert(buf.ptr[0] == 'P'); ++ cl_assert(buf.ptr[1] == 'A'); ++ cl_assert(buf.ptr[2] == 'C'); ++ cl_assert(buf.ptr[3] == 'K'); ++ version = (buf.ptr[4] << 24) | (buf.ptr[5] << 16) | (buf.ptr[6] << 8) | buf.ptr[7]; ++ /* Subject to change. https://git-scm.com/docs/pack-format: Git currently accepts version number 2 or 3 but generates version 2 only.*/ ++ cl_assert_equal_i(2, version); ++ n = (buf.ptr[8] << 24) | (buf.ptr[9] << 16) | (buf.ptr[10] << 8) | buf.ptr[11]; ++ cl_assert_equal_i(0, n); ++ git_buf_dispose(&buf); ++ ++ git_packbuilder_free(pb); ++} ++ ++#define LIT_LEN(x) x, sizeof(x) - 1 ++ ++/* ++ Check that git_mempack_write_thin_pack produces a thin packfile. ++*/ ++void test_mempack_thinpack__thin(void) ++{ ++ /* Outline: ++ - Create tree 1 ++ - Flush to packfile A ++ - Create tree 2 ++ - Flush to packfile B ++ ++ Tree 2 has a new blob and a reference to a blob from tree 1. 
++ ++ Expectation: ++ - Packfile B is thin and does not contain the objects from packfile A ++ */ ++ ++ ++ git_oid oid_blob_1; ++ git_oid oid_blob_2; ++ git_oid oid_blob_3; ++ git_oid oid_tree_1; ++ git_oid oid_tree_2; ++ git_treebuilder *tb; ++ ++ git_packbuilder *pb; ++ git_buf buf = GIT_BUF_INIT; ++ git_indexer *indexer; ++ git_indexer_progress stats; ++ char pack_dir_path[1024]; ++ ++ char sbuf[1024]; ++ const char * repo_path; ++ const char * pack_name_1; ++ const char * pack_name_2; ++ git_str pack_path_1 = GIT_STR_INIT; ++ git_str pack_path_2 = GIT_STR_INIT; ++ git_odb_backend * pack_odb_backend_1; ++ git_odb_backend * pack_odb_backend_2; ++ ++ ++ cl_assert_in_range(0, snprintf(pack_dir_path, sizeof(pack_dir_path), "%s/objects/pack", git_repository_path(_repo)), sizeof(pack_dir_path)); ++ ++ /* Create tree 1 */ ++ ++ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); ++ cl_git_pass(git_blob_create_from_buffer(&oid_blob_2, _repo, LIT_LEN("thinpack blob 2"))); ++ ++ ++ cl_git_pass(git_treebuilder_new(&tb, _repo, NULL)); ++ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); ++ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob2", &oid_blob_2, GIT_FILEMODE_BLOB)); ++ cl_git_pass(git_treebuilder_write(&oid_tree_1, tb)); ++ ++ /* Flush */ ++ ++ cl_git_pass(git_packbuilder_new(&pb, _repo)); ++ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); ++ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); ++ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); ++ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); ++ cl_git_pass(git_indexer_commit(indexer, &stats)); ++ pack_name_1 = strdup(git_indexer_name(indexer)); ++ cl_assert(pack_name_1); ++ git_buf_dispose(&buf); ++ git_mempack_reset(_mempack_backend); ++ git_indexer_free(indexer); ++ git_packbuilder_free(pb); ++ ++ /* Create tree 2 */ ++ ++ cl_git_pass(git_treebuilder_clear(tb)); ++ /* blob 1 
won't be used, but we add it anyway to test that just "declaring" an object doesn't ++ necessarily cause its inclusion in the next thin packfile. It must only be included if new. */ ++ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); ++ cl_git_pass(git_blob_create_from_buffer(&oid_blob_3, _repo, LIT_LEN("thinpack blob 3"))); ++ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); ++ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob3", &oid_blob_3, GIT_FILEMODE_BLOB)); ++ cl_git_pass(git_treebuilder_write(&oid_tree_2, tb)); ++ ++ /* Flush */ ++ ++ cl_git_pass(git_packbuilder_new(&pb, _repo)); ++ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); ++ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); ++ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); ++ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); ++ cl_git_pass(git_indexer_commit(indexer, &stats)); ++ pack_name_2 = strdup(git_indexer_name(indexer)); ++ cl_assert(pack_name_2); ++ git_buf_dispose(&buf); ++ git_mempack_reset(_mempack_backend); ++ git_indexer_free(indexer); ++ git_packbuilder_free(pb); ++ git_treebuilder_free(tb); ++ ++ /* Assertions */ ++ ++ assert(pack_name_1); ++ assert(pack_name_2); ++ ++ repo_path = git_repository_path(_repo); ++ ++ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_1); ++ git_str_joinpath(&pack_path_1, repo_path, sbuf); ++ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_2); ++ git_str_joinpath(&pack_path_2, repo_path, sbuf); ++ ++ /* If they're the same, something definitely went wrong. 
*/ ++ cl_assert(strcmp(pack_name_1, pack_name_2) != 0); ++ ++ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_1, pack_path_1.ptr)); ++ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_1)); ++ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_2)); ++ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_3)); ++ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_1)); ++ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_2)); ++ ++ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_2, pack_path_2.ptr)); ++ /* blob 1 is already in the packfile 1, so packfile 2 must not include it, in order to be _thin_. */ ++ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_1)); ++ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_2)); ++ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_3)); ++ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_1)); ++ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_2)); ++ ++ pack_odb_backend_1->free(pack_odb_backend_1); ++ pack_odb_backend_2->free(pack_odb_backend_2); ++ free((void *)pack_name_1); ++ free((void *)pack_name_2); ++ git_str_dispose(&pack_path_1); ++ git_str_dispose(&pack_path_2); ++ ++} diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 114aa4ec078b..5306d8780b30 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -13,13 +13,17 @@ #include #include #include +#include #include +#include #include #include #include #include #include #include +#include +#include #include #include @@ -76,6 +80,9 @@ typedef std::unique_ptr> StatusLi typedef std::unique_ptr> Remote; typedef std::unique_ptr> GitConfig; typedef std::unique_ptr> ConfigIterator; +typedef std::unique_ptr> ObjectDb; +typedef std::unique_ptr> PackBuilder; +typedef std::unique_ptr> Indexer; // A helper to ensure that we don't leak objects returned 
by libgit2. template @@ -164,6 +171,11 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this /** Location of the repository on disk. */ std::filesystem::path path; Repository repo; + /** + * In-memory object store for efficient batched writing to packfiles. + * Owned by `repo`. + */ + git_odb_backend * mempack_backend; GitRepoImpl(std::filesystem::path _path, bool create, bool bare) : path(std::move(_path)) @@ -177,6 +189,16 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this if (git_repository_init(Setter(repo), path.string().c_str(), bare)) throw Error("creating Git repository '%s': %s", path, git_error_last()->message); } + git_odb * odb; + if (git_repository_odb(&odb, repo.get())) + throw Error("getting Git object database: %s", git_error_last()->message); + + // TODO: release mempack_backend? + if (git_mempack_new(&mempack_backend)) + throw Error("creating mempack backend: %s", git_error_last()->message); + + if (git_odb_add_backend(odb, mempack_backend, 999)) + throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); } operator git_repository * () @@ -184,6 +206,39 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this return repo.get(); } + void flush() override { + git_buf buf = GIT_BUF_INIT; + try { + PackBuilder packBuilder; + git_packbuilder_new(Setter(packBuilder), *this); + git_mempack_write_thin_pack(mempack_backend, packBuilder.get()); + git_packbuilder_write_buf(&buf, packBuilder.get()); + + std::string repo_path = std::string(git_repository_path(repo.get())); + while (!repo_path.empty() && repo_path.back() == '/') + repo_path.pop_back(); + std::string pack_dir_path = repo_path + "/objects/pack"; + + // TODO: could the indexing be done in a separate thread? 
+ Indexer indexer; + git_indexer_progress stats; + if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr)) + throw Error("creating git packfile indexer: %s", git_error_last()->message); + if (git_indexer_append(indexer.get(), buf.ptr, buf.size, &stats)) + throw Error("appending to git packfile index: %s", git_error_last()->message); + if (git_indexer_commit(indexer.get(), &stats)) + throw Error("committing git packfile index: %s", git_error_last()->message); + + if (git_mempack_reset(mempack_backend)) + throw Error("resetting git mempack backend: %s", git_error_last()->message); + + git_buf_dispose(&buf); + } catch (...) { + git_buf_dispose(&buf); + throw; + } + } + uint64_t getRevCount(const Hash & rev) override { std::unordered_set done; @@ -1008,6 +1063,8 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink auto [oid, _name] = popBuilder(); + repo->flush(); + return toHash(oid); } }; diff --git a/src/libfetchers/git-utils.hh b/src/libfetchers/git-utils.hh index 9152528680c7..65a598ce5f2e 100644 --- a/src/libfetchers/git-utils.hh +++ b/src/libfetchers/git-utils.hh @@ -80,6 +80,8 @@ struct GitRepo virtual ref getFileSystemObjectSink() = 0; + virtual void flush() = 0; + virtual void fetch( const std::string & url, const std::string & refspec, From d0f8a9236392ab41414dddbe7400da98fad9f62d Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 26 Aug 2024 11:38:40 +0200 Subject: [PATCH 06/77] Make tarball cache more interruptible --- src/libfetchers/git-utils.cc | 7 +++++++ src/libutil/unix/signals-impl.hh | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 5306d8780b30..fb5341599a6a 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -211,8 +211,12 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this try { PackBuilder packBuilder; git_packbuilder_new(Setter(packBuilder), *this); + checkInterrupt(); 
git_mempack_write_thin_pack(mempack_backend, packBuilder.get()); + checkInterrupt(); + // TODO make git_packbuilder_write_buf() interruptible git_packbuilder_write_buf(&buf, packBuilder.get()); + checkInterrupt(); std::string repo_path = std::string(git_repository_path(repo.get())); while (!repo_path.empty() && repo_path.back() == '/') @@ -224,8 +228,10 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this git_indexer_progress stats; if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr)) throw Error("creating git packfile indexer: %s", git_error_last()->message); + // TODO: feed buf in (fairly large) chunk to make this interruptible if (git_indexer_append(indexer.get(), buf.ptr, buf.size, &stats)) throw Error("appending to git packfile index: %s", git_error_last()->message); + checkInterrupt(); if (git_indexer_commit(indexer.get(), &stats)) throw Error("committing git packfile index: %s", git_error_last()->message); @@ -237,6 +243,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this git_buf_dispose(&buf); throw; } + checkInterrupt(); } uint64_t getRevCount(const Hash & rev) override diff --git a/src/libutil/unix/signals-impl.hh b/src/libutil/unix/signals-impl.hh index 7ac8c914d567..2193922be4cd 100644 --- a/src/libutil/unix/signals-impl.hh +++ b/src/libutil/unix/signals-impl.hh @@ -84,6 +84,12 @@ static inline bool getInterrupted() return unix::_isInterrupted; } +/** + * Throw `Interrupted` exception if the process has been interrupted. + * + * Call this in long-running loops and between slow operations to terminate + * them as needed. 
+ */ void inline checkInterrupt() { using namespace unix; From 97ff2ed4555ce0761368046270fc4466550bfb0c Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 26 Aug 2024 11:39:14 +0200 Subject: [PATCH 07/77] Sync tarball cache within tarball cache Activity --- src/libfetchers/github.cc | 3 ++- src/libfetchers/tarball.cc | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 2e914164acd6..ecfd035fcc6f 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -261,11 +261,12 @@ struct GitArchiveInputScheme : InputScheme auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); + auto tree = parseSink->sync(); act.reset(); TarballInfo tarballInfo { - .treeHash = tarballCache->dereferenceSingletonDirectory(parseSink->sync()), + .treeHash = tarballCache->dereferenceSingletonDirectory(tree), .lastModified = lastModified }; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index dd4f3b78086e..a082b0078b6f 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -170,6 +170,7 @@ static DownloadTarballResult downloadTarball_( auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); + auto tree = parseSink->sync(); act.reset(); @@ -184,7 +185,7 @@ static DownloadTarballResult downloadTarball_( } else { infoAttrs.insert_or_assign("etag", res->etag); infoAttrs.insert_or_assign("treeHash", - tarballCache->dereferenceSingletonDirectory(parseSink->sync()).gitRev()); + tarballCache->dereferenceSingletonDirectory(tree).gitRev()); infoAttrs.insert_or_assign("lastModified", uint64_t(lastModified)); if (res->immutableUrl) infoAttrs.insert_or_assign("immutableUrl", *res->immutableUrl); From fb8d3ed1506d2c0e3065abaad9587e16dccfd1b9 Mon Sep 17 00:00:00 2001 
From: Robert Hensing Date: Mon, 26 Aug 2024 15:04:47 +0200 Subject: [PATCH 08/77] fixup: sync -> flush The latter is not used for memory synchronization things. --- src/libfetchers/git-utils.cc | 7 ++++++- src/libfetchers/git-utils.hh | 6 +++++- src/libfetchers/github.cc | 2 +- src/libfetchers/tarball.cc | 2 +- tests/unit/libfetchers/git-utils.cc | 2 +- 5 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index fb5341599a6a..59b4d424a192 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -170,6 +170,11 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this { /** Location of the repository on disk. */ std::filesystem::path path; + /** + * libgit2 repository. Note that new objects are not written to disk, + * because we are using a mempack backend. For writing to disk, see + * `flush()`, which is also called by `GitFileSystemObjectSink::sync()`. + */ Repository repo; /** * In-memory object store for efficient batched writing to packfiles. @@ -1064,7 +1069,7 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink git_tree_entry_filemode(entry)); } - Hash sync() override + Hash flush() override { updateBuilders({}); diff --git a/src/libfetchers/git-utils.hh b/src/libfetchers/git-utils.hh index 65a598ce5f2e..f45b5a50425d 100644 --- a/src/libfetchers/git-utils.hh +++ b/src/libfetchers/git-utils.hh @@ -7,12 +7,16 @@ namespace nix { namespace fetchers { struct PublicKey; } +/** + * A sink that writes into a Git repository. Note that nothing may be written + * until `flush()` is called. + */ struct GitFileSystemObjectSink : ExtendedFileSystemObjectSink { /** * Flush builder and return a final Git hash. 
*/ - virtual Hash sync() = 0; + virtual Hash flush() = 0; }; struct GitRepo diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index ecfd035fcc6f..308cff33a461 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -261,7 +261,7 @@ struct GitArchiveInputScheme : InputScheme auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); - auto tree = parseSink->sync(); + auto tree = parseSink->flush(); act.reset(); diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index a082b0078b6f..aa5d61bc5a04 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -170,7 +170,7 @@ static DownloadTarballResult downloadTarball_( auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); - auto tree = parseSink->sync(); + auto tree = parseSink->flush(); act.reset(); diff --git a/tests/unit/libfetchers/git-utils.cc b/tests/unit/libfetchers/git-utils.cc index de5110cc39ef..0bf3076dca86 100644 --- a/tests/unit/libfetchers/git-utils.cc +++ b/tests/unit/libfetchers/git-utils.cc @@ -77,7 +77,7 @@ TEST_F(GitUtilsTest, sink_basic) // sink->createHardlink("foo-1.1/links/foo-2", CanonPath("foo-1.1/hello")); - auto result = repo->dereferenceSingletonDirectory(sink->sync()); + auto result = repo->dereferenceSingletonDirectory(sink->flush()); auto accessor = repo->getAccessor(result, false); auto entries = accessor->readDirectory(CanonPath::root); ASSERT_EQ(entries.size(), 5); From 57c48304bba7d1f92fb9acc8abc9e8db4450fc0c Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 26 Aug 2024 15:34:35 +0200 Subject: [PATCH 09/77] fixup: Release odb --- src/libfetchers/git-utils.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 
59b4d424a192..70391a2878bb 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -194,15 +194,16 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this if (git_repository_init(Setter(repo), path.string().c_str(), bare)) throw Error("creating Git repository '%s': %s", path, git_error_last()->message); } - git_odb * odb; - if (git_repository_odb(&odb, repo.get())) + + ObjectDb odb; + if (git_repository_odb(Setter(odb), repo.get())) throw Error("getting Git object database: %s", git_error_last()->message); - // TODO: release mempack_backend? + // mempack_backend will be owned by the repository, so we are not expected to free it ourselves. if (git_mempack_new(&mempack_backend)) throw Error("creating mempack backend: %s", git_error_last()->message); - if (git_odb_add_backend(odb, mempack_backend, 999)) + if (git_odb_add_backend(odb.get(), mempack_backend, 999)) throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); } From c1fe3546ed67babeecfb376315063f0642ce4bbd Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 28 Aug 2024 01:48:25 +0200 Subject: [PATCH 10/77] libgit2: Add libgit2-packbuilder-callback-interruptible.patch --- packaging/dependencies.nix | 18 +- ...2-packbuilder-callback-interruptible.patch | 930 ++++++++++++++++++ 2 files changed, 947 insertions(+), 1 deletion(-) create mode 100644 packaging/patches/libgit2-packbuilder-callback-interruptible.patch diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 74b5cbc05c6f..0182f29c0d05 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -115,8 +115,24 @@ scope: { version = inputs.libgit2.lastModifiedDate; cmakeFlags = attrs.cmakeFlags or [] ++ [ "-DUSE_SSH=exec" ]; + nativeBuildInputs = attrs.nativeBuildInputs or [] + ++ [ + # Needed for `git apply`; see `prePatch` + pkgs.buildPackages.gitMinimal + ]; + # Only `git apply` can handle git binary patches + prePatch = '' + patch() { + git apply + 
} + ''; patches = attrs.patches or [] - ++ [ ./patches/libgit2-mempack-thin-packfile.patch ]; + ++ [ + ./patches/libgit2-mempack-thin-packfile.patch + + # binary patch; see `prePatch` + ./patches/libgit2-packbuilder-callback-interruptible.patch + ]; }); busybox-sandbox-shell = pkgs.busybox-sandbox-shell or (pkgs.busybox.override { diff --git a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch b/packaging/patches/libgit2-packbuilder-callback-interruptible.patch new file mode 100644 index 000000000000..c67822ff755e --- /dev/null +++ b/packaging/patches/libgit2-packbuilder-callback-interruptible.patch @@ -0,0 +1,930 @@ +commit e9823c5da4fa977c46bcb97167fbdd0d70adb5ff +Author: Robert Hensing +Date: Mon Aug 26 20:07:04 2024 +0200 + + Make packbuilder interruptible using progress callback + + Forward errors from packbuilder->progress_cb + + This allows the callback to terminate long-running operations when + the application is interrupted. + +diff --git a/include/git2/pack.h b/include/git2/pack.h +index 0f6bd2ab9..bee72a6c0 100644 +--- a/include/git2/pack.h ++++ b/include/git2/pack.h +@@ -247,6 +247,9 @@ typedef int GIT_CALLBACK(git_packbuilder_progress)( + * @param progress_cb Function to call with progress information during + * pack building. Be aware that this is called inline with pack building + * operations, so performance may be affected. ++ * When progress_cb returns an error, the pack building process will be ++ * aborted and the error will be returned from the invoked function. ++ * `pb` must then be freed. + * @param progress_cb_payload Payload for progress callback. 
+ * @return 0 or an error code + */ +diff --git a/src/libgit2/pack-objects.c b/src/libgit2/pack-objects.c +index b2d80cba9..7c331c2d5 100644 +--- a/src/libgit2/pack-objects.c ++++ b/src/libgit2/pack-objects.c +@@ -932,6 +932,9 @@ static int report_delta_progress( + { + int ret; + ++ if (pb->failure) ++ return pb->failure; ++ + if (pb->progress_cb) { + uint64_t current_time = git_time_monotonic(); + uint64_t elapsed = current_time - pb->last_progress_report_time; +@@ -943,8 +946,10 @@ static int report_delta_progress( + GIT_PACKBUILDER_DELTAFICATION, + count, pb->nr_objects, pb->progress_cb_payload); + +- if (ret) ++ if (ret) { ++ pb->failure = ret; + return git_error_set_after_callback(ret); ++ } + } + } + +@@ -976,7 +981,10 @@ static int find_deltas(git_packbuilder *pb, git_pobject **list, + } + + pb->nr_deltified += 1; +- report_delta_progress(pb, pb->nr_deltified, false); ++ if ((error = report_delta_progress(pb, pb->nr_deltified, false)) < 0) { ++ GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); ++ goto on_error; ++ } + + po = *list++; + (*list_size)--; +@@ -1124,6 +1132,10 @@ struct thread_params { + size_t depth; + size_t working; + size_t data_ready; ++ ++ /* A pb->progress_cb can stop the packing process by returning an error. ++ When that happens, all threads observe the error and stop voluntarily. 
*/ ++ bool stopped; + }; + + static void *threaded_find_deltas(void *arg) +@@ -1133,7 +1145,12 @@ static void *threaded_find_deltas(void *arg) + while (me->remaining) { + if (find_deltas(me->pb, me->list, &me->remaining, + me->window, me->depth) < 0) { +- ; /* TODO */ ++ me->stopped = true; ++ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); ++ me->working = false; ++ git_cond_signal(&me->pb->progress_cond); ++ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_unlock(me->pb) == 0, NULL); ++ return NULL; + } + + GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); +@@ -1175,8 +1192,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, + pb->nr_threads = git__online_cpus(); + + if (pb->nr_threads <= 1) { +- find_deltas(pb, list, &list_size, window, depth); +- return 0; ++ return find_deltas(pb, list, &list_size, window, depth); + } + + p = git__mallocarray(pb->nr_threads, sizeof(*p)); +@@ -1195,6 +1211,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, + p[i].depth = depth; + p[i].working = 1; + p[i].data_ready = 0; ++ p[i].stopped = 0; + + /* try to split chunks on "path" boundaries */ + while (sub_size && sub_size < list_size && +@@ -1262,7 +1279,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, + (!victim || victim->remaining < p[i].remaining)) + victim = &p[i]; + +- if (victim) { ++ if (victim && !target->stopped) { + sub_size = victim->remaining / 2; + list = victim->list + victim->list_size - sub_size; + while (sub_size && list[0]->hash && +@@ -1286,7 +1303,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, + } + target->list_size = sub_size; + target->remaining = sub_size; +- target->working = 1; ++ target->working = 1; /* even when target->stopped, so that we don't process this thread again */ + GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); + + if (git_mutex_lock(&target->mutex)) { +@@ -1299,7 +1316,7 @@ static int 
ll_find_deltas(git_packbuilder *pb, git_pobject **list, + git_cond_signal(&target->cond); + git_mutex_unlock(&target->mutex); + +- if (!sub_size) { ++ if (target->stopped || !sub_size) { + git_thread_join(&target->thread, NULL); + git_cond_free(&target->cond); + git_mutex_free(&target->mutex); +@@ -1308,7 +1325,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, + } + + git__free(p); +- return 0; ++ return pb->failure; + } + + #else +@@ -1319,6 +1336,7 @@ int git_packbuilder__prepare(git_packbuilder *pb) + { + git_pobject **delta_list; + size_t i, n = 0; ++ int error; + + if (pb->nr_objects == 0 || pb->done) + return 0; /* nothing to do */ +@@ -1327,8 +1345,10 @@ int git_packbuilder__prepare(git_packbuilder *pb) + * Although we do not report progress during deltafication, we + * at least report that we are in the deltafication stage + */ +- if (pb->progress_cb) +- pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload); ++ if (pb->progress_cb) { ++ if ((error = pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload)) < 0) ++ return git_error_set_after_callback(error); ++ } + + delta_list = git__mallocarray(pb->nr_objects, sizeof(*delta_list)); + GIT_ERROR_CHECK_ALLOC(delta_list); +@@ -1345,31 +1365,33 @@ int git_packbuilder__prepare(git_packbuilder *pb) + + if (n > 1) { + git__tsort((void **)delta_list, n, type_size_sort); +- if (ll_find_deltas(pb, delta_list, n, ++ if ((error = ll_find_deltas(pb, delta_list, n, + GIT_PACK_WINDOW + 1, +- GIT_PACK_DEPTH) < 0) { ++ GIT_PACK_DEPTH)) < 0) { + git__free(delta_list); +- return -1; ++ return error; + } + } + +- report_delta_progress(pb, pb->nr_objects, true); ++ error = report_delta_progress(pb, pb->nr_objects, true); + + pb->done = true; + git__free(delta_list); +- return 0; ++ return error; + } + +-#define PREPARE_PACK if (git_packbuilder__prepare(pb) < 0) { return -1; } ++#define PREPARE_PACK error = git_packbuilder__prepare(pb); 
if (error < 0) { return error; } + + int git_packbuilder_foreach(git_packbuilder *pb, int (*cb)(void *buf, size_t size, void *payload), void *payload) + { ++ int error; + PREPARE_PACK; + return write_pack(pb, cb, payload); + } + + int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb) + { ++ int error; + PREPARE_PACK; + + return write_pack(pb, &write_pack_buf, buf); +diff --git a/src/libgit2/pack-objects.h b/src/libgit2/pack-objects.h +index bbc8b9430..380a28ebe 100644 +--- a/src/libgit2/pack-objects.h ++++ b/src/libgit2/pack-objects.h +@@ -100,6 +100,10 @@ struct git_packbuilder { + uint64_t last_progress_report_time; + + bool done; ++ ++ /* A non-zero error code in failure causes all threads to shut themselves ++ down. Some functions will return this error code. */ ++ volatile int failure; + }; + + int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb); +diff --git a/tests/libgit2/pack/cancel.c b/tests/libgit2/pack/cancel.c +new file mode 100644 +index 000000000..a0aa9716a +--- /dev/null ++++ b/tests/libgit2/pack/cancel.c +@@ -0,0 +1,240 @@ ++#include "clar_libgit2.h" ++#include "futils.h" ++#include "pack.h" ++#include "hash.h" ++#include "iterator.h" ++#include "vector.h" ++#include "posix.h" ++#include "hash.h" ++#include "pack-objects.h" ++ ++static git_repository *_repo; ++static git_revwalk *_revwalker; ++static git_packbuilder *_packbuilder; ++static git_indexer *_indexer; ++static git_vector _commits; ++static int _commits_is_initialized; ++static git_indexer_progress _stats; ++ ++extern bool git_disable_pack_keep_file_checks; ++ ++static void pack_packbuilder_init(const char *sandbox) { ++ _repo = cl_git_sandbox_init(sandbox); ++ /* cl_git_pass(p_chdir(sandbox)); */ ++ cl_git_pass(git_revwalk_new(&_revwalker, _repo)); ++ cl_git_pass(git_packbuilder_new(&_packbuilder, _repo)); ++ cl_git_pass(git_vector_init(&_commits, 0, NULL)); ++ _commits_is_initialized = 1; ++ memset(&_stats, 0, sizeof(_stats)); ++ p_fsync__cnt = 0; ++} ++ ++void 
test_pack_cancel__initialize(void) ++{ ++ pack_packbuilder_init("small.git"); ++} ++ ++void test_pack_cancel__cleanup(void) ++{ ++ git_oid *o; ++ unsigned int i; ++ ++ cl_git_pass(git_libgit2_opts(GIT_OPT_ENABLE_FSYNC_GITDIR, 0)); ++ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, false)); ++ ++ if (_commits_is_initialized) { ++ _commits_is_initialized = 0; ++ git_vector_foreach(&_commits, i, o) { ++ git__free(o); ++ } ++ git_vector_free(&_commits); ++ } ++ ++ git_packbuilder_free(_packbuilder); ++ _packbuilder = NULL; ++ ++ git_revwalk_free(_revwalker); ++ _revwalker = NULL; ++ ++ git_indexer_free(_indexer); ++ _indexer = NULL; ++ ++ /* cl_git_pass(p_chdir("..")); */ ++ cl_git_sandbox_cleanup(); ++ _repo = NULL; ++} ++ ++static int seed_packbuilder(void) ++{ ++ int error; ++ git_oid oid, *o; ++ unsigned int i; ++ ++ git_revwalk_sorting(_revwalker, GIT_SORT_TIME); ++ cl_git_pass(git_revwalk_push_ref(_revwalker, "HEAD")); ++ ++ while (git_revwalk_next(&oid, _revwalker) == 0) { ++ o = git__malloc(sizeof(git_oid)); ++ cl_assert(o != NULL); ++ git_oid_cpy(o, &oid); ++ cl_git_pass(git_vector_insert(&_commits, o)); ++ } ++ ++ git_vector_foreach(&_commits, i, o) { ++ if((error = git_packbuilder_insert(_packbuilder, o, NULL)) < 0) ++ return error; ++ } ++ ++ git_vector_foreach(&_commits, i, o) { ++ git_object *obj; ++ cl_git_pass(git_object_lookup(&obj, _repo, o, GIT_OBJECT_COMMIT)); ++ error = git_packbuilder_insert_tree(_packbuilder, ++ git_commit_tree_id((git_commit *)obj)); ++ git_object_free(obj); ++ if (error < 0) ++ return error; ++ } ++ ++ return 0; ++} ++ ++static int fail_stage; ++ ++static int packbuilder_cancel_after_n_calls_cb(int stage, uint32_t current, uint32_t total, void *payload) ++{ ++ ++ /* Force the callback to run again on the next opportunity regardless ++ of how fast we're running. 
*/ ++ _packbuilder->last_progress_report_time = 0; ++ ++ if (stage == fail_stage) { ++ int *calls = (int *)payload; ++ int n = *calls; ++ /* Always decrement, including past zero. This way the error is only ++ triggered once, making sure it is picked up immediately. */ ++ --*calls; ++ if (n == 0) ++ return GIT_EUSER; ++ } ++ ++ return 0; ++} ++ ++static void test_cancel(int n) ++{ ++ ++ int calls_remaining = n; ++ int err; ++ git_buf buf = GIT_BUF_INIT; ++ ++ /* Switch to a small repository, so that `packbuilder_cancel_after_n_calls_cb` ++ can hack the time to call the callback on every opportunity. */ ++ ++ cl_git_pass(git_packbuilder_set_callbacks(_packbuilder, &packbuilder_cancel_after_n_calls_cb, &calls_remaining)); ++ err = seed_packbuilder(); ++ if (!err) ++ err = git_packbuilder_write_buf(&buf, _packbuilder); ++ ++ cl_assert_equal_i(GIT_EUSER, err); ++} ++void test_pack_cancel__cancel_after_add_0(void) ++{ ++ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; ++ test_cancel(0); ++} ++ ++void test_pack_cancel__cancel_after_add_1(void) ++{ ++ cl_skip(); ++ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; ++ test_cancel(1); ++} ++ ++void test_pack_cancel__cancel_after_delta_0(void) ++{ ++ fail_stage = GIT_PACKBUILDER_DELTAFICATION; ++ test_cancel(0); ++} ++ ++void test_pack_cancel__cancel_after_delta_1(void) ++{ ++ fail_stage = GIT_PACKBUILDER_DELTAFICATION; ++ test_cancel(1); ++} ++ ++void test_pack_cancel__cancel_after_delta_0_threaded(void) ++{ ++#ifdef GIT_THREADS ++ git_packbuilder_set_threads(_packbuilder, 8); ++ fail_stage = GIT_PACKBUILDER_DELTAFICATION; ++ test_cancel(0); ++#else ++ cl_skip(); ++#endif ++} ++ ++void test_pack_cancel__cancel_after_delta_1_threaded(void) ++{ ++#ifdef GIT_THREADS ++ git_packbuilder_set_threads(_packbuilder, 8); ++ fail_stage = GIT_PACKBUILDER_DELTAFICATION; ++ test_cancel(1); ++#else ++ cl_skip(); ++#endif ++} ++ ++static int foreach_cb(void *buf, size_t len, void *payload) ++{ ++ git_indexer *idx = (git_indexer *) payload; ++ 
cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); ++ return 0; ++} ++ ++void test_pack_cancel__foreach(void) ++{ ++ git_indexer *idx; ++ ++ seed_packbuilder(); ++ ++#ifdef GIT_EXPERIMENTAL_SHA256 ++ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); ++#else ++ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); ++#endif ++ ++ cl_git_pass(git_packbuilder_foreach(_packbuilder, foreach_cb, idx)); ++ cl_git_pass(git_indexer_commit(idx, &_stats)); ++ git_indexer_free(idx); ++} ++ ++static int foreach_cancel_cb(void *buf, size_t len, void *payload) ++{ ++ git_indexer *idx = (git_indexer *)payload; ++ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); ++ return (_stats.total_objects > 2) ? -1111 : 0; ++} ++ ++void test_pack_cancel__foreach_with_cancel(void) ++{ ++ git_indexer *idx; ++ ++ seed_packbuilder(); ++ ++#ifdef GIT_EXPERIMENTAL_SHA256 ++ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); ++#else ++ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); ++#endif ++ ++ cl_git_fail_with( ++ git_packbuilder_foreach(_packbuilder, foreach_cancel_cb, idx), -1111); ++ git_indexer_free(idx); ++} ++ ++void test_pack_cancel__keep_file_check(void) ++{ ++ assert(!git_disable_pack_keep_file_checks); ++ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, true)); ++ assert(git_disable_pack_keep_file_checks); ++} +diff --git a/tests/resources/small.git/HEAD b/tests/resources/small.git/HEAD +new file mode 100644 +index 0000000000000000000000000000000000000000..cb089cd89a7d7686d284d8761201649346b5aa1c +GIT binary patch +literal 23 +ecmXR)O|w!cN=+-)&qz&7Db~+TEG|hc;sO9;xClW2 + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/config b/tests/resources/small.git/config +new file mode 100644 +index 0000000000000000000000000000000000000000..07d359d07cf1ed0c0074fdad71ffff5942f0adfa +GIT binary patch +literal 66 +zcmaz}&M!)h<>D+#Eyypk5{uv*03B5png9R* + +literal 0 +HcmV?d00001 + +diff --git 
a/tests/resources/small.git/description b/tests/resources/small.git/description +new file mode 100644 +index 0000000000000000000000000000000000000000..498b267a8c7812490d6479839c5577eaaec79d62 +GIT binary patch +literal 73 +zcmWH|%S+5nO;IRHEyyp$t+PQ$;d2LNXyJgRZve!Elw`VEGWs$&r??@ +Q$yWgB0LrH#Y0~2Y0PnOK(EtDd + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/applypatch-msg.sample b/tests/resources/small.git/hooks/applypatch-msg.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..dcbf8167fa503f96ff6a39c68409007eadc9b1f3 +GIT binary patch +literal 535 +zcmY+AX;Q;542A#a6e8^~FyI8r&I~hf2QJ{GO6(?HuvEG*+#R{4EI%zhfA8r{j%sh$ +zHE~E-UtQd8{bq4@*S%jq3@bmxwQDXGv#o!N`o3AHMw3xD)hy0#>&E&zzl%vRffomqo=v6>_2NRa#TwDdYvTVQyueO*15Nlo%=#DXgC0bhF3vTa`LQGaO9;jeD$OP?~ +za$G4Q{z+Q_{5V?5h;a-noM$P{<>Q~j4o7u%#P6^o^16{y*jU=-K8GYD_dUtdj4FSx +zSC0C!DvAnv%S!4dgk +XB^)11aoGMJPCqWs%IS0YSv(eBT&%T6 + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/commit-msg.sample b/tests/resources/small.git/hooks/commit-msg.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..f3780f92349638ebe32f6baf24c7c3027675d7c9 +GIT binary patch +literal 953 +zcmaJy@-{3h^^Cx;#d0zEA@DDc$nY4ez&|=%jTg@_HU*ub=!!y$xW09TSjlj +z(`I@QCsM`!9&80$I98wsQ8yK#)Orb<8re8FjkKh630D$QUDwi~(gkX=RunYm$rDjk +zlp%RUSnzA#6yjdG5?T?2DcYKp+v_lts0ljn&bh3J0bD5@N@1UKZ190O6ZeWr-BuZ^ +zWRebCX%(%=Xoj#(xYk1Cjtr!=tyBesf@m6}8zY6Ijbz9i9ziI_jG9MvR +zDH*e>^ga9IR?2wrSrAVm;eButj4Y>7(E2?b~jsu>& +zRKCJ7bp#19sqYh627wD%D9R$8=Ml$TNlumDypl~$jBu*G>5fIR^FB0h0Ex&TGZNr> +zL5hs1_K>taRb!|ThN9ns7^@4MXKP+6aGI_UK)T-M#rcP$;kN(Vcf#P)+5GzWa{l@J +z>-E{`$1iiNVYxq27}j;uo%;)r3kJI2xCFF~Ux;$Q%) +wjbk6JlDCM`jU&P+UVOvg`|iYl<7~9k>HHB4I;pdlQ=I-^$DrHaN$@lH1?P!0U;qFB + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/fsmonitor-watchman.sample b/tests/resources/small.git/hooks/fsmonitor-watchman.sample +new file mode 100755 +index 
0000000000000000000000000000000000000000..41184ebc318c159f51cd1ebe2290559805df89d8 +GIT binary patch +literal 4777 +zcmbtYYi}F368$Xwipg4lq(BeHMvzvH-4;n7DGJBPqq#tw3aed8+IU5-m)yvL>;Cqh +z8FFRGj$`9CA8aoJ?j^$%==FV``-=rhLcPW`McSytRm~mEO7_&_cAVZrf1fFy*ha@8oe%*-aBYE +zcjzZg>LOkgxuUr-XJnHyD;zmPnRaSc#!k_P*d_BttRdc+J6G7za5#+^Y1nkc2Oowk`ya47uUR3Feu?B(w;S{(VYzxh}q-=#zP@uxSx{wbyPUMFU;K(06)$o{07&3yI?q{GqMcQ1c_^M<0< +zF4acAV)Il-V(rCTC1(;bsZ*}bl8dmejAk~yb`B}!^0;g^(o9kGUfZfDOvyp@x4OQt +zSgWh6T|3eq;9MFs8-#z+FDM1h(IjRUP|``PxupgJ7CUHOH90gbgl^2~97`?_X{P)) +zB*$r1cDlF-%azKND}?Gv`2K8-9v5e`gQoft=j?T<&a13c^!wY_$D`5z-X1g?ty&6- +zQN50{8?bUk9AI->^W@~~nkOghHIC2YN+AXkLQG_2-{Pq3%{`3KUMeG$iIn%%^6*NYb +zn|_BdV#C)n4565VccX;uT8&z3vSi!HXGbUj2B!R +zdz~&#fk#L-&k$fLwo$4?>12g@AXOKFekuo#6EHB%gmpD?1eyh%N8s{2wGoTu +z*@6cEZ^ZW!FAF_|JL`NkV7k}0ow|-2jHwbgH0;c@Dq*o?@&c*HnGdyx6^su8Qk%2{ +z*ye(dxO*6-&>qn1+zw}tc6;=sOX{4WB=VqjTS^))y1jlX2Q;=e!qMmFA5lC$#;BxC +z=Y%tRpWxb+_uQAvAw7Q{HGV#R$xb&udLCzZ+HN?kTyB};1EJ8UlQ5!>5eGW@)RX0n +zkjj>EF!3=0Gl^8dzv$B^NMGRxJoqN4A`xq-@wCbrx*u2NmIJ1xZ%H +zh;{|4T3(!E9sY#Ni(wUJYs1MmIc9bl)(4Nl3_wD_BWB>i<1S(LX7m*{Q7PU$muMS* +zM!%0EZx-Vw=Zey;erC?SNxF;pY@^A%-krqzfLV2meBp1vWdyArFYn`DD19T)Hw(?n +z)}{NP(Lk(o*?gl#B@pP7^*r|=;PIDT4|F#{2Hzh-AL0Rv$6uT;n|WzE4=slK?on@(fZeGhRgQCu56qB +z{+n81Az96qnQjMY*-*r-KV*7;Z#4QuJRJJV$M^KdldiMhj?ImK6~FvwJ*L5a){QoM=L5TYHkGO1$UrO3`a>{?Opw|b +zG(#59NQ#jFL9v~vgOVkM@^^(^A}onOE))yWEwhIlk&{ZyseZ^O0b=w8&O=BK{k<5B +k^Q-B@eG}LeHrquz%(SVEp_N)VhYZikCW__82JXfD17`J9Qvd(} + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/pre-applypatch.sample b/tests/resources/small.git/hooks/pre-applypatch.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..625837e25f91421b8809a097f4a3103dd387ef31 +GIT binary patch +literal 481 +zcmY+ATTa6;5Jms9iouO45IBJXEg&Jm9@v1LPHMM_ZR|;#6tQh$71hSXq*MxP;V& +zj0cY7SCL=x4`a46sF)C>94Gk%=3q$W2s;j6iHtB2$R0%gix4oK@&T~=ALd_o*CKxt +I-`Pv{1Bpzc>;M1& + +literal 0 +HcmV?d00001 + +diff --git 
a/tests/resources/small.git/hooks/pre-commit.sample b/tests/resources/small.git/hooks/pre-commit.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..10b39b2e26981b8f87ea424e735ef87359066dbb +GIT binary patch +literal 1706 +zcmZuxU2ohr5PY_N#pZ0-F<{-v&v-X^RA+u>k}E$4d&uD7=g_fA8+pNNV=4s0|iD3p<=DTXClTS +zXV23tJ;ECmN@M0j@zUAKEYW@3bv!SeYZ8ZH`YQNTApFVNc;F|9r5p4TqGs=>8E?6y +zi|gY{iM#PG1nL?UE9YCnWTk72kgZPG*Usqw!~Qd3c?~@w2?%eg@~)+VlSs6N5Yf2^ +zz;owF#K#r^&KMq1A`oqVGFpD&-!Pv|Rc +zO3KSqA@h9nSc%bm`0)Amk6*J}@14J*1-219l%%7D!Pl}UK>|lVi0Dfgu2jN3WC!uL +z0ej??b2iSehVgdnWHmZV4kUo*QL#aiIp}U=9x)IXk}JJ7VQ;CI9Rtn5e0VcjbYcVt+`x5D+svCGD;Z5hm*E$jSEQZ%SQ(}oLgslTvrKK@9Qf#b!hajVFnp9@oIix;NcI9Wk +xjnh0ya!AWet{I7YpD;y6HXyzI*lfSvH=o6*7mJZPkuaYpm>vzZ`wyGEBtOQPo|pgt + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/pre-push.sample b/tests/resources/small.git/hooks/pre-push.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..02cbd80c287f959fe33975bb66c56293e3f5b396 +GIT binary patch +literal 1431 +zcmaJ>U60!~5PUX&#a1@z9B{IIZkjLT0t5kq9#8~D(I5{+8&J~9;#ndUk~-ZT`r|uG +z$#K$$J{TsKs*LP1}9!GoZ@4I4myMMG_di|of +z%?llx{O8TS-#^;(OioEmPy%kwWQBA1OMzV{hsQ8XFzS1k!~YQoLa5 +zhtP1fA$q6VmMbbAC_9)4I628k*O5J$NR19uHe4QYDK<==I~SQk)Nu%xQ~KH +z53w=!ke(FGb_PpnZfd*+hnXDTn;2*`u^~;?+5C~cn?bRka7NR%06%e6O91{MAgN6J +zmlO8{Biw4&wr&&(z4p3eln`E}XR9m9bNYZ7Ibrg(4yZIXrfgD7N*AFD7L3YSM#j}% +zo__rOS5fr;@8UM<6cl+cv_$YB$PQ&9dv($eM*))g!_cu!QcSh-mqE9i#QDZT)=o#` +z?8!RtE?w6p?GkGZ-6yt_p~5~4ecu|Sf^)6096%h*q-eNiEA1;Xwg)p~Q&iGSG7-IQ +z9aII&`ps$WOojFA`*bjGkFk|E@sHHuD}W^d`7YJ3YE^zrQnqR +zGoq?;YGKe)93o|_=^f%3U1KYZGPOXRRxK7w`UUbMMa3<86OmVH!EKP$8RCrn9mWX+ +zC?9yF!fRVLmud3hF<}x;;sR}f(*r}6Gap3fR6zLHR~kbMgD{98N`L+r&?3p~*0+FX +zcAL%j=(SO}xTJUTvA`&Lf`2mv4koPG9&|;2+68$XxiXKL@ma;l5d2^5Ba_rPh_DHI-u1#&_upttZXp;no03$20|NFiM +zK#D#xQ>!Z3JkX8T-LDVm!B5j7y_{;JDmmTTef+K1oIiPzeEr+Ai*<2PUgnG4^ZB>p +z_fkAvoR1emuf~ri^K$-px=4#D-vY9w& +z`bCv#2zVn=YnJyeNey(Y 
+zRh`9vtLw~A+5zsjp|W0Nsa|29Rm!B>OoG5a+vi;ari8O>KkU!KAWg_fa3btK2x*_@ +z0bEc7J;Ubghm}n9bOi(Sv_B66nQ7U)J7f0fO}8Wuf*uorcIgEG +zOHc|-V6+HlRhOP}?Cn?@5iwSl43abmBA^2lyL$+cpabCGVES+v^j^FO_}?FIp%En%Ll?Z*7*}TwrZyg5OSZ9rY-`aU~Mc-jjv{Ll)FLMgtB4ujktfQ`Xhqrka +zT=P!A;9w^;Z?PqpLwOLu=cj3L>TdUKw2;DMu)`oVkj}#bcDx4tYg=j%D`+i{W~fVM +zVmZ>W9VMyin9c-0KzI_;iZ-g|OyzuG`Yq%(%dvl;ifnVr0;jWE&S`z|rQu=!yHBBO +zx`OJ;oOQ(KKM<$(bC38o>pD0%|HA(E0TRw7qj$fJ_pRN+7Nm>dSC(gLg{(`t+5Z=?o+}wXU4tHy+&%F&aRhFebeEhR2R5|$#Ycbp^w@t +zTl%=f1t=w+WpJzF<|CE@?SCNAz)%9?w33lQ8vrHJqPfH9@}qs*QXOG71W=ylx;wOB +zcx!Bj^)Yy6WX$a^vBkBJ5CobqlaDx_B0c<3b+8)f84LCrt;e;qxc+7>VbwVK{skNv!wvBiTa^9Iu +zkwP;VK)jH$WJ{`MRwAA9fal!y0dtV;FWg8PTkWU>CwnqD>1ZX2B@;$DlX%C5MI+}{ +z9xQVnffR*~v2KAUj*hCdgul~`bk#mk`o>zk9)<2Uc8?hUZAEvd!`9em)~$Z)zev>w^8 +zyAgCP_$&Y)7HSQ84`xG}OeTavaEswwF|8Xpi5iZzZa@hCiv(J-%bfFC&)HLlO+Rhw +zG6g?9eL5&A!SuJnQ6}LxG%tU+@vZ`i+!+Rz6iYvsTdhnPo7lW{m-}{hya@viX4)XZ +zngaw+j;gloB#|UwI@8sOmQpc`h+bicQJnQIB5eifIMQNgD2+oai33m!34~xU|0Azj +zhu$8z+T5^;Pxx@d{N)pzOJLSa^e;aDf$W%N5XcOf!mGC9l9j$Ev2h6N+6ZQC+CJzl +zaM7?S!SrFLS2DASjj(h6y1WN3N?|bmqmyzm!&nLoE|`rKBOc_yDF$a#FsUn!IQf(t +zdC&Us(kQz*7mvH^j*^MC@>wTDb}g%~sx*ng#>{@lR=XG-Z5_ +z#<9*Oh0joMzt;nS)ObAp)347`D=}r-;nV!TbIq&xrGRGsF6fZg+!VkfUei@_&l-M& +zPqQ+Dw)RV}+)I8RuqAxa`Pv8e&!_gXS=e2-un>=Ktn}-;%lLZxaVn?Q>yZCb2R3Wk +z77zr%;Rq&h|2ncqyKYmFI0148JVY7Q$V5p=dWj+Qqpu%i|xp2C=WaOb2Wudn^h0EcD%$p9YVU1fnoRV9`(cy(vv6K>FXS!2jY>1GnU--7)4usH&K +zao*&P^@9~YmUe|ZdLW@C>H;!*Vt3>Nw4M*;=?j(TBD#O@XCv0|MEhA;z}kTFRv@`tPHhp=&Yh +zg%Zhg4i7o_k{a5i&f5;tZ==%}^Sn4aD_6%qs_XAuJt&EumdH4Yu`UjT<-+XHTuHss+b +YOmM2;hq8Egm*4=7_P9T{21QBYH*F=mfB*mh + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/prepare-commit-msg.sample b/tests/resources/small.git/hooks/prepare-commit-msg.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..b1970da1b6d3f42f00069fd17c325de72cda812e +GIT binary patch +literal 1702 +zcmb_cTW{Mo6n>t6#i?x6xmZ$SFLf{QfG*3r0L?Pg?px55l8$UTGO3bO;spKi{V3XX 
+z))weX0X>M9bNMcZ-6yG%>(n}JI2|25dr}WZBP@ih?JX^+@ +zu#5O48P>yRX(mfDIhYP)doc1&TADZa@ZGpusJ$6G+e$ZMcmC +zoOosDQPS}l{H?YPsq(4;0SGkATa9eeqAaDcjq8n2wALbFwU@2i@FAaRV!=uw-nwx1gKn2SvY +z>Ff>;2sg!+Hxfkwv1lsiii=p6WenF=5)6LZcQaZ=aS_}+-4Y&?!@HWh|<^gJ21!|T@+%On#w6azxPHV}XsRbe*w +zR_TZ2XEsQa1lPK~biYqg@0-RW@5J1@=<87cFzEUABdCoFH2CZo?}l(Z*!OFqUxo>K +z_d`l#4d9|H6;VPT{X?^{VJ>oL|D7K{BJwwqB>`YcPoGk+9hbvHnoQ{EM|kPgD_`wk +zKm4#2xu;-y`RAm!=L_BnLvJ8$AZm8@?)v<%vwvsw8AF2x6!mTT;c72A_~U9nIq0ST +zv)N0!I!^1p=g8-RQfx5)E_Mb_4I2vtQpI30XZ&t-9h5!Hn + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/hooks/push-to-checkout.sample b/tests/resources/small.git/hooks/push-to-checkout.sample +new file mode 100755 +index 0000000000000000000000000000000000000000..a80611e18896f212c390d845e49a3f6d5693b41d +GIT binary patch +literal 2840 +zcmai0U31$u5PXh)#YOS7cE^-rw@uolNhe9&aUS|HtvhX>G$45tVUYj>fRdF?|9kfU +zNR~aG=E)WbEbeyq7JTw}ZuHIE2kUtL<AoeCNptd-NM1aZLhESzC;I`+Ns +zfmNNjdAp^W8#Q*}l>CT7RB9F5(BbI8ly2l~+E};JW|>&d1)=epZ-8vm8ppkbEVn#R +zt30a5A-c(YQR8eM5%;|UAnO>rt!&@x@G@yp+92%w-}%(5P_+P&Wf_zb$f-Qrl5(7z +z2ah(bkE;!DK(&aAMuQ%1TS>ai?wSXCOCSj=_}8x4IbCx^$}9q)whwv)SBt| +zg#MX4;;Oau`m=MI9(^&zPbueY@~>3*ixX%mvR5m_1&nAg@ZKvY1E$O}&EtLiG;mhV +z1xhMIm~fGjmf_#{62f`y;09?I7M1W2tWQvz<}i9lR>OpQyUJi45_&*pQus&EkwY<> +zI|ZAx=*3i9a-)g)hXkvO7>UJ5MNgL(Z+-wpXVcgbSgpmFmbf1~DPA(OVGI&FNLeIE +zNH!_aiH$vsif$_j7=T2{cS(!DOI`~bn@)vSd-0d7xL=DF;UNP|tW}4ih>DvHtu9tY_pbJ6x(6E*hxgC +zzNDao%qlr-IE%YGbS4hF!n!on7#W3$bX-_hbZAaws^nHu#)Dx=WzdbJ>AKzAy@T$x +zSWE^x9+|TEHVEPyaPYa0DOChp?AeHSBBDbZNokQpAY{lE!7geZI=jV)G^2@l)&91Zb1+`T+oq9wWF +zRV~kGTGce0O~p^6mj{kT5kL(pv>r;Lvd7VDX*P>A^Th`$3cWO0L81p4Ysdo3ZP1(SrR-peEdTo;-@bkB((G +zPHYQXUL!@Q$e(OQ;R9r%@Afz+50I7>*^^c&&|E*r-jN)LH=pM4AqMwWxSv|nqjddE +Z4{_hwv8!W(T +zYw`X3V>TCdnSD1ru8&`j=2DIPbCT@SnIgUw>$+lEYP}+x8(BMYnr=iT3*ndq)xzaV +z>I+qjv}vC#8_9M+b1p#uNS0M0)q

8!3p_LRQ0MA3M`!2foxzRUjbFY@}O~(ki=S +zqscnq8cU*dY)D$$cqE}n)V0yIk>CNKHCrndOtSP*HbOb;nbwAHSb;R+gs^?^Dve%) +zoW}t(*D}$>O3ab0TS^-;J|u&sb-PkZzo#kn*#xYt(;FGuwzSb^g&RDiGcOz9TB;Hu`nJh)$W=C=XCSm2AY=$w3G3P-V#Oo+N*;#2 +z4ijJ-pBZ=;T(RTgp_HYrD!uW-dTMfkuqY5jwOy)~gM;#=P^i{!l7`pXTS^s(&^{RU +zydaw}OpS#^D1cXM8?FW+fh`t7D(g;yr6|}fdaNtZBx3hlK~IpkTu3!Qq%R+zAo#t}Bs8^3$vHD+-TGT@`F>H1Cc#WAVW;&$S6%fE2d6@kLS0g&ihIM{}0z +z8#XhD>b>3{(BH|Px7}&lJ4%y1v(CihZJx@8MPoGdl*BJGD;usf*iS7%;{Joe; +zNFuBa>*~o&qETDPo~u&~$FxE1xb^x&(CbE`Y3GfsibL2rl+L;>P6j&Y3U>K$mkp*6 +zd`Q{<^+^&;GskGjwD-%!boR&i-TCA9UOR|@=GYb5x#+dhd7fkaVIR^pol`Mv+rUbmZ43dVL6^S7g3{NsPiG$iy$5EDB% +z6KIgnb$H(n&t3e4E6d4V7w^B?JS}JkG)PM6+X3Co`SQs($O*AA+MG~{S7RJ=cy-l& +z>~%3y`tjfx2>uOutB_^s +ziwG=e=ch|FQ0IkN91US7rhdQkXhwwt$gU0WEVDjo=IPb+?6PC=s8}J*ua(Ms))`UL +fi$|vMHn?H_tSE3ettp-hLlsZCxaLX8(nU;bVRB;Ce6@s#eu2|WvLz>- +zvy(&>Gyfp@+BtKnpqWkKi^+v{4jn_pNw_zeuxETifiGO|)w}OANj2n2D^K=o3j6P6uOL70#cbA{uzWXDlk1wr9GV1X(2W{RuTvjXV +zCmd8u +zH%V`94=q3)Dk)PHNrnFC(T1)Om6f{Usj;u1R->&XoCYVK2V3ZlgZuF?N}1+33OER*x +z*9Z=L=zI8CN>A_^jYjt0F$psO$sL=38q5q|SG)qCN6{^>RFh5E&l5GZ$pEahnF&d+ +z5c>64t}uJPkf~_!VUj#&N%nC-gUMj%=@B=!V>&}xtj2%@-mOm#rQUSJ3(ccmc+fza +znZ#uxF>N?QN5UrIEd!5RgHEfW#;(nKYF+D<*rdshJ$X-z2OZ2X;)nn@KSVdVhaA?}@3;6gZxb4v +zozoWSr{{+!h}zGpumG3H`=AvWpm^9kW;J$Jp^Xl*?8ckr`fqN%c|Z;VC0|cM4vSrk +zH_O8Yvh85nvJp^;``wo8=z0f`FWg?`>gO#y1hjX1{}rTlg9rwIKia8eyGexA3GnuR +z`Rg~XZoW;0pA)vI8=p5!+6sIn#C^FCvR>ffv39h6SCNi9v);%WD;WZ`of_MgwyRWy +z-yY%n*Y>X89W-v4`Ff%bx$Vkn}$!Ay}rnY6F$m-Kg*KD_+;Lx#g4|^&N +I02NaX#p`nv=Kufz + +literal 0 +HcmV?d00001 + +diff --git a/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b b/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b +new file mode 100644 +index 0000000000000000000000000000000000000000..822bc151862ec3763cf2d3fa2372b93bbd3a4b65 +GIT binary patch +literal 30 +mcmb>0i}&W3IZ_@1U=^!a~EV1casc=c+{&un1qQN*i9hD|0|m(2n|iwp*q%W +z%N;b$hu%cM`$TMo*~EnC1BFP&Pfj~;jZVKXQ96s_PhV<-XAROi+@-v8dBLUa`!;GB 
+k^iXlEv8$>R)1G>9th&t3j;s7J{?^9n|7U^`%mXoWC24Q^m!3%@{ + +literal 0 +HcmV?d00001 + From 9cc550d65252d3ad822cc12496ef71482c47ff7e Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sat, 31 Aug 2024 15:59:18 +0200 Subject: [PATCH 11/77] Don't refer to public keys as secret keys in error This constructor is used for public keys as well. --- src/libutil/signature/local-keys.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/signature/local-keys.cc b/src/libutil/signature/local-keys.cc index 858b036f550d..00c4543f2be0 100644 --- a/src/libutil/signature/local-keys.cc +++ b/src/libutil/signature/local-keys.cc @@ -22,7 +22,7 @@ Key::Key(std::string_view s) key = ss.payload; if (name == "" || key == "") - throw Error("secret key is corrupt"); + throw Error("key is corrupt"); key = base64Decode(key); } From a33cb8af5693af56dd69073dc5dddb4c6900ad7a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2024 17:28:11 +0200 Subject: [PATCH 12/77] Respect max-substitution-jobs again This broke in #11005. Any number of PathSubstitutionGoals would be woken up by a single build slot becoming available. If there are a lot of substitution goals active, this could lead to us running out of file descriptors (especially on macOS where the default limit is 256). --- src/libstore/build/substitution-goal.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index 0152f1808280..a26eea8201f3 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -183,7 +183,7 @@ Goal::Co PathSubstitutionGoal::tryToRun(StorePath subPath, nix::ref sub, /* Make sure that we are allowed to start a substitution. Note that even if maxSubstitutionJobs == 0, we still allow a substituter to run. This prevents infinite waiting. 
*/ - if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) { + while (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) { worker.waitForBuildSlot(shared_from_this()); co_await Suspend{}; } From b7acd1c4145c7316085f2a12bfa26ef742ac6146 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2024 17:28:55 +0200 Subject: [PATCH 13/77] "unsigned" -> size_t Slight cleanup. --- src/libstore/build/worker.cc | 4 ++-- src/libstore/build/worker.hh | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index ab0ba67b521f..dbe86f43f6a3 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -184,13 +184,13 @@ void Worker::wakeUp(GoalPtr goal) } -unsigned Worker::getNrLocalBuilds() +size_t Worker::getNrLocalBuilds() { return nrLocalBuilds; } -unsigned Worker::getNrSubstitutions() +size_t Worker::getNrSubstitutions() { return nrSubstitutions; } diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh index 33a7bf015179..e083dbea6d1f 100644 --- a/src/libstore/build/worker.hh +++ b/src/libstore/build/worker.hh @@ -92,12 +92,12 @@ private: * Number of build slots occupied. This includes local builds but does not * include substitutions or remote builds via the build hook. */ - unsigned int nrLocalBuilds; + size_t nrLocalBuilds; /** * Number of substitution slots occupied. */ - unsigned int nrSubstitutions; + size_t nrSubstitutions; /** * Maps used to prevent multiple instantiations of a goal for the @@ -235,12 +235,12 @@ public: * Return the number of local build processes currently running (but not * remote builds via the build hook). */ - unsigned int getNrLocalBuilds(); + size_t getNrLocalBuilds(); /** * Return the number of substitution processes currently running. */ - unsigned int getNrSubstitutions(); + size_t getNrSubstitutions(); /** * Registers a running child process. 
`inBuildSlot` means that From 13100eaa4f5b8e1934e312ac9300bd0e84d450c2 Mon Sep 17 00:00:00 2001 From: Sandro Date: Mon, 2 Sep 2024 23:25:44 +0200 Subject: [PATCH 14/77] Fix link anchor --- src/libstore/globals.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index e5e7024cbe7e..be922c9f7418 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -1204,7 +1204,7 @@ public: If the user is trusted (see `trusted-users` option), when building a fixed-output derivation, environment variables set in this option - will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md##adv-attr-impureEnvVars). + will be passed to the builder if they are listed in [`impureEnvVars`](@docroot@/language/advanced-attributes.md#adv-attr-impureEnvVars). This option is useful for, e.g., setting `https_proxy` for fixed-output derivations and in a multi-user Nix installation, or From 4decd6f8b3bf11636f6710a3d8db396ba30fefe2 Mon Sep 17 00:00:00 2001 From: Jeremy Kerfs Date: Tue, 3 Sep 2024 03:36:00 -0500 Subject: [PATCH 15/77] add removal of ~root/.cache/nix to uninstall instructions (#11407) --- doc/manual/src/installation/uninstall.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/src/installation/uninstall.md b/doc/manual/src/installation/uninstall.md index 91fb90bc0fb0..bb21af24e32e 100644 --- a/doc/manual/src/installation/uninstall.md +++ b/doc/manual/src/installation/uninstall.md @@ -19,7 +19,7 @@ If you are on Linux with systemd: Remove files created by Nix: ```console -sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile +sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix ``` Remove build users and their group: From 
02bb633a582b05991e77b65e866a06ef21b2482a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 3 Sep 2024 16:49:57 +0200 Subject: [PATCH 16/77] maintainers/upload-release.pl: Delete temporary directories when we're done --- maintainers/upload-release.pl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 731988568585..8a470c7cc341 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -112,7 +112,7 @@ sub copyManual { system("xz -d < '$manualNar' | nix-store --restore $tmpDir/manual.tmp") == 0 or die "unable to unpack $manualNar\n"; rename("$tmpDir/manual.tmp/share/doc/nix/manual", "$tmpDir/manual") or die; - system("rm -rf '$tmpDir/manual.tmp'") == 0 or die; + File::Path::remove_tree("$tmpDir/manual.tmp", {safe => 1}); } system("aws s3 sync '$tmpDir/manual' s3://$releasesBucketName/$releaseDir/manual") == 0 @@ -281,3 +281,6 @@ sub downloadFile { system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die; system("git push --tags") == 0 or die; system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest; + +File::Path::remove_tree($narCache, {safe => 1}); +File::Path::remove_tree($tmpDir, {safe => 1}); From 46b31880458f0cbea2a45a42734d1ac76c9ea88f Mon Sep 17 00:00:00 2001 From: Parker Hoyes Date: Mon, 26 Aug 2024 13:50:22 +0000 Subject: [PATCH 17/77] Move daemon process into sub-cgroup The daemon process is now moved into a new sub-cgroup called nix-daemon when the daemon starts. This is necessary to abide by the no-processes-in-inner-nodes rule, because the service cgroup becomes an inner node when the child cgroups for the build are created (see LocalDerivationGoal::startBuilder()). 
See #9675 --- .../unix/build/local-derivation-goal.cc | 21 ++++++------ src/libutil/linux/cgroup.cc | 32 +++++++++++++++++++ src/libutil/linux/cgroup.hh | 9 ++++++ src/nix/unix/daemon.cc | 25 +++++++++++++++ 4 files changed, 75 insertions(+), 12 deletions(-) diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index 01a133766d99..d55278a52960 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -444,25 +444,22 @@ void LocalDerivationGoal::startBuilder() #if __linux__ experimentalFeatureSettings.require(Xp::Cgroups); + /* If we're running from the daemon, then this will return the + root cgroup of the service. Otherwise, it will return the + current cgroup. */ + auto rootCgroup = getRootCgroup(); auto cgroupFS = getCgroupFS(); if (!cgroupFS) throw Error("cannot determine the cgroups file system"); - - auto ourCgroups = getCgroups("/proc/self/cgroup"); - auto ourCgroup = ourCgroups[""]; - if (ourCgroup == "") - throw Error("cannot determine cgroup name from /proc/self/cgroup"); - - auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup); - - if (!pathExists(ourCgroupPath)) - throw Error("expected cgroup directory '%s'", ourCgroupPath); + auto rootCgroupPath = canonPath(*cgroupFS + "/" + rootCgroup); + if (!pathExists(rootCgroupPath)) + throw Error("expected cgroup directory '%s'", rootCgroupPath); static std::atomic counter{0}; cgroup = buildUser - ? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID()) - : fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++); + ? 
fmt("%s/nix-build-uid-%d", rootCgroupPath, buildUser->getUID()) + : fmt("%s/nix-build-pid-%d-%d", rootCgroupPath, getpid(), counter++); debug("using cgroup '%s'", *cgroup); diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 140ff45668ab..706f0f159ab6 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -144,4 +145,35 @@ CgroupStats destroyCgroup(const Path & cgroup) return destroyCgroup(cgroup, true); } +std::string getCurrentCgroup() +{ + auto cgroupFS = getCgroupFS(); + if (!cgroupFS) + throw Error("cannot determine the cgroups file system"); + + auto ourCgroups = getCgroups("/proc/self/cgroup"); + auto ourCgroup = ourCgroups[""]; + if (ourCgroup == "") + throw Error("cannot determine cgroup name from /proc/self/cgroup"); + return ourCgroup; +} + +static std::optional rootCgroup; +static std::mutex rootCgroupMutex; + +std::string getRootCgroup() +{ + { + std::lock_guard guard(rootCgroupMutex); + if (rootCgroup) + return *rootCgroup; + } + auto current = getCurrentCgroup(); + std::lock_guard guard(rootCgroupMutex); + if (rootCgroup) + return *rootCgroup; + rootCgroup = current; + return current; +} + } diff --git a/src/libutil/linux/cgroup.hh b/src/libutil/linux/cgroup.hh index 783a0ab87422..87d135ba6296 100644 --- a/src/libutil/linux/cgroup.hh +++ b/src/libutil/linux/cgroup.hh @@ -25,4 +25,13 @@ struct CgroupStats */ CgroupStats destroyCgroup(const Path & cgroup); +std::string getCurrentCgroup(); + +/** + * Get the cgroup that should be used as the parent when creating new + * sub-cgroups. The first time this is called, the current cgroup will be + * returned, and then all subsequent calls will return the original cgroup. 
+ */ +std::string getRootCgroup(); + } diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index 66d8dbcf0926..746963a01039 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -33,6 +33,10 @@ #include #include +#if __linux__ +#include "cgroup.hh" +#endif + #if __APPLE__ || __FreeBSD__ #include #endif @@ -312,6 +316,27 @@ static void daemonLoop(std::optional forceTrustClientOpt) // Get rid of children automatically; don't let them become zombies. setSigChldAction(true); + #if __linux__ + if (settings.useCgroups) { + experimentalFeatureSettings.require(Xp::Cgroups); + + // This also sets the root cgroup to the current one. + auto rootCgroup = getRootCgroup(); + auto cgroupFS = getCgroupFS(); + if (!cgroupFS) + throw Error("cannot determine the cgroups file system"); + auto rootCgroupPath = canonPath(*cgroupFS + "/" + rootCgroup); + if (!pathExists(rootCgroupPath)) + throw Error("expected cgroup directory '%s'", rootCgroupPath); + auto daemonCgroupPath = rootCgroupPath + "/nix-daemon"; + // Create new sub-cgroup for the daemon. + if (mkdir(daemonCgroupPath.c_str(), 0755) != 0 && errno != EEXIST) + throw SysError("creating cgroup '%s'", daemonCgroupPath); + // Move daemon into the new cgroup. + writeFile(daemonCgroupPath + "/cgroup.procs", fmt("%d", getpid())); + } + #endif + // Loop accepting connections. 
while (1) { From 4c88deef38678154f3043ca2e0f4bd84a3c6cbc5 Mon Sep 17 00:00:00 2001 From: Parker Hoyes Date: Tue, 3 Sep 2024 17:27:56 +0000 Subject: [PATCH 18/77] Add tests for daemon with cgroups --- tests/nixos/cgroups/default.nix | 40 +++++++++++++++++++++++++++++++++ tests/nixos/cgroups/hang.nix | 10 +++++++++ tests/nixos/default.nix | 2 ++ 3 files changed, 52 insertions(+) create mode 100644 tests/nixos/cgroups/default.nix create mode 100644 tests/nixos/cgroups/hang.nix diff --git a/tests/nixos/cgroups/default.nix b/tests/nixos/cgroups/default.nix new file mode 100644 index 000000000000..b8febbf4bdad --- /dev/null +++ b/tests/nixos/cgroups/default.nix @@ -0,0 +1,40 @@ +{ nixpkgs, ... }: + +{ + name = "cgroups"; + + nodes = + { + host = + { config, pkgs, ... }: + { virtualisation.additionalPaths = [ pkgs.stdenvNoCC ]; + nix.extraOptions = + '' + extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-system-features = uid-range + ''; + nix.settings.use-cgroups = true; + nix.nixPath = [ "nixpkgs=${nixpkgs}" ]; + }; + }; + + testScript = { nodes }: '' + start_all() + + host.wait_for_unit("multi-user.target") + + # Start build in background + host.execute("NIX_REMOTE=daemon nix build --auto-allocate-uids --file ${./hang.nix} >&2 &") + service = "/sys/fs/cgroup/system.slice/nix-daemon.service" + + # Wait for cgroups to be created + host.succeed(f"until [ -e {service}/nix-daemon ]; do sleep 1; done", timeout=30) + host.succeed(f"until [ -e {service}/nix-build-uid-* ]; do sleep 1; done", timeout=30) + + # Check that there aren't processes where there shouldn't be, and that there are where there should be + host.succeed(f'[ -z "$(cat {service}/cgroup.procs)" ]') + host.succeed(f'[ -n "$(cat {service}/nix-daemon/cgroup.procs)" ]') + host.succeed(f'[ -n "$(cat {service}/nix-build-uid-*/cgroup.procs)" ]') + ''; + +} diff --git a/tests/nixos/cgroups/hang.nix b/tests/nixos/cgroups/hang.nix new file mode 100644 index 000000000000..cefe2d031c07 --- 
/dev/null +++ b/tests/nixos/cgroups/hang.nix @@ -0,0 +1,10 @@ +{ }: + +with import {}; + +runCommand "hang" + { requiredSystemFeatures = "uid-range"; + } + '' + sleep infinity + '' diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 40d29b371282..62fc6b10f912 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -157,4 +157,6 @@ in s3-binary-cache-store = runNixOSTestFor "x86_64-linux" ./s3-binary-cache-store.nix; fsync = runNixOSTestFor "x86_64-linux" ./fsync.nix; + + cgroups = runNixOSTestFor "x86_64-linux" ./cgroups; } From 62a99049c40be4971220f4115f00943f5e299b07 Mon Sep 17 00:00:00 2001 From: Parker Hoyes Date: Tue, 3 Sep 2024 19:07:18 +0000 Subject: [PATCH 19/77] Enable cgroups delegation for systemd --- misc/systemd/nix-daemon.service.in | 1 + 1 file changed, 1 insertion(+) diff --git a/misc/systemd/nix-daemon.service.in b/misc/systemd/nix-daemon.service.in index 45fbea02c479..b3055cfe2929 100644 --- a/misc/systemd/nix-daemon.service.in +++ b/misc/systemd/nix-daemon.service.in @@ -11,6 +11,7 @@ ExecStart=@@bindir@/nix-daemon nix-daemon --daemon KillMode=process LimitNOFILE=1048576 TasksMax=1048576 +Delegate=yes [Install] WantedBy=multi-user.target From 9e79061bac9d60cc2704964dc6fcc736c91173b2 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Tue, 3 Sep 2024 13:27:11 -0700 Subject: [PATCH 20/77] fixup: use the real bindir for systemd unit's bindir Prior to this commit, the unit contained this line: ExecStart=@share/nix-daemon nix-daemon --daemon which caused systemd to complain: Failed to restart nix-daemon.service: Unit nix-daemon.service has a bad unit file setting. See system logs and 'systemctl status nix-daemon.service' for details. 
and had this in the unit output: Sep 03 13:34:59 scadrial systemd[1]: /etc/systemd/system/nix-daemon.service:10: Neither a valid executable name nor an absolute path: share/nix-daemon Sep 03 13:34:59 scadrial systemd[1]: nix-daemon.service: Unit configuration has fatal error, unit will not be started. (Notice how it's trying to execute `share/nix-daemon`, which is unlikely to exist.) Now with this commit, the path to the daemon binary is properly set: ExecStart=@/nix/store/lcbx6d8gzznf3z3c8lsv9jy3j6c67x6r-nix-2.25.0pre20240903_dirty/bin/nix-daemon nix-daemon --daemon --- misc/systemd/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/systemd/meson.build b/misc/systemd/meson.build index 58b30f30bff1..6ccb6a873fd3 100644 --- a/misc/systemd/meson.build +++ b/misc/systemd/meson.build @@ -8,7 +8,7 @@ foreach config : [ 'nix-daemon.socket', 'nix-daemon.service' ] configuration : { 'storedir' : store_dir, 'localstatedir' : localstatedir, - 'bindir' : get_option('datadir'), + 'bindir' : bindir, }, ) endforeach From 9d24080090539c717015add8f2d8ce02d1d84a2d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2024 14:43:43 +0200 Subject: [PATCH 21/77] Git fetcher: Ignore .gitmodules entries that are not submodules Fixes #10739. --- src/libfetchers/git-utils.cc | 18 ++++++++++++------ tests/functional/fetchGitSubmodules.sh | 21 +++++++++++++++++++++ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 114aa4ec078b..0bc930ab28e3 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -601,12 +601,16 @@ struct GitSourceAccessor : SourceAccessor return readBlob(path, true); } - Hash getSubmoduleRev(const CanonPath & path) + /** + * If `path` exists and is a submodule, return its + * revision. Otherwise return nothing. 
+ */ + std::optional getSubmoduleRev(const CanonPath & path) { - auto entry = need(path); + auto entry = lookup(path); - if (git_tree_entry_type(entry) != GIT_OBJECT_COMMIT) - throw Error("'%s' is not a submodule", showPath(path)); + if (!entry || git_tree_entry_type(entry) != GIT_OBJECT_COMMIT) + return std::nullopt; return toHash(*git_tree_entry_id(entry)); } @@ -1074,8 +1078,10 @@ std::vector> GitRepoImpl::getSubmodules auto rawAccessor = getRawAccessor(rev); for (auto & submodule : parseSubmodules(pathTemp)) { - auto rev = rawAccessor->getSubmoduleRev(submodule.path); - result.push_back({std::move(submodule), rev}); + /* Filter out .gitmodules entries that don't exist or are not + submodules. */ + if (auto rev = rawAccessor->getSubmoduleRev(submodule.path)) + result.push_back({std::move(submodule), *rev}); } return result; diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh index 4a3e4c347ecc..cd3b51674cf3 100755 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -104,6 +104,27 @@ noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subR [[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]] +# Test .gitmodules with entries that refer to non-existent objects or objects that are not submodules. +cat >> $rootRepo/.gitmodules < $rootRepo/file +git -C $rootRepo add file +git -C $rootRepo commit -a -m "Add bad submodules" + +rev=$(git -C $rootRepo rev-parse HEAD) + +r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }") + +[[ -f $r/file ]] +[[ ! -e $r/missing ]] + # Test relative submodule URLs. 
rm $TEST_HOME/.cache/nix/fetcher-cache* rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub From 46f65058655550bff87cf547954f7ca5622d1b2d Mon Sep 17 00:00:00 2001 From: Jeremy Kolb Date: Wed, 4 Sep 2024 10:14:51 -0400 Subject: [PATCH 22/77] Pull fut.get() out of the lock This is https://gerrit.lix.systems/c/lix/+/1462 by @jade_ see: https://git.lix.systems/lix-project/lix/issues/366 see: https://gerrit.lix.systems/c/lix/+/1462 --- src/libstore/store-api.cc | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 8eef340ccb44..fc03133f8add 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -822,14 +822,25 @@ StorePathSet Store::queryValidPaths(const StorePathSet & paths, SubstituteFlag m auto doQuery = [&](const StorePath & path) { checkInterrupt(); queryPathInfo(path, {[path, &state_, &wakeup](std::future> fut) { - auto state(state_.lock()); + bool exists = false; + std::exception_ptr newExc{}; + try { auto info = fut.get(); - state->valid.insert(path); + exists = true; } catch (InvalidPath &) { } catch (...) 
{ - state->exc = std::current_exception(); + newExc = std::current_exception(); } + + auto state(state_.lock()); + + if (exists) + state->valid.insert(path); + + if (newExc != nullptr) + state->exc = newExc; + assert(state->left); if (!--state->left) wakeup.notify_one(); From 8152c5c828e845bf39334318c65297cf3dfbf518 Mon Sep 17 00:00:00 2001 From: Jeremy Kolb Date: Wed, 4 Sep 2024 12:55:32 -0400 Subject: [PATCH 23/77] Remove nullptr Co-authored-by: Eelco Dolstra --- src/libstore/store-api.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index fc03133f8add..fc8d51c398fd 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -838,7 +838,7 @@ StorePathSet Store::queryValidPaths(const StorePathSet & paths, SubstituteFlag m if (exists) state->valid.insert(path); - if (newExc != nullptr) + if (newExc) state->exc = newExc; assert(state->left); From 03484641a1c712b6d10f636e7dbb5280c1668c4b Mon Sep 17 00:00:00 2001 From: Parker Hoyes Date: Wed, 4 Sep 2024 18:11:16 +0000 Subject: [PATCH 24/77] Simplify getRootCgroup() Static local initializers are atomic in C++.
--- src/libutil/linux/cgroup.cc | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 706f0f159ab6..ad3e8a0172f3 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -6,7 +6,6 @@ #include #include -#include #include #include #include @@ -158,22 +157,10 @@ std::string getCurrentCgroup() return ourCgroup; } -static std::optional rootCgroup; -static std::mutex rootCgroupMutex; - std::string getRootCgroup() { - { - std::lock_guard guard(rootCgroupMutex); - if (rootCgroup) - return *rootCgroup; - } - auto current = getCurrentCgroup(); - std::lock_guard guard(rootCgroupMutex); - if (rootCgroup) - return *rootCgroup; - rootCgroup = current; - return current; + static std::string rootCgroup = getCurrentCgroup(); + return rootCgroup; } } From bd6ae2f3b929e8bf280692f3cae36bce7f688b4f Mon Sep 17 00:00:00 2001 From: Parker Hoyes Date: Wed, 4 Sep 2024 19:10:31 +0000 Subject: [PATCH 25/77] Use getCurrentCgroup() in getMaxCPU() --- src/libutil/current-process.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc index 0bc46d7464cc..ed1c1ca6ccfe 100644 --- a/src/libutil/current-process.cc +++ b/src/libutil/current-process.cc @@ -32,11 +32,7 @@ unsigned int getMaxCPU() auto cgroupFS = getCgroupFS(); if (!cgroupFS) return 0; - auto cgroups = getCgroups("/proc/self/cgroup"); - auto cgroup = cgroups[""]; - if (cgroup == "") return 0; - - auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max"; + auto cpuFile = *cgroupFS + "/" + getCurrentCgroup() + "/cpu.max"; auto cpuMax = readFile(cpuFile); auto cpuMaxParts = tokenizeString>(cpuMax, " \n"); From 495d32e1b8e5d5143f048d1be755a96bea822b19 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2024 21:43:59 +0200 Subject: [PATCH 26/77] NAR parser: Fix check for duplicate / incorrectly sorted entries "prevName" was always empty 
because it was declared in the wrong scope. --- src/libutil/archive.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 9ed65be6a668..e4a6a3181f2a 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -214,11 +214,13 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath else if (t == "directory") { sink.createDirectory(path); + std::string prevName; + while (1) { s = getString(); if (s == "entry") { - std::string name, prevName; + std::string name; s = getString(); if (s != "(") throw badArchive("expected open tag"); From ef3d3c568248ce5f2b2b7ff1bebb837d22b7df65 Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Wed, 4 Sep 2024 20:19:39 -0500 Subject: [PATCH 27/77] use existing GID in sequoia migration script I hardcoded the wrong GID (30001 instead of 30000), but it's better to just pick up the GID from the existing group. --- scripts/sequoia-nixbld-user-migration.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/scripts/sequoia-nixbld-user-migration.sh b/scripts/sequoia-nixbld-user-migration.sh index 644249192ddf..ecf19fc870ba 100755 --- a/scripts/sequoia-nixbld-user-migration.sh +++ b/scripts/sequoia-nixbld-user-migration.sh @@ -17,18 +17,23 @@ any_nixbld(){ dscl . list /Users UniqueID | grep -E '\b_nixbld' >/dev/null } +dsclattr() { + dscl . -read "$1" | awk "/$2/ { print \$2 }" +} + re_create_nixbld_user(){ local name uid name="$1" uid="$2" + gid="$3" sudo /usr/bin/dscl . -create "/Users/$name" "UniqueID" "$uid" sudo /usr/bin/dscl . -create "/Users/$name" "IsHidden" "1" sudo /usr/bin/dscl . -create "/Users/$name" "NFSHomeDirectory" "/var/empty" sudo /usr/bin/dscl . -create "/Users/$name" "RealName" "Nix build user $name" sudo /usr/bin/dscl . -create "/Users/$name" "UserShell" "/sbin/nologin" - sudo /usr/bin/dscl . -create "/Users/$name" "PrimaryGroupID" "30001" + sudo /usr/bin/dscl . 
-create "/Users/$name" "PrimaryGroupID" "$gid" } hit_id_cap(){ @@ -68,11 +73,12 @@ temporarily_move_existing_nixbld_uids(){ } change_nixbld_uids(){ - local name next_id user_n + local existing_gid name next_id user_n ((next_id=NEW_NIX_FIRST_BUILD_UID)) ((user_n=1)) name="$(nix_user_n "$user_n")" + existing_gid="$(dsclattr "/Groups/nixbld" "PrimaryGroupID")" # we know that we have *some* nixbld users, but macOS may have # already clobbered the first few users if this system has been @@ -91,7 +97,7 @@ change_nixbld_uids(){ fi done - re_create_nixbld_user "$name" "$next_id" + re_create_nixbld_user "$name" "$next_id" "$existing_gid" echo " $name was missing; created with uid: $next_id" ((user_n++)) From 38d9d536a884db28edb4f31a415921029c93faae Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 5 Sep 2024 03:28:06 +0200 Subject: [PATCH 28/77] docs: small fixups on the default expression - highlighted example - linked definitions to the glossary (this is a shorter read) - fixed some artefact --- .../files/default-nix-expression.md | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/doc/manual/src/command-ref/files/default-nix-expression.md b/doc/manual/src/command-ref/files/default-nix-expression.md index 620f7035c581..2bd45ff5debd 100644 --- a/doc/manual/src/command-ref/files/default-nix-expression.md +++ b/doc/manual/src/command-ref/files/default-nix-expression.md @@ -1,6 +1,6 @@ ## Default Nix expression -The source for the default [Nix expressions](@docroot@/language/index.md) used by [`nix-env`]: +The source for the [Nix expressions](@docroot@/glossary.md#gloss-nix-expression) used by [`nix-env`] by default: - `~/.nix-defexpr` - `$XDG_STATE_HOME/nix/defexpr` if [`use-xdg-base-directories`] is set to `true`. @@ -18,24 +18,25 @@ Then, the resulting expression is interpreted like this: - If the expression is an attribute set, it is used as the default Nix expression. 
- If the expression is a function, an empty set is passed as argument and the return value is used as the default Nix expression. - -For example, if the default expression contains two files, `foo.nix` and `bar.nix`, then the default Nix expression will be equivalent to - -```nix -{ - foo = import ~/.nix-defexpr/foo.nix; - bar = import ~/.nix-defexpr/bar.nix; -} -``` +> **Example** +> +> If the default expression contains two files, `foo.nix` and `bar.nix`, then the default Nix expression will be equivalent to +> +> ```nix +> { +> foo = import ~/.nix-defexpr/foo.nix; +> bar = import ~/.nix-defexpr/bar.nix; +> } +> ``` The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored. -The command [`nix-channel`] places a symlink to the user's current [channels profile](@docroot@/command-ref/files/channels.md) in this directory. +The command [`nix-channel`] places a symlink to the current user's [channels] in this directory, the [user channel link](#user-channel-link). This makes all subscribed channels available as attributes in the default expression. ## User channel link -A symlink that ensures that [`nix-env`] can find your channels: +A symlink that ensures that [`nix-env`] can find the current user's [channels]: - `~/.nix-defexpr/channels` - `$XDG_STATE_HOME/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`. @@ -45,8 +46,9 @@ This symlink points to: - `$XDG_STATE_HOME/profiles/channels` for regular users - `$NIX_STATE_DIR/profiles/per-user/root/channels` for `root` -In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, which links to the channels of the root user.[`nix-env`]: ../nix-env.md +In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, which links to the channels of the root user. 
-[`nix-env`]: @docroot@/command-ref/nix-env.md [`nix-channel`]: @docroot@/command-ref/nix-channel.md +[`nix-env`]: @docroot@/command-ref/nix-env.md [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories +[channels]: @docroot@/command-ref/files/channels.md From 48249e001a7c1bc9fac7a94a997566164944b17c Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 5 Sep 2024 04:13:43 +0200 Subject: [PATCH 29/77] docs: reword glossary entry on Nix expression this makes it less cumbersome to read and puts the statements in meaningful order. --- doc/manual/src/glossary.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md index 877c4668bfe1..808ef477c3c6 100644 --- a/doc/manual/src/glossary.md +++ b/doc/manual/src/glossary.md @@ -182,13 +182,18 @@ - [Nix expression]{#gloss-nix-expression} - 1. Commonly, a high-level description of software packages and compositions - thereof. Deploying software using Nix entails writing Nix - expressions for your packages. Nix expressions specify [derivations][derivation], - which are [instantiated][instantiate] into the Nix store as [store derivations][store derivation]. - These derivations can then be [realised][realise] to produce [outputs][output]. + A syntactically valid use of the [Nix language]. - 2. A syntactically valid use of the [Nix language]. For example, the contents of a `.nix` file form an expression. + > **Example** + > + > The contents of a `.nix` file form a Nix expression. + + Nix expressions specify [derivations][derivation], which are [instantiated][instantiate] into the Nix store as [store derivations][store derivation]. + These derivations can then be [realised][realise] to produce [outputs][output]. + + > **Example** + > + > Building and deploying software using Nix entails writing Nix expressions as a high-level description of packages and compositions thereof. 
- [reference]{#gloss-reference} From 17655ecfefc1f804caf935902469ebb1f621deac Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 5 Sep 2024 04:18:02 +0200 Subject: [PATCH 30/77] docs: explain provenance of default `nix-path` values this should make it more obvious how things are related to each other, and also hopefully expose the historical context without having to say on every corner that these details are accounting for legacy decisions. --- src/libexpr/eval-settings.hh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index 3d412bbbf57f..115e3ee501b3 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -87,10 +87,19 @@ struct EvalSettings : Config If the respective paths are accessible, the default values are: - `$HOME/.nix-defexpr/channels` + + The [user channel link](@docroot@/command-ref/files/default-nix-expression.md#user-channel-link), pointing to the current state of [channels](@docroot@/command-ref/files/channels.md) for the current user. + - `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs` + + The current state of the `nixpkgs` channel for the `root` user. + - `$NIX_STATE_DIR/profiles/per-user/root/channels` - See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details. + The current state of all channels for the `root` user. + + These files are set up by the [Nix installer](@docroot@/installation/installing-binary.md). + See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details on the environment variable. 
> **Note** > From a1cc362d9d249b95e4c9ad403f1e6e26ca302413 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Thu, 5 Sep 2024 10:34:07 +0200 Subject: [PATCH 31/77] fix broken link (#11435) --- doc/manual/src/glossary.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md index 877c4668bfe1..aacee268022f 100644 --- a/doc/manual/src/glossary.md +++ b/doc/manual/src/glossary.md @@ -119,7 +119,7 @@ A store object consists of a [file system object], [references][reference] to other store objects, and other metadata. It can be referred to by a [store path]. - See [Store Object](@docroot@/store/index.md#store-object) for details. + See [Store Object](@docroot@/store/store-object.md) for details. [store object]: #gloss-store-object From 22ba4dc78d956020e06e0618f020e11700749823 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 26 Aug 2024 21:14:20 +0200 Subject: [PATCH 32/77] builtins.readDir: fix nix error trace on filesystem errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before: nix-env % ./src/nix/nix eval --impure --expr 'let f = builtins.readDir "/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo"; in f' --show-trace error: filesystem error: directory iterator cannot open directory: No such file or directory [/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo] After: error: … while calling the 'readDir' builtin at «string»:1:9: 1| let f = builtins.readDir "/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo"; in f | ^ error: reading directory '/nix/store/hs3yxdq9knimwdm51gvbs4dvncz46f9d-hello-2.12.1/foo': No such file or directory --- src/libutil/posix-source-accessor.cc | 42 +++++++++++++++------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index 8cec3388d7b9..f26f74d58468 100644 --- 
a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -132,23 +132,24 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & { assertNoSymlinks(path); DirEntries res; - for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) { - checkInterrupt(); - auto type = [&]() -> std::optional { - std::filesystem::file_type nativeType; - try { - nativeType = entry.symlink_status().type(); - } catch (std::filesystem::filesystem_error & e) { - // We cannot always stat the child. (Ideally there is no - // stat because the native directory entry has the type - // already, but this isn't always the case.) - if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted) - return std::nullopt; - else throw; - } - - // cannot exhaustively enumerate because implementation-specific - // additional file types are allowed. + try { + for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) { + checkInterrupt(); + auto type = [&]() -> std::optional { + std::filesystem::file_type nativeType; + try { + nativeType = entry.symlink_status().type(); + } catch (std::filesystem::filesystem_error & e) { + // We cannot always stat the child. (Ideally there is no + // stat because the native directory entry has the type + // already, but this isn't always the case.) + if (e.code() == std::errc::permission_denied || e.code() == std::errc::operation_not_permitted) + return std::nullopt; + else throw; + } + + // cannot exhaustively enumerate because implementation-specific + // additional file types are allowed. 
#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wswitch-enum" switch (nativeType) { @@ -158,8 +159,11 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & default: return tMisc; } #pragma GCC diagnostic pop - }(); - res.emplace(entry.path().filename().string(), type); + }(); + res.emplace(entry.path().filename().string(), type); + } + } catch (std::filesystem::filesystem_error & e) { + throw SysError("reading directory %1%", showPath(path)); } return res; } From 05a1ffe23649950269999fd97fa351e78bd43dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 10:59:43 +0200 Subject: [PATCH 33/77] repl: wrap std::filesystem error into SysError /tmp/ecstatic-euler-mAFGV7 % /home/joerg/git/nix/build/subprojects/nix/nix repl Nix 2.25.0 Type :? for help. after doing rm /tmp/ecstatic-euler-mAFGV7 this will result in: nix-repl> :lf . error: cannot determine current working directory: No such file or directory Before it would make the repl crash /tmp/clever-hermann-MCm7A9 % /home/joerg/git/nix/build/subprojects/nix/nix repl Nix 2.25.0 Type :? for help. nix-repl> :lf . error: filesystem error: cannot get current path: No such file or directory --- src/libcmd/repl.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 63f6c1bdd0db..319dae3613be 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -2,6 +2,7 @@ #include #include +#include "error.hh" #include "repl-interacter.hh" #include "repl.hh" @@ -720,7 +721,14 @@ void NixRepl::loadFlake(const std::string & flakeRefS) if (flakeRefS.empty()) throw Error("cannot use ':load-flake' without a path specified. (Use '.' 
for the current working directory.)"); - auto flakeRef = parseFlakeRef(fetchSettings, flakeRefS, std::filesystem::current_path().string(), true); + std::filesystem::path cwd; + try { + cwd = std::filesystem::current_path(); + } catch (std::filesystem::filesystem_error & e) { + throw SysError("cannot determine current working directory"); + } + + auto flakeRef = parseFlakeRef(fetchSettings, flakeRefS, cwd.string(), true); if (evalSettings.pureEval && !flakeRef.input.isLocked()) throw Error("cannot use ':load-flake' on locked flake reference '%s' (use --impure to override)", flakeRefS); From 70c52d72f4ee93b68b57b12cd7892bba03446067 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 12:59:54 +0200 Subject: [PATCH 34/77] builtins.unpackChannel: wrap filesystem errors and sanitize channelName Otherwise these errors are not caught correctly --- src/libstore/builtins/unpack-channel.cc | 28 +++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/libstore/builtins/unpack-channel.cc b/src/libstore/builtins/unpack-channel.cc index a5f2b8e3adfb..7f9a520eed36 100644 --- a/src/libstore/builtins/unpack-channel.cc +++ b/src/libstore/builtins/unpack-channel.cc @@ -13,21 +13,37 @@ void builtinUnpackChannel( return i->second; }; - auto out = outputs.at("out"); - auto channelName = getAttr("channelName"); + std::filesystem::path out(outputs.at("out")); + std::filesystem::path channelName(getAttr("channelName")); auto src = getAttr("src"); + if (channelName.filename() != channelName) { + throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName); + } + createDirs(out); unpackTarfile(src, out); - auto entries = std::filesystem::directory_iterator{out}; - auto fileName = entries->path().string(); - auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); + size_t fileCount; + std::string fileName; + try { + auto entries = 
std::filesystem::directory_iterator{out}; + fileName = entries->path().string(); + fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); + } catch (std::filesystem::filesystem_error &e) { + throw SysError("failed to read directory %1%", out); + } + if (fileCount != 1) throw Error("channel tarball '%s' contains more than one file", src); - std::filesystem::rename(fileName, (out + "/" + channelName)); + std::filesystem::path target(out / channelName); + try { + std::filesystem::rename(fileName, target); + } catch (std::filesystem::filesystem_error &e) { + throw SysError("failed to rename %1% to %2%", fileName, target); + } } } From a81083d080152eb06200c250a4879ece451165d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 15:16:51 +0200 Subject: [PATCH 35/77] Revert "Update src/libutil/url.cc" This reverts commit 9b1cefe27e542d890aa346996a03dfecd9793dfe. --- src/libutil/url.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 8ef1857bb509..78c83244007e 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -81,7 +81,7 @@ std::map decodeQuery(const std::string & query) auto e = s.find('='); if (e == std::string::npos) { - warn("dubious URI query '%s' is missing equal sign '%s'", s, "="); + warn("invalid URI query '%s', did you forget an equals sign `=`?", s); continue; } From 5a5a010120928262341a11b88b58373a55f013b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 15:16:53 +0200 Subject: [PATCH 36/77] Revert "fix: Error on malformed URI query parameter" This reverts commit c9f45677b585dabb3a83570e21426257d92746bd. This now triggers on simple cases like `nix build .#nix`. Reverting for now. 
--- src/libutil/url.cc | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 78c83244007e..bcbe9ea4eb21 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -79,15 +79,10 @@ std::map decodeQuery(const std::string & query) for (auto s : tokenizeString(query, "&")) { auto e = s.find('='); - - if (e == std::string::npos) { - warn("invalid URI query '%s', did you forget an equals sign `=`?", s); - continue; - } - - result.emplace( - s.substr(0, e), - percentDecode(std::string_view(s).substr(e + 1))); + if (e != std::string::npos) + result.emplace( + s.substr(0, e), + percentDecode(std::string_view(s).substr(e + 1))); } return result; From 83d5b32803e5b828967a27b1ea93c5728d3a4d0a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:41:15 +0200 Subject: [PATCH 37/77] Add test case for NARs with duplicate directory entries This test was made by @puckipedia. --- tests/functional/duplicate.nar | Bin 0 -> 1400 bytes tests/functional/local.mk | 2 +- tests/functional/{case-hack.sh => nars.sh} | 9 +++++---- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 tests/functional/duplicate.nar rename tests/functional/{case-hack.sh => nars.sh} (79%) diff --git a/tests/functional/duplicate.nar b/tests/functional/duplicate.nar new file mode 100644 index 0000000000000000000000000000000000000000..1d0993ed4cab41a6d45907ac0c17026afd5471a2 GIT binary patch literal 1400 zcmdT@+it=z49zZ#4T*h25D#ojRW~kz9 z$BsP}-LYn0DAbktf#N+v9qTBW&+onV;7jX2S0C@V9t<{lr}pt&I-XgF4v29E z3g3EyMu?&G+_E0O>ztu< "$TEST_ROOT/case.nar" cmp case.nar "$TEST_ROOT/case.nar" From da1ad28912334bb57f923afb4745273fd68f695c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:48:43 +0200 Subject: [PATCH 38/77] Test that nix-store --restore fails if the output already exists This restores the behaviour from before the std::filesystem refactorings. 
--- src/libutil/fs-sink.cc | 3 ++- tests/functional/nars.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index 154346ceee30..b1e342c77a48 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -71,7 +71,8 @@ static GlobalConfig::Register r1(&restoreSinkSettings); void RestoreSink::createDirectory(const CanonPath & path) { - std::filesystem::create_directory(dstPath / path.rel()); + if (!std::filesystem::create_directory(dstPath / path.rel())) + throw Error("path '%s' already exists", (dstPath / path.rel()).string()); }; struct RestoreRegularFile : CreateRegularFileSink { diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index c58d12cd59d1..106bd10fcf15 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -10,6 +10,9 @@ clearStore rm -rf "$TEST_ROOT/out" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted" +# Check that nix-store --restore fails if the output already exists. +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. rm -rf "$TEST_ROOT/case" From 77c090cdbd56220895a2447efae79f68ed7861c5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 16:54:12 +0200 Subject: [PATCH 39/77] More tests --- tests/functional/nars.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 106bd10fcf15..b2b6b2b1ae5c 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -13,6 +13,17 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet # Check that nix-store --restore fails if the output already exists. 
expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +rm -rf "$TEST_ROOT/out" +echo foo > "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. rm -rf "$TEST_ROOT/case" From 35575873813f60fff26f27a65e09038986f17cb5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 19:26:10 +0200 Subject: [PATCH 40/77] Detect NAR directory entries that collide with another path after case-hacking The test was made by @puckipedia. 
--- src/libutil/archive.cc | 3 +++ tests/functional/case-collision.nar | Bin 0 -> 1928 bytes tests/functional/nars.sh | 6 ++++++ 3 files changed, 9 insertions(+) create mode 100644 tests/functional/case-collision.nar diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index e4a6a3181f2a..0d72f910d461 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -243,6 +243,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath debug("case collision between '%1%' and '%2%'", i->first, name); name += caseHackSuffix; name += std::to_string(++i->second); + auto j = names.find(name); + if (j != names.end()) + throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); } else names[name] = 0; } diff --git a/tests/functional/case-collision.nar b/tests/functional/case-collision.nar new file mode 100644 index 0000000000000000000000000000000000000000..2eff86901c617be2a830d23074923cb5b3b69aa3 GIT binary patch literal 1928 zcmd^9%}&EG3@&2)Y!WvfAc(_YXsQr5o`XF=mU?TnHklH4TQ7Zf(qMC#G>KJ{av&Gy za}?+EXU7lO&ocTjmrj*>2lMyfx+4Dz*%4W6x6p6LgbVFJp>-|c8?s<9`cB0$vW{^$ z?iYCMuQE2ai07y7GmkrZ&%wH>q|5FJD{C-t@C1MJc_jzOWqdC0M~c()?t*xok{-HJ zs!i9+H#iU9)|ED!?3UuAbZZF8FyEZ~jG6y2J~toM9S7FoQvGmE`2|Vij(PpHA1=*f z7ka8+sd=Qc8V} DaOkrB literal 0 HcmV?d00001 diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index b2b6b2b1ae5c..f2339af88ea9 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -37,3 +37,9 @@ cmp case.nar "$TEST_ROOT/case.nar" # removal of the suffix). touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3" (! nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > /dev/null) + +# Detect NARs that have a directory entry that after case-hacking +# collides with another entry (e.g. a directory containing 'Test', +# 'Test~nix~case~hack~1' and 'test'). 
+rm -rf "$TEST_ROOT/case" +expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'" From 7a765a6aafa27267659eb7339cf7039990f30caa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 20:37:26 +0200 Subject: [PATCH 41/77] Test that deserializing NARs with names with equal Unicode normal forms fails on macOS The test is based on the one by @puckipedia but with the file names swapped to make them sorted. --- tests/functional/nars.sh | 11 +++++++++++ tests/functional/unnormalized.nar | Bin 0 -> 1728 bytes 2 files changed, 11 insertions(+) create mode 100644 tests/functional/unnormalized.nar diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index f2339af88ea9..b16650e7e0d2 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -43,3 +43,14 @@ touch "$TEST_ROOT/case/xt_CONNMARK.h~nix~case~hack~3" # 'Test~nix~case~hack~1' and 'test'). rm -rf "$TEST_ROOT/case" expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collision.nar | grepQuiet "NAR contains file name 'test' that collides with case-hacked file name 'Test~nix~case~hack~1'" + +# Deserializing a NAR that contains file names that Unicode-normalize +# to the same name should fail on macOS but succeed on Linux. 
+rm -rf "$TEST_ROOT/out" +if [[ $(uname) = Darwin ]]; then + expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "cannot create directory.*File exists" +else + nix-store --restore "$TEST_ROOT/out" < unnormalized.nar + [[ -e $TEST_ROOT/out/â ]] + [[ -e $TEST_ROOT/out/â ]] +fi diff --git a/tests/functional/unnormalized.nar b/tests/functional/unnormalized.nar new file mode 100644 index 0000000000000000000000000000000000000000..4b7edb17e0b4a9b75cf2958e9f12cceca22d267c GIT binary patch literal 1728 zcmd^9&2GXl4DNo}ka&koJMc51YTAwW-~mEvXhfQz#07fgQFxVI_fQML(N5J=2`NbQ zV*7LLe6bx5vh%0qe#)&Vu$+Qc|z8XR?vo72w}Ja>8T2af{uR|2^gTKAx4X{4ZTc z-^V~CHIFT~SHUB7Jzi)&Hr6bq0+*^U@tqW~ Date: Thu, 5 Sep 2024 20:55:24 +0200 Subject: [PATCH 42/77] Fix test on macOS --- tests/functional/nars.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index b16650e7e0d2..bd2c49fce5c3 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -48,7 +48,7 @@ expectStderr 1 nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case-collisi # to the same name should fail on macOS but succeed on Linux. rm -rf "$TEST_ROOT/out" if [[ $(uname) = Darwin ]]; then - expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "cannot create directory.*File exists" + expectStderr 1 nix-store --restore "$TEST_ROOT/out" < unnormalized.nar | grepQuiet "path '.*/out/â' already exists" else nix-store --restore "$TEST_ROOT/out" < unnormalized.nar [[ -e $TEST_ROOT/out/â ]] From 9fcb588dd8a7b3f0d7d103cea449abcf9f736ad6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2024 22:21:53 +0200 Subject: [PATCH 43/77] RestoreSink::createDirectory(): Use append() On macOS, `mkdir("x/')` behaves differently than `mkdir("x")` if `x` is a dangling symlink (the formed succeed while the latter fails). So make sure we always strip the trailing slash. 
--- src/libutil/fs-sink.cc | 20 ++++++++++---------- tests/functional/nars.sh | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index b1e342c77a48..72e5c731f05f 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -68,11 +68,19 @@ static RestoreSinkSettings restoreSinkSettings; static GlobalConfig::Register r1(&restoreSinkSettings); +static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path) +{ + auto dst = src; + if (!path.rel().empty()) + dst /= path.rel(); + return dst; +} void RestoreSink::createDirectory(const CanonPath & path) { - if (!std::filesystem::create_directory(dstPath / path.rel())) - throw Error("path '%s' already exists", (dstPath / path.rel()).string()); + auto p = append(dstPath, path); + if (!std::filesystem::create_directory(p)) + throw Error("path '%s' already exists", p.string()); }; struct RestoreRegularFile : CreateRegularFileSink { @@ -94,14 +102,6 @@ struct RestoreRegularFile : CreateRegularFileSink { void preallocateContents(uint64_t size) override; }; -static std::filesystem::path append(const std::filesystem::path & src, const CanonPath & path) -{ - auto dst = src; - if (!path.rel().empty()) - dst /= path.rel(); - return dst; -} - void RestoreSink::createRegularFile(const CanonPath & path, std::function func) { auto p = append(dstPath, path); diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index bd2c49fce5c3..4f2470ea719d 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -11,18 +11,18 @@ rm -rf "$TEST_ROOT/out" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "NAR directory is not sorted" # Check that nix-store --restore fails if the output already exists. 
-expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" rm -rf "$TEST_ROOT/out" echo foo > "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists" rm -rf "$TEST_ROOT/out" ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "cannot create directory.*File exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "File exists" mkdir -p "$TEST_ROOT/out2" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out/' already exists" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. 
From 92be107c8eb324460a040e104be1f790cf0b57fd Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Fri, 6 Sep 2024 10:33:12 +0200 Subject: [PATCH 44/77] update instructions to build the manual --- doc/manual/src/development/documentation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/development/documentation.md b/doc/manual/src/development/documentation.md index 63f574ab7aae..d5a95e0c1d0b 100644 --- a/doc/manual/src/development/documentation.md +++ b/doc/manual/src/development/documentation.md @@ -13,13 +13,13 @@ Incremental refactorings of the documentation build setup to make it faster or e Build the manual from scratch: ```console -nix-build $(nix-instantiate)'!doc' +nix-build -E '(import ./.).packages.${builtins.currentSystem}.nix.doc' ``` or ```console -nix build .#^doc +nix build .#nix^doc ``` and open `./result-doc/share/doc/nix/manual/index.html`. From 52ba3cc5eac0418218a90c0cddb06688d4c7b5d3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 6 Sep 2024 16:28:09 +0200 Subject: [PATCH 45/77] Test that deserializing regular files / symlinks is exclusive --- tests/functional/nars.sh | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 4f2470ea719d..ed19637a1bce 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -24,6 +24,44 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet mkdir -p "$TEST_ROOT/out2" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < duplicate.nar | grepQuiet "path '.*/out' already exists" +# The same, but for a regular file. 
+nix-store --dump ./nars.sh > "$TEST_ROOT/tmp.nar" + +rm -rf "$TEST_ROOT/out" +nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +mkdir -p "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +# The same, but for a symlink +ln -sfn foo "$TEST_ROOT/symlink" +nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar" + +rm -rf "$TEST_ROOT/out" +nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" +[[ -L "$TEST_ROOT/out" ]] +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +mkdir -p "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +rm -rf "$TEST_ROOT/out" +ln -s "$TEST_ROOT/out2" "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + +mkdir -p "$TEST_ROOT/out2" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" + # Check whether restoring and dumping a NAR that contains case # collisions is round-tripping, even on a case-insensitive system. 
rm -rf "$TEST_ROOT/case" From 9df5236c468839ead6627131886f3d1153035bf3 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 27 Aug 2024 13:43:50 +0200 Subject: [PATCH 46/77] progress-bar: Only write when truly updated --- src/libmain/progress-bar.cc | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index bb4c52ef7ea6..d864d947363f 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -75,6 +75,9 @@ class ProgressBar : public Logger bool active = true; bool paused = false; bool haveUpdate = true; + + /** Helps avoid unnecessary redraws, see `draw()` */ + std::string lastOutput; }; Sync state_; @@ -360,6 +363,31 @@ class ProgressBar : public Logger } std::chrono::milliseconds draw(State & state) + { + // Call draw() and render if the output has changed. + + // Excessive redrawing is noticable on slow terminals, and it interferes + // with text selection in some terminals, including libvte-based terminal + // emulators. + + std::optional newOutput; + auto nextWakeup = draw(state, newOutput); + { + auto state(state_.lock()); + if (newOutput && *newOutput != state->lastOutput) { + writeToStderr(*newOutput); + state->lastOutput = std::move(*newOutput); + } + } + return nextWakeup; + } + + /** + * @param output[out] `nullopt` if nothing is to be drawn. Otherwise, a + * string of ANSI terminal output that can be used to + * render the progress bar. 
+ */ + std::chrono::milliseconds draw(State & state, std::optional & output) { auto nextWakeup = std::chrono::milliseconds::max(); @@ -412,7 +440,7 @@ class ProgressBar : public Logger auto width = getWindowSize().second; if (width <= 0) width = std::numeric_limits::max(); - writeToStderr("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); + output = "\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"; return nextWakeup; } From 047d9643b54aa857dfff81ba8aed7881a97938a4 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sun, 8 Sep 2024 01:09:51 +0200 Subject: [PATCH 47/77] refact: Extract ProgressBar::redraw(newOutput) --- src/libmain/progress-bar.cc | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index d864d947363f..2ea743034225 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -362,23 +362,28 @@ class ProgressBar : public Logger updateCV.notify_one(); } - std::chrono::milliseconds draw(State & state) + /** + * Redraw, if the output has changed. + * + * Excessive redrawing is noticable on slow terminals, and it interferes + * with text selection in some terminals, including libvte-based terminal + * emulators. + */ + void redraw(std::string newOutput) { - // Call draw() and render if the output has changed. - - // Excessive redrawing is noticable on slow terminals, and it interferes - // with text selection in some terminals, including libvte-based terminal - // emulators. 
+ auto state(state_.lock()); + if (newOutput != state->lastOutput) { + writeToStderr(newOutput); + state->lastOutput = std::move(newOutput); + } + } + std::chrono::milliseconds draw(State & state) + { std::optional newOutput; auto nextWakeup = draw(state, newOutput); - { - auto state(state_.lock()); - if (newOutput && *newOutput != state->lastOutput) { - writeToStderr(*newOutput); - state->lastOutput = std::move(*newOutput); - } - } + if (newOutput) + redraw(*newOutput); return nextWakeup; } From e10ea78f9313d8df381301ac2fc3023de3993689 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sun, 8 Sep 2024 01:21:40 +0200 Subject: [PATCH 48/77] refact: Inline ProgressBar::draw(state, newOutput), inline local output --- src/libmain/progress-bar.cc | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 2ea743034225..ce513d204abc 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -379,20 +379,6 @@ class ProgressBar : public Logger } std::chrono::milliseconds draw(State & state) - { - std::optional newOutput; - auto nextWakeup = draw(state, newOutput); - if (newOutput) - redraw(*newOutput); - return nextWakeup; - } - - /** - * @param output[out] `nullopt` if nothing is to be drawn. Otherwise, a - * string of ANSI terminal output that can be used to - * render the progress bar. 
- */ - std::chrono::milliseconds draw(State & state, std::optional & output) { auto nextWakeup = std::chrono::milliseconds::max(); @@ -445,7 +431,7 @@ class ProgressBar : public Logger auto width = getWindowSize().second; if (width <= 0) width = std::numeric_limits::max(); - output = "\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"; + redraw("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); return nextWakeup; } From c955563b6440ffe7abb22f15744ceea8b4ce2d9c Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sun, 8 Sep 2024 11:44:24 +0200 Subject: [PATCH 49/77] fix: Avoid deadlock in ProgressBar::redraw() --- src/libmain/progress-bar.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index ce513d204abc..e63d4f13f8f9 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -75,11 +75,11 @@ class ProgressBar : public Logger bool active = true; bool paused = false; bool haveUpdate = true; - - /** Helps avoid unnecessary redraws, see `draw()` */ - std::string lastOutput; }; + /** Helps avoid unnecessary redraws, see `redraw()` */ + Sync lastOutput_; + Sync state_; std::thread updateThread; @@ -371,10 +371,10 @@ class ProgressBar : public Logger */ void redraw(std::string newOutput) { - auto state(state_.lock()); - if (newOutput != state->lastOutput) { + auto lastOutput(lastOutput_.lock()); + if (newOutput != *lastOutput) { writeToStderr(newOutput); - state->lastOutput = std::move(newOutput); + *lastOutput = std::move(newOutput); } } From 4cfa59fdb32aa4fcc58b735d8843ce308692a652 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 14:11:35 +0200 Subject: [PATCH 50/77] Typo --- tests/functional/nars.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index ed19637a1bce..9f5f43dc6353 100755 --- a/tests/functional/nars.sh +++ 
b/tests/functional/nars.sh @@ -42,7 +42,7 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | gre mkdir -p "$TEST_ROOT/out2" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | grepQuiet "File exists" -# The same, but for a symlink +# The same, but for a symlink. ln -sfn foo "$TEST_ROOT/symlink" nix-store --dump "$TEST_ROOT/symlink" > "$TEST_ROOT/tmp.nar" From 5ca2f58798e6f514b5194c16c0fea0d8ec128171 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 14:29:05 +0200 Subject: [PATCH 51/77] Improve use-case-hack description slightly --- src/libutil/archive.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 0d72f910d461..e26b7eb935b9 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -23,7 +23,7 @@ struct ArchiveSettings : Config false, #endif "use-case-hack", - "Whether to enable a Darwin-specific hack for dealing with file name collisions."}; + "Whether to enable a macOS-specific hack for dealing with file name case collisions."}; }; static ArchiveSettings archiveSettings; From c5a4dfa6602796ebb3c62493a2fd33f2b58ec91c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 15:41:38 +0200 Subject: [PATCH 52/77] nix flake {metadata,archive}: Fix chroot stores Fixes $ nix flake metadata --store /tmp/nix nixpkgs error: path '/tmp/nix/nix/store/65xpqkz92d9j7k5ric4z8lzhiigxsfbg-source/flake.nix' is not in the Nix store This has been broken since 598deb2b23bc59df61c92ea25745d675686f3991. 
--- src/libflake/flake/flake.cc | 27 ++++++++++++++++----------- src/libflake/flake/flake.hh | 10 ++++++++++ src/nix/flake.cc | 4 ++-- tests/functional/flakes/flakes.sh | 3 +++ 4 files changed, 31 insertions(+), 13 deletions(-) diff --git a/src/libflake/flake/flake.cc b/src/libflake/flake/flake.cc index fd1183514c9f..35db13f7e5a5 100644 --- a/src/libflake/flake/flake.cc +++ b/src/libflake/flake/flake.cc @@ -751,6 +751,21 @@ LockedFlake lockFlake( } } +std::pair sourcePathToStorePath( + ref store, + const SourcePath & _path) +{ + auto path = _path.path.abs(); + + if (auto store2 = store.dynamic_pointer_cast()) { + auto realStoreDir = store2->getRealStoreDir(); + if (isInDir(path, realStoreDir)) + path = store2->storeDir + path.substr(realStoreDir.size()); + } + + return store->toStorePath(path); +} + void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) @@ -768,17 +783,7 @@ void callFlake(EvalState & state, auto lockedNode = node.dynamic_pointer_cast(); - // FIXME: This is a hack to support chroot stores. Remove this - // once we can pass a sourcePath rather than a storePath to - // call-flake.nix. - auto path = sourcePath.path.abs(); - if (auto store = state.store.dynamic_pointer_cast()) { - auto realStoreDir = store->getRealStoreDir(); - if (isInDir(path, realStoreDir)) - path = store->storeDir + path.substr(realStoreDir.size()); - } - - auto [storePath, subdir] = state.store->toStorePath(path); + auto [storePath, subdir] = sourcePathToStorePath(state.store, sourcePath); emitTreeAttrs( state, diff --git a/src/libflake/flake/flake.hh b/src/libflake/flake/flake.hh index cce17009ce35..496e186736cc 100644 --- a/src/libflake/flake/flake.hh +++ b/src/libflake/flake/flake.hh @@ -214,6 +214,16 @@ void callFlake( const LockedFlake & lockedFlake, Value & v); +/** + * Map a `SourcePath` to the corresponding store path. This is a + * temporary hack to support chroot stores while we don't have full + * lazy trees. 
FIXME: Remove this once we can pass a sourcePath rather + * than a storePath to call-flake.nix. + */ +std::pair sourcePathToStorePath( + ref store, + const SourcePath & path); + } void emitTreeAttrs( diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 2db1e039ec99..8e109b327705 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -214,7 +214,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON auto & flake = lockedFlake.flake; // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); + auto storePath = store->printStorePath(sourcePathToStorePath(store, flake.path).first); if (json) { nlohmann::json j; @@ -1079,7 +1079,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun StorePathSet sources; - auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; + auto storePath = sourcePathToStorePath(store, flake.flake.path).first; sources.insert(storePath); diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 26b91eda751a..aa4cb1e18556 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -184,6 +184,9 @@ nix registry list | grepInverse '^user' # nothing in user registry nix flake metadata flake1 nix flake metadata flake1 | grepQuiet 'Locked URL:.*flake1.*' +# Test 'nix flake metadata' on a chroot store. +nix flake metadata --store $TEST_ROOT/chroot-store flake1 + # Test 'nix flake metadata' on a local flake. (cd "$flake1Dir" && nix flake metadata) | grepQuiet 'URL:.*flake1.*' (cd "$flake1Dir" && nix flake metadata .) 
| grepQuiet 'URL:.*flake1.*' From b80b091bac1eeb6fa64db1ae078de5c6a2e4b1b8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2024 19:52:21 +0200 Subject: [PATCH 53/77] Git fetcher: Don't update mtime of ref file if fetching by rev This fixes the warning $ nix eval --store /tmp/nix --expr 'builtins.fetchTree { type = "git"; url = "https://github.com/DeterminateSystems/attic"; ref = "fixups-for-magic-nix-cache"; rev = "635753a2069d4b8228e846dc5c09ad361c75cd1a"; }' warning: could not update mtime for file '/home/eelco/.cache/nix/gitv3/09788h9zgba5lbfkaa6ija2dvi004jwsqjf5ln21i2njs07cz766/refs/heads/fixups-for-magic-nix-cache': error: changing modification time of '"/home/eelco/.cache/nix/gitv3/09788h9zgba5lbfkaa6ija2dvi004jwsqjf5ln21i2njs07cz766/refs/heads/fixups-for-magic-nix-cache"': No such file or directory When we're fetching by rev, that file doesn't necessarily exist, and we don't care about it anyway. --- src/libfetchers/git.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 076c757c5f52..6c5bda470007 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -584,9 +584,10 @@ struct GitInputScheme : InputScheme } try { - setWriteTime(localRefFile, now, now); + if (!input.getRev()) + setWriteTime(localRefFile, now, now); } catch (Error & e) { - warn("could not update mtime for file '%s': %s", localRefFile, e.msg()); + warn("could not update mtime for file '%s': %s", localRefFile, e.info().msg); } if (!originalRef && !storeCachedHead(repoInfo.url, ref)) warn("could not update cached head '%s' for '%s'", ref, repoInfo.url); From c55b285cf91139c4936ea439e7a43811f554dcdf Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Mon, 9 Sep 2024 22:15:45 -0400 Subject: [PATCH 54/77] tests: test was re-named --- tests/functional/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 
ebecdd9e85d4..5167fa814b6f 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -159,7 +159,7 @@ suites = [ 'derivation-advanced-attributes.sh', 'import-derivation.sh', 'nix_path.sh', - 'case-hack.sh', + 'nars.sh', 'placeholders.sh', 'ssh-relay.sh', 'build.sh', From be480971c20fdd859a13053bd6ebefa0b3a04042 Mon Sep 17 00:00:00 2001 From: zimbatm Date: Tue, 10 Sep 2024 15:26:35 +0200 Subject: [PATCH 55/77] doc: add HACKING.md symlink As a hacker, I should be able to checkout the repo, and find relevant information on how to develop in the project somewhere in the top-level. Either in the README.md, or CONTRIBUTING.md or HACKING.md files. This PR symlinks the HACKING.md into the right place in the manual. --- HACKING.md | 1 + 1 file changed, 1 insertion(+) create mode 120000 HACKING.md diff --git a/HACKING.md b/HACKING.md new file mode 120000 index 000000000000..d3576d60dcf6 --- /dev/null +++ b/HACKING.md @@ -0,0 +1 @@ +doc/manual/src/development/building.md \ No newline at end of file From 1ca1439b1f7bdaad9115e60b9d221c006cff1395 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 10 Sep 2024 16:40:08 +0200 Subject: [PATCH 56/77] add missing filesystem include (x86_64-darwin fix) --- src/libutil/args.hh | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 513b8d811697..127a0809e8e7 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -2,6 +2,7 @@ ///@file #include +#include #include #include #include From c4766d7b8b659ce3f6fb1c339aeeec9ca3c7eeba Mon Sep 17 00:00:00 2001 From: zimbatm Date: Thu, 5 Sep 2024 10:29:48 +0200 Subject: [PATCH 57/77] fix(nix fmt): remove the default "." argument When `nix fmt` is called without an argument, Nix appends the "." argument before calling the formatter. The comment in the code is: > Format the current flake out of the box This also happens when formatting sub-folders. 
This means that the formatter is now unable to distinguish, as an interface, whether the "." argument is coming from the flake or the user's intent to format the current folder. This decision should be up to the formatter. Treefmt, for example, will automatically look up the project's root and format all the files. This is the desired behaviour. But because the "." argument is passed, it cannot function as expected. --- doc/manual/rl-next/nix-fmt-default-argument.md | 17 +++++++++++++++++ src/nix/fmt.cc | 10 ++-------- tests/functional/fmt.sh | 11 +++++++---- tests/functional/fmt.simple.sh | 3 ++- 4 files changed, 28 insertions(+), 13 deletions(-) create mode 100644 doc/manual/rl-next/nix-fmt-default-argument.md diff --git a/doc/manual/rl-next/nix-fmt-default-argument.md b/doc/manual/rl-next/nix-fmt-default-argument.md new file mode 100644 index 000000000000..54161ab304c9 --- /dev/null +++ b/doc/manual/rl-next/nix-fmt-default-argument.md @@ -0,0 +1,17 @@ +--- +synopsis: Removing the default argument passed to the `nix fmt` formatter +issues: [] +prs: [11438] +--- + +The underlying formatter no longer receives the ". " default argument when `nix fmt` is called with no arguments. + +This change was necessary as the formatter wasn't able to distinguish between +a user wanting to format the current folder with `nix fmt .` or the generic +`nix fmt`. + +The default behaviour is now the responsibility of the formatter itself, and +allows tools such as treefmt to format the whole tree instead of only the +current directory and below. 
+ +Author: [**@zimbatm**](https://github.com/zimbatm) diff --git a/src/nix/fmt.cc b/src/nix/fmt.cc index d65834495758..f444d6addf12 100644 --- a/src/nix/fmt.cc +++ b/src/nix/fmt.cc @@ -40,14 +40,8 @@ struct CmdFmt : SourceExprCommand { Strings programArgs{app.program}; // Propagate arguments from the CLI - if (args.empty()) { - // Format the current flake out of the box - programArgs.push_back("."); - } else { - // User wants more power, let them decide which paths to include/exclude - for (auto &i : args) { - programArgs.push_back(i); - } + for (auto &i : args) { + programArgs.push_back(i); } // Release our references to eval caches to ensure they are persisted to disk, because diff --git a/tests/functional/fmt.sh b/tests/functional/fmt.sh index b29fe64d6bc1..9a79260e66c2 100755 --- a/tests/functional/fmt.sh +++ b/tests/functional/fmt.sh @@ -5,11 +5,11 @@ source common.sh TODO_NixOS # Provide a `shell` variable. Try not to `export` it, perhaps. clearStoreIfPossible -rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local +rm -rf "$TEST_HOME"/.cache "$TEST_HOME"/.config "$TEST_HOME"/.local -cp ./simple.nix ./simple.builder.sh ./fmt.simple.sh ./config.nix $TEST_HOME +cp ./simple.nix ./simple.builder.sh ./fmt.simple.sh ./config.nix "$TEST_HOME" -cd $TEST_HOME +cd "$TEST_HOME" nix fmt --help | grep "Format" @@ -30,6 +30,9 @@ cat << EOF > flake.nix }; } EOF -nix fmt ./file ./folder | grep 'Formatting: ./file ./folder' +# No arguments check +[[ "$(nix fmt)" = "Formatting(0):" ]] +# Argument forwarding check +nix fmt ./file ./folder | grep 'Formatting(2): ./file ./folder' nix flake check nix flake show | grep -P "package 'formatter'" diff --git a/tests/functional/fmt.simple.sh b/tests/functional/fmt.simple.sh index 4c8c67ebb8fb..e53f6c9be9a1 100755 --- a/tests/functional/fmt.simple.sh +++ b/tests/functional/fmt.simple.sh @@ -1 +1,2 @@ -echo Formatting: "${@}" +#!/usr/bin/env bash +echo "Formatting(${#}):" "${@}" From 
ebebe626ff4ec6da98c0a043c64b35efe1c05bc3 Mon Sep 17 00:00:00 2001 From: Artturin Date: Wed, 11 Sep 2024 00:17:03 +0300 Subject: [PATCH 58/77] Fix making the build directory kept by `keep-failed` readable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Caused by 1d3696f0fb88d610abc234a60e0d6d424feafdf1 Without this fix the kept build directory is readable only by root ``` $ sudo ls -ld /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5 drwx------ root root 60 B Wed Sep 11 00:09:48 2024  /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/ $ sudo ls -ld /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/build drwxr-xr-x nixbld1 nixbld 80 B Wed Sep 11 00:09:58 2024  /comp-temp/nix-build-openssh-static-x86_64-unknown-linux-musl-9.8p1.drv-5/build/ ``` --- src/libstore/unix/build/local-derivation-goal.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index d55278a52960..08b973cd1428 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -2997,6 +2997,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force) might have privileged stuff (like a copy of netrc). */ if (settings.keepFailed && !force && !drv->isBuiltin()) { printError("note: keeping build directory '%s'", tmpDir); + chmod(topTmpDir.c_str(), 0755); chmod(tmpDir.c_str(), 0755); } else From f2e7e996dafe069a8671f8c5a6796174c415da44 Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Tue, 10 Sep 2024 20:54:09 -0500 Subject: [PATCH 59/77] sequoia-nixbld-user-migration: disable trace mode Was hoping to leave this enabled for a little while as core community members test this script out, but Apple's aggressive release timeline for macOS 15 Sequoia has caught us off-guard here. 
It's probably not ideal for a general audience if the script spews all of this output--and people can still force bash to run in trace mode if we really need to debug a problem. --- scripts/sequoia-nixbld-user-migration.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/sequoia-nixbld-user-migration.sh b/scripts/sequoia-nixbld-user-migration.sh index ecf19fc870ba..12c736d8e765 100755 --- a/scripts/sequoia-nixbld-user-migration.sh +++ b/scripts/sequoia-nixbld-user-migration.sh @@ -1,7 +1,5 @@ #!/usr/bin/env bash -set -x - ((NEW_NIX_FIRST_BUILD_UID=351)) ((TEMP_NIX_FIRST_BUILD_UID=31000)) From 04ce0e648aeac282b114cf426cea8a078c97e0a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 5 Sep 2024 14:08:20 +0200 Subject: [PATCH 60/77] add release notes for filesystem fixes Update doc/manual/rl-next/filesystem-errors.md Co-authored-by: John Ericson --- doc/manual/rl-next/filesystem-errors.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 doc/manual/rl-next/filesystem-errors.md diff --git a/doc/manual/rl-next/filesystem-errors.md b/doc/manual/rl-next/filesystem-errors.md new file mode 100644 index 000000000000..2d5b26228600 --- /dev/null +++ b/doc/manual/rl-next/filesystem-errors.md @@ -0,0 +1,14 @@ +--- +synopsis: wrap filesystem exceptions more correctly +issues: [] +prs: [11378] +--- + + +With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. + +This lead to no longer generating error traces, for example when listing a non-existing directory, and can also lead to crashes inside the Nix REPL. + +This version catches these types of exception correctly and wrap them into Nix's own exeception type. 
+ +Author: [**@Mic92**](https://github.com/Mic92) From 38bfbb297c380f8b07d8a20ffdeb72da71c1567c Mon Sep 17 00:00:00 2001 From: Noam Yorav-Raphael Date: Wed, 11 Sep 2024 13:36:46 +0300 Subject: [PATCH 61/77] Use envvars NIX_CACHE_HOME, NIX_CONFIG_HOME, NIX_DATA_HOME, NIX_STATE_HOME if defined (#11351) --- doc/manual/rl-next/add-nix-state-home.md | 14 ++++++ doc/manual/src/command-ref/env-common.md | 15 ++++++- scripts/nix-profile.sh.in | 44 +++++++++--------- src/libcmd/repl.cc | 2 +- src/libexpr/eval-cache.cc | 2 +- src/libexpr/eval-settings.cc | 2 +- src/libfetchers/cache.cc | 2 +- src/libfetchers/git-utils.cc | 2 +- src/libfetchers/git.cc | 2 +- src/libfetchers/mercurial.cc | 2 +- src/libfetchers/registry.cc | 4 +- src/libflake/flake/config.cc | 2 +- src/libstore/globals.cc | 2 +- src/libstore/nar-info-disk-cache.cc | 2 +- src/libstore/store-api.cc | 2 +- src/libutil/users.cc | 57 ++++++++++++++++++++---- src/libutil/users.hh | 8 ++-- 17 files changed, 117 insertions(+), 47 deletions(-) create mode 100644 doc/manual/rl-next/add-nix-state-home.md diff --git a/doc/manual/rl-next/add-nix-state-home.md b/doc/manual/rl-next/add-nix-state-home.md new file mode 100644 index 000000000000..bbfdd5d3846d --- /dev/null +++ b/doc/manual/rl-next/add-nix-state-home.md @@ -0,0 +1,14 @@ +--- +synopsis: Use envvars NIX_CACHE_HOME, NIX_CONFIG_HOME, NIX_DATA_HOME, NIX_STATE_HOME if defined +prs: [11351] +--- + +Added new environment variables: + +- `NIX_CACHE_HOME` +- `NIX_CONFIG_HOME` +- `NIX_DATA_HOME` +- `NIX_STATE_HOME` + +Each, if defined, takes precedence over the corresponding [XDG environment variable](@docroot@/command-ref/env-common.md#xdg-base-directories). +This provides more fine-grained control over where Nix looks for files, and allows to have a stand-alone Nix environment, which only uses files in a specific directory, and doesn't interfere with the user environment. 
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md index 0b5017882938..ee3995111e97 100644 --- a/doc/manual/src/command-ref/env-common.md +++ b/doc/manual/src/command-ref/env-common.md @@ -138,6 +138,19 @@ The following environment variables are used to determine locations of various s - [`XDG_STATE_HOME`]{#env-XDG_STATE_HOME} (default `~/.local/state`) - [`XDG_CACHE_HOME`]{#env-XDG_CACHE_HOME} (default `~/.cache`) - [XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories + +In addition, setting the following environment variables overrides the XDG base directories: + +- [`NIX_CONFIG_HOME`]{#env-NIX_CONFIG_HOME} (default `$XDG_CONFIG_HOME/nix`) +- [`NIX_STATE_HOME`]{#env-NIX_STATE_HOME} (default `$XDG_STATE_HOME/nix`) +- [`NIX_CACHE_HOME`]{#env-NIX_CACHE_HOME} (default `$XDG_CACHE_HOME/nix`) + +When [`use-xdg-base-directories`] is enabled, the configuration directory is: + +1. `$NIX_CONFIG_HOME`, if it is defined +2. Otherwise, `$XDG_CONFIG_HOME/nix`, if `XDG_CONFIG_HOME` is defined +3. Otherwise, `~/.config/nix`. + +Likewise for the state and cache directories. diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index e868399b1430..3d0e498f4799 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -3,29 +3,33 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # Set up the per-user profile. 
- NIX_LINK="$HOME/.nix-profile" - if [ -n "${XDG_STATE_HOME-}" ]; then - NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile" + if [ -n "$NIX_STATE_HOME" ]; then + NIX_LINK="$NIX_STATE_HOME/profile" else - NIX_LINK_NEW="$HOME/.local/state/nix/profile" - fi - if [ -e "$NIX_LINK_NEW" ]; then - if [ -t 2 ] && [ -e "$NIX_LINK" ]; then - warning="\033[1;35mwarning:\033[0m" - printf "$warning Both %s and legacy %s exist; using the former.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 - if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then - printf " Since the profiles match, you can safely delete either of them.\n" 1>&2 - else - # This should be an exceptionally rare occasion: the only way to get it would be to - # 1. Update to newer Nix; - # 2. Remove .nix-profile; - # 3. Set the $NIX_LINK_NEW to something other than the default user profile; - # 4. Roll back to older Nix. - # If someone did all that, they can probably figure out how to migrate the profile. - printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2 + NIX_LINK="$HOME/.nix-profile" + if [ -n "${XDG_STATE_HOME-}" ]; then + NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile" + else + NIX_LINK_NEW="$HOME/.local/state/nix/profile" + fi + if [ -e "$NIX_LINK_NEW" ]; then + if [ -t 2 ] && [ -e "$NIX_LINK" ]; then + warning="\033[1;35mwarning:\033[0m" + printf "$warning Both %s and legacy %s exist; using the former.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 + if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then + printf " Since the profiles match, you can safely delete either of them.\n" 1>&2 + else + # This should be an exceptionally rare occasion: the only way to get it would be to + # 1. Update to newer Nix; + # 2. Remove .nix-profile; + # 3. Set the $NIX_LINK_NEW to something other than the default user profile; + # 4. Roll back to older Nix. + # If someone did all that, they can probably figure out how to migrate the profile. 
+ printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2 + fi fi + NIX_LINK="$NIX_LINK_NEW" fi - NIX_LINK="$NIX_LINK_NEW" fi # Set up environment. diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 63f6c1bdd0db..b09ae3d09ebc 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -134,7 +134,7 @@ NixRepl::NixRepl(const LookupPath & lookupPath, nix::ref store, refstaticBaseEnv.get())) , runNixPtr{runNix} - , interacter(make_unique(getDataDir() + "/nix/repl-history")) + , interacter(make_unique(getDataDir() + "/repl-history")) { } diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 9019edc1f4d4..c407cc89a754 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -69,7 +69,7 @@ struct AttrDb { auto state(_state->lock()); - Path cacheDir = getCacheDir() + "/nix/eval-cache-v5"; + Path cacheDir = getCacheDir() + "/eval-cache-v5"; createDirs(cacheDir); Path dbPath = cacheDir + "/" + fingerprint.to_string(HashFormat::Base16, false) + ".sqlite"; diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 2846eccbce15..4cbcb39b9e0b 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -99,7 +99,7 @@ const std::string & EvalSettings::getCurrentSystem() const Path getNixDefExpr() { return settings.useXDGBaseDirectories - ? getStateDir() + "/nix/defexpr" + ? 
getStateDir() + "/defexpr" : getHome() + "/.nix-defexpr"; } diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 7019b0325d7f..b0b6cb8873df 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -36,7 +36,7 @@ struct CacheImpl : Cache { auto state(_state.lock()); - auto dbPath = getCacheDir() + "/nix/fetcher-cache-v2.sqlite"; + auto dbPath = getCacheDir() + "/fetcher-cache-v2.sqlite"; createDirs(dirOf(dbPath)); state->db = SQLite(dbPath); diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 114aa4ec078b..31c42008fa4a 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -1083,7 +1083,7 @@ std::vector> GitRepoImpl::getSubmodules ref getTarballCache() { - static auto repoDir = std::filesystem::path(getCacheDir()) / "nix" / "tarball-cache"; + static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache"; return GitRepo::openRepo(repoDir, true, true); } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 076c757c5f52..13682231d90c 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -44,7 +44,7 @@ bool isCacheFileWithinTtl(time_t now, const struct stat & st) Path getCachePath(std::string_view key, bool shallow) { return getCacheDir() - + "/nix/gitv3/" + + "/gitv3/" + hashString(HashAlgorithm::SHA256, key).to_string(HashFormat::Nix32, false) + (shallow ? 
"-shallow" : ""); } diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index 3feb3cb19ba3..2c987f79d50a 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -263,7 +263,7 @@ struct MercurialInputScheme : InputScheme return makeResult(res->value, res->storePath); } - Path cacheDir = fmt("%s/nix/hg/%s", getCacheDir(), hashString(HashAlgorithm::SHA256, actualUrl).to_string(HashFormat::Nix32, false)); + Path cacheDir = fmt("%s/hg/%s", getCacheDir(), hashString(HashAlgorithm::SHA256, actualUrl).to_string(HashFormat::Nix32, false)); /* If this is a commit hash that we already have, we don't have to pull again. */ diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index 3c893c8ea33b..7f7a09053848 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -116,7 +116,7 @@ static std::shared_ptr getSystemRegistry(const Settings & settings) Path getUserRegistryPath() { - return getConfigDir() + "/nix/registry.json"; + return getConfigDir() + "/registry.json"; } std::shared_ptr getUserRegistry(const Settings & settings) @@ -159,7 +159,7 @@ static std::shared_ptr getGlobalRegistry(const Settings & settings, re if (!hasPrefix(path, "/")) { auto storePath = downloadFile(store, path, "flake-registry.json").storePath; if (auto store2 = store.dynamic_pointer_cast()) - store2->addPermRoot(storePath, getCacheDir() + "/nix/flake-registry.json"); + store2->addPermRoot(storePath, getCacheDir() + "/flake-registry.json"); path = store->toRealPath(storePath); } diff --git a/src/libflake/flake/config.cc b/src/libflake/flake/config.cc index e526cdddfa30..4879de46330e 100644 --- a/src/libflake/flake/config.cc +++ b/src/libflake/flake/config.cc @@ -12,7 +12,7 @@ typedef std::map> TrustedList; Path trustedListPath() { - return getDataDir() + "/nix/trusted-settings.json"; + return getDataDir() + "/trusted-settings.json"; } static TrustedList readTrustedList() diff --git a/src/libstore/globals.cc 
b/src/libstore/globals.cc index 52ab35b4ce5c..8958e6997520 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -135,7 +135,7 @@ std::vector getUserConfigFiles() std::vector files; auto dirs = getConfigDirs(); for (auto & dir : dirs) { - files.insert(files.end(), dir + "/nix/nix.conf"); + files.insert(files.end(), dir + "/nix.conf"); } return files; } diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 83e63794e0df..80e8d34149d0 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -87,7 +87,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache Sync _state; - NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite") + NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/binary-cache-v6.sqlite") { auto state(_state.lock()); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index fc8d51c398fd..426a69ae292e 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1315,7 +1315,7 @@ ref openStore(StoreReference && storeURI) /* If /nix doesn't exist, there is no daemon socket, and we're not root, then automatically set up a chroot store in ~/.local/share/nix/root. */ - auto chrootStore = getDataDir() + "/nix/root"; + auto chrootStore = getDataDir() + "/root"; if (!pathExists(chrootStore)) { try { createDirs(chrootStore); diff --git a/src/libutil/users.cc b/src/libutil/users.cc index d546e364f508..b4bc67cbcf2a 100644 --- a/src/libutil/users.cc +++ b/src/libutil/users.cc @@ -7,15 +7,33 @@ namespace nix { Path getCacheDir() { - auto cacheDir = getEnv("XDG_CACHE_HOME"); - return cacheDir ? 
*cacheDir : getHome() + "/.cache"; + auto dir = getEnv("NIX_CACHE_HOME"); + if (dir) { + return *dir; + } else { + auto xdgDir = getEnv("XDG_CACHE_HOME"); + if (xdgDir) { + return *xdgDir + "/nix"; + } else { + return getHome() + "/.cache/nix"; + } + } } Path getConfigDir() { - auto configDir = getEnv("XDG_CONFIG_HOME"); - return configDir ? *configDir : getHome() + "/.config"; + auto dir = getEnv("NIX_CONFIG_HOME"); + if (dir) { + return *dir; + } else { + auto xdgDir = getEnv("XDG_CONFIG_HOME"); + if (xdgDir) { + return *xdgDir + "/nix"; + } else { + return getHome() + "/.config/nix"; + } + } } std::vector getConfigDirs() @@ -23,6 +41,9 @@ std::vector getConfigDirs() Path configHome = getConfigDir(); auto configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg"); std::vector result = tokenizeString>(configDirs, ":"); + for (auto& p : result) { + p += "/nix"; + } result.insert(result.begin(), configHome); return result; } @@ -30,19 +51,37 @@ std::vector getConfigDirs() Path getDataDir() { - auto dataDir = getEnv("XDG_DATA_HOME"); - return dataDir ? *dataDir : getHome() + "/.local/share"; + auto dir = getEnv("NIX_DATA_HOME"); + if (dir) { + return *dir; + } else { + auto xdgDir = getEnv("XDG_DATA_HOME"); + if (xdgDir) { + return *xdgDir + "/nix"; + } else { + return getHome() + "/.local/share/nix"; + } + } } Path getStateDir() { - auto stateDir = getEnv("XDG_STATE_HOME"); - return stateDir ? 
*stateDir : getHome() + "/.local/state"; + auto dir = getEnv("NIX_STATE_HOME"); + if (dir) { + return *dir; + } else { + auto xdgDir = getEnv("XDG_STATE_HOME"); + if (xdgDir) { + return *xdgDir + "/nix"; + } else { + return getHome() + "/.local/state/nix"; + } + } } Path createNixStateDir() { - Path dir = getStateDir() + "/nix"; + Path dir = getStateDir(); createDirs(dir); return dir; } diff --git a/src/libutil/users.hh b/src/libutil/users.hh index 153cc73fdae4..d22c3311d99d 100644 --- a/src/libutil/users.hh +++ b/src/libutil/users.hh @@ -24,12 +24,12 @@ Path getHomeOf(uid_t userId); Path getHome(); /** - * @return $XDG_CACHE_HOME or $HOME/.cache. + * @return $NIX_CACHE_HOME or $XDG_CACHE_HOME/nix or $HOME/.cache/nix. */ Path getCacheDir(); /** - * @return $XDG_CONFIG_HOME or $HOME/.config. + * @return $NIX_CONFIG_HOME or $XDG_CONFIG_HOME/nix or $HOME/.config/nix. */ Path getConfigDir(); @@ -39,12 +39,12 @@ Path getConfigDir(); std::vector getConfigDirs(); /** - * @return $XDG_DATA_HOME or $HOME/.local/share. + * @return $NIX_DATA_HOME or $XDG_DATA_HOME/nix or $HOME/.local/share/nix. */ Path getDataDir(); /** - * @return $XDG_STATE_HOME or $HOME/.local/state. + * @return $NIX_STATE_HOME or $XDG_STATE_HOME/nix or $HOME/.local/state/nix. */ Path getStateDir(); From 51a01aa6c5524c6aad571d324a0f9baa2bbb9f51 Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Wed, 11 Sep 2024 08:55:57 -0500 Subject: [PATCH 62/77] sequoia-nixbld-user-migration: halt on error Addressing user feedback about a case where actions the script takes may fail without a specific permission if run over SSH. 
--- scripts/sequoia-nixbld-user-migration.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/sequoia-nixbld-user-migration.sh b/scripts/sequoia-nixbld-user-migration.sh index 12c736d8e765..88e8017065af 100755 --- a/scripts/sequoia-nixbld-user-migration.sh +++ b/scripts/sequoia-nixbld-user-migration.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -eo pipefail + ((NEW_NIX_FIRST_BUILD_UID=351)) ((TEMP_NIX_FIRST_BUILD_UID=31000)) From 3fbd71701a39dea79dea576ba90fe42925d85459 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Sep 2024 17:27:39 +0200 Subject: [PATCH 63/77] Add test --- tests/nixos/github-flakes.nix | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index 221045009ee9..8e646f6dda23 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -181,8 +181,14 @@ in print(out) info = json.loads(out) assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}" + assert info["fingerprint"] cat_log() + # Fetching with the resolved URL should produce the same result. 
+ info2 = json.loads(client.succeed(f"nix flake metadata {info['url']} --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")) + print(info["fingerprint"], info2["fingerprint"]) + assert info["fingerprint"] == info2["fingerprint"], "fingerprint mismatch" + client.succeed("nix registry pin nixpkgs") client.succeed("nix flake metadata nixpkgs --tarball-ttl 0 >&2") From e557096cefad10ccef86dc674fc5e053c13716e6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Sep 2024 17:31:53 +0200 Subject: [PATCH 64/77] Add release note --- doc/manual/rl-next/no-flake-substitution.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/manual/rl-next/no-flake-substitution.md diff --git a/doc/manual/rl-next/no-flake-substitution.md b/doc/manual/rl-next/no-flake-substitution.md new file mode 100644 index 000000000000..67ec5875099a --- /dev/null +++ b/doc/manual/rl-next/no-flake-substitution.md @@ -0,0 +1,8 @@ +--- +synopsis: Flakes are no longer substituted +prs: [10612] +--- + +Nix will no longer attempt to substitute the source code of flakes from a binary cache. This functionality was broken because it could lead to different evaluation results depending on whether the flake was available in the binary cache, or even depending on whether the flake was already in the local store. 
+ +Author: [**@edolstra**](https://github.com/edolstra) From 193dc490971b0435c7de7565b86110a59d515ff2 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 11 Sep 2024 11:59:11 -0400 Subject: [PATCH 65/77] tweak unpack channel built-in, std::filesystem::path for tarball --- src/libstore/builtins/unpack-channel.cc | 36 ++++++++++++++----------- src/libutil/tarfile.cc | 22 ++++++++------- src/libutil/tarfile.hh | 6 ++--- 3 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/libstore/builtins/unpack-channel.cc b/src/libstore/builtins/unpack-channel.cc index 7f9a520eed36..d30626a309b9 100644 --- a/src/libstore/builtins/unpack-channel.cc +++ b/src/libstore/builtins/unpack-channel.cc @@ -3,46 +3,52 @@ namespace nix { +namespace fs { using namespace std::filesystem; } + void builtinUnpackChannel( const BasicDerivation & drv, const std::map & outputs) { - auto getAttr = [&](const std::string & name) { + auto getAttr = [&](const std::string & name) -> const std::string & { auto i = drv.env.find(name); if (i == drv.env.end()) throw Error("attribute '%s' missing", name); return i->second; }; - std::filesystem::path out(outputs.at("out")); - std::filesystem::path channelName(getAttr("channelName")); - auto src = getAttr("src"); + fs::path out{outputs.at("out")}; + auto & channelName = getAttr("channelName"); + auto & src = getAttr("src"); - if (channelName.filename() != channelName) { + if (fs::path{channelName}.filename().string() != channelName) { throw Error("channelName is not allowed to contain filesystem seperators, got %1%", channelName); } - createDirs(out); + try { + fs::create_directories(out); + } catch (fs::filesystem_error &) { + throw SysError("creating directory '%1%'", out.string()); + } unpackTarfile(src, out); size_t fileCount; std::string fileName; try { - auto entries = std::filesystem::directory_iterator{out}; + auto entries = fs::directory_iterator{out}; fileName = entries->path().string(); - fileCount = 
std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); - } catch (std::filesystem::filesystem_error &e) { - throw SysError("failed to read directory %1%", out); + fileCount = std::distance(fs::begin(entries), fs::end(entries)); + } catch (fs::filesystem_error &) { + throw SysError("failed to read directory %1%", out.string()); } - if (fileCount != 1) throw Error("channel tarball '%s' contains more than one file", src); - std::filesystem::path target(out / channelName); + + auto target = out / channelName; try { - std::filesystem::rename(fileName, target); - } catch (std::filesystem::filesystem_error &e) { - throw SysError("failed to rename %1% to %2%", fileName, target); + fs::rename(fileName, target); + } catch (fs::filesystem_error &) { + throw SysError("failed to rename %1% to %2%", fileName, target.string()); } } diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 2e323629512d..a8a22d283f8a 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -8,6 +8,10 @@ namespace nix { +namespace fs { +using namespace std::filesystem; +} + namespace { int callback_open(struct archive *, void * self) @@ -102,14 +106,14 @@ TarArchive::TarArchive(Source & source, bool raw, std::optional com "Failed to open archive (%s)"); } -TarArchive::TarArchive(const Path & path) +TarArchive::TarArchive(const fs::path & path) : archive{archive_read_new()} , buffer(defaultBufferSize) { archive_read_support_filter_all(archive); enableSupportedFormats(archive); archive_read_set_option(archive, NULL, "mac-ext", NULL); - check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s"); + check(archive_read_open_filename(archive, path.string().c_str(), 16384), "failed to open archive: %s"); } void TarArchive::close() @@ -123,7 +127,7 @@ TarArchive::~TarArchive() archive_read_free(this->archive); } -static void extract_archive(TarArchive & archive, const Path & destDir) +static void extract_archive(TarArchive & 
archive, const fs::path & destDir) { int flags = ARCHIVE_EXTRACT_TIME | ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_NODOTDOT; @@ -140,7 +144,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir) else archive.check(r); - archive_entry_copy_pathname(entry, (destDir + "/" + name).c_str()); + archive_entry_copy_pathname(entry, (destDir / name).string().c_str()); // sources can and do contain dirs with no rx bits if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500) @@ -149,7 +153,7 @@ static void extract_archive(TarArchive & archive, const Path & destDir) // Patch hardlink path const char * original_hardlink = archive_entry_hardlink(entry); if (original_hardlink) { - archive_entry_copy_hardlink(entry, (destDir + "/" + original_hardlink).c_str()); + archive_entry_copy_hardlink(entry, (destDir / original_hardlink).string().c_str()); } archive.check(archive_read_extract(archive.archive, entry, flags)); @@ -158,19 +162,19 @@ static void extract_archive(TarArchive & archive, const Path & destDir) archive.close(); } -void unpackTarfile(Source & source, const Path & destDir) +void unpackTarfile(Source & source, const fs::path & destDir) { auto archive = TarArchive(source); - createDirs(destDir); + fs::create_directories(destDir); extract_archive(archive, destDir); } -void unpackTarfile(const Path & tarFile, const Path & destDir) +void unpackTarfile(const fs::path & tarFile, const fs::path & destDir) { auto archive = TarArchive(tarFile); - createDirs(destDir); + fs::create_directories(destDir); extract_archive(archive, destDir); } diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh index 0517177dbe66..5e29c6bbac3e 100644 --- a/src/libutil/tarfile.hh +++ b/src/libutil/tarfile.hh @@ -15,7 +15,7 @@ struct TarArchive void check(int err, const std::string & reason = "failed to extract archive (%s)"); - explicit TarArchive(const Path & path); + explicit TarArchive(const std::filesystem::path & path); 
/// @brief Create a generic archive from source. /// @param source - Input byte stream. @@ -37,9 +37,9 @@ struct TarArchive int getArchiveFilterCodeByName(const std::string & method); -void unpackTarfile(Source & source, const Path & destDir); +void unpackTarfile(Source & source, const std::filesystem::path & destDir); -void unpackTarfile(const Path & tarFile, const Path & destDir); +void unpackTarfile(const std::filesystem::path & tarFile, const std::filesystem::path & destDir); time_t unpackTarfileToSink(TarArchive & archive, ExtendedFileSystemObjectSink & parseSink); From 48477d4a3e7130c89b2ded4496c00ef74601091f Mon Sep 17 00:00:00 2001 From: "Travis A. Everett" Date: Wed, 11 Sep 2024 12:50:47 -0500 Subject: [PATCH 66/77] doc: add admonitions for macOS 15 Sequoia update (#11487) The impending release of macOS 15 Sequoia will break many existing nix installs on macOS, which may lead to an increased number of people who are looking to try to reinstall Nix without noticing the open/pinned issue (#10892) that explains the problem and outlines how to migrate existing installs. These admonitions are a short-term measure until we are over the hump and support volumes dwindle. 
--- doc/manual/src/installation/index.md | 8 ++++++++ doc/manual/src/installation/installing-binary.md | 8 ++++++++ doc/manual/src/installation/uninstall.md | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/doc/manual/src/installation/index.md b/doc/manual/src/installation/index.md index dafdeb667e52..16a7f485a1d3 100644 --- a/doc/manual/src/installation/index.md +++ b/doc/manual/src/installation/index.md @@ -14,6 +14,14 @@ This option requires either: * Linux running systemd, with SELinux disabled * MacOS +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. + ```console $ bash <(curl -L https://nixos.org/nix/install) --daemon ``` diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md index 6a168ff3dfd7..6a1a5ddcaff2 100644 --- a/doc/manual/src/installation/installing-binary.md +++ b/doc/manual/src/installation/installing-binary.md @@ -1,5 +1,13 @@ # Installing a Binary Distribution +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. 
+ To install the latest version Nix, run the following command: ```console diff --git a/doc/manual/src/installation/uninstall.md b/doc/manual/src/installation/uninstall.md index bb21af24e32e..429fcae469a6 100644 --- a/doc/manual/src/installation/uninstall.md +++ b/doc/manual/src/installation/uninstall.md @@ -43,6 +43,14 @@ which you may remove. ### macOS +> **Updating to macOS 15 Sequoia** +> +> If you recently updated to macOS 15 Sequoia and are getting +> ```console +> error: the user '_nixbld1' in the group 'nixbld' does not exist +> ``` +> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. + 1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: ```console From 30aa45a37313f54006ad88b9f42f11d8c175b1db Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Sep 2024 20:35:04 +0200 Subject: [PATCH 67/77] Formatting --- src/libflake/flake/lockfile.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libflake/flake/lockfile.cc b/src/libflake/flake/lockfile.cc index 80f14ff6fa3b..70b60716f622 100644 --- a/src/libflake/flake/lockfile.cc +++ b/src/libflake/flake/lockfile.cc @@ -54,12 +54,13 @@ StorePath LockedNode::computeStorePath(Store & store) const } -static std::shared_ptr doFind(const ref& root, const InputPath & path, std::vector& visited) { +static std::shared_ptr doFind(const ref & root, const InputPath & path, std::vector & visited) +{ auto pos = root; auto found = std::find(visited.cbegin(), visited.cend(), path); - if(found != visited.end()) { + if (found != visited.end()) { std::vector cycle; std::transform(found, visited.cend(), std::back_inserter(cycle), printInputPath); cycle.push_back(printInputPath(path)); From 12fd65d1799f6b1fe0c7e07d5a8022afc6b6dc40 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 11 Sep 2024 
21:58:32 +0200 Subject: [PATCH 68/77] Disable subflakes test Relative path flakes ("subflakes") are basically fundamentally broken, since they produce lock file entries like "locked": { "lastModified": 1, "narHash": "sha256-/2tW9SKjQbRLzfcJs5SHijli6l3+iPr1235zylGynK8=", "path": "./flakeC", "type": "path" }, that don't specify what "./flakeC" is relative to. They *sometimes* worked by accident because the `narHash` field allowed `fetchToStore()` to get the store path of the subflake *if* it happened to exist in the local store or in a substituter. Subflakes are properly fixed in #10089 (which adds a "parent" field to the lock file). Rather than come up with some crazy hack to make them work in the interim, let's just disable the only test that depends on the broken behaviour for now. --- tests/functional/flakes/follow-paths.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh index ea56b950389b..d908a03c4fb1 100755 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -2,6 +2,9 @@ source ./common.sh +# FIXME: this test is disabled because relative path flakes are broken. Re-enable this in #10089. +exit 0 + requireGit flakeFollowsA=$TEST_ROOT/follows/flakeA From 421aa1add1cbae1fd51b8d9efa4ed47dce7a06ac Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 14:58:33 +0200 Subject: [PATCH 69/77] Add tests for invalid file names in NARs Note: in general, we rely on the OS to tell us if a name is invalid or if two names normalize in the same way. But for security, we do want to make sure that we catch '.', '..', slashes and NUL characters. (NUL characters aren't really a security issue, but since they would be truncated when we pass them to the OS, it would be canonicity problem.) 
--- tests/functional/dot.nar | Bin 0 -> 288 bytes tests/functional/dotdot.nar | Bin 0 -> 288 bytes tests/functional/empty.nar | Bin 0 -> 280 bytes tests/functional/nars.sh | 20 ++++++++++++++++++++ tests/functional/nul.nar | Bin 0 -> 288 bytes tests/functional/slash.nar | Bin 0 -> 288 bytes 6 files changed, 20 insertions(+) create mode 100644 tests/functional/dot.nar create mode 100644 tests/functional/dotdot.nar create mode 100644 tests/functional/empty.nar create mode 100644 tests/functional/nul.nar create mode 100644 tests/functional/slash.nar diff --git a/tests/functional/dot.nar b/tests/functional/dot.nar new file mode 100644 index 0000000000000000000000000000000000000000..3a9452f67fd7dc8b8c9328c767337c5c51b006c4 GIT binary patch literal 288 zcmd;OfPlQr3f;t_5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOTfxnOf(@_JBxFnjXyQ&8k_xq}_5uP8OWG$*l$fdk4<&d)0Wx}lg2%Fjs6 R$;szJ_)8Ni4znK@9{^kc8+8Bx literal 0 HcmV?d00001 diff --git a/tests/functional/dotdot.nar b/tests/functional/dotdot.nar new file mode 100644 index 0000000000000000000000000000000000000000..f8d019c3926a8285fd3258798a8f88efc65df48d GIT binary patch literal 288 zcmd;OfPlQr3f;t_5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOTfxnOgcpz8JXAPks2dHE?|d0hHo?qG-NFG@`>%}Fd`;DGXz^Yco8ZYXAh V@-tF%a`L$l{?des!_3FU2LN}%8>9dL literal 0 HcmV?d00001 diff --git a/tests/functional/empty.nar b/tests/functional/empty.nar new file mode 100644 index 0000000000000000000000000000000000000000..43434f2b4404161a74e8a90c0b5c8e3a11194fdf GIT binary patch literal 280 zcmd;OfPlQr3f;t_5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOTfxnOgk${Aqh=jEq>#c}C_+0PEuSd^Mxnv+<>zyaka=jW9G?Jj16@-tF% Ra`L$l{?UYr!_3FU2LQ_<8%6*C literal 0 HcmV?d00001 diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 9f5f43dc6353..68b9b45d946b 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -92,3 +92,23 @@ else [[ -e $TEST_ROOT/out/â ]] [[ -e $TEST_ROOT/out/â ]] fi + +# Unpacking a NAR with a NUL character in a file name should fail. 
+rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < nul.nar | grepQuiet "NAR contains invalid file name 'f" + +# Likewise for a '.' filename. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dot.nar | grepQuiet "NAR contains invalid file name '.'" + +# Likewise for a '..' filename. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dotdot.nar | grepQuiet "NAR contains invalid file name '..'" + +# Likewise for a filename containing a slash. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < slash.nar | grepQuiet "NAR contains invalid file name 'x/y'" + +# Likewise for an empty filename. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < empty.nar | grepQuiet "NAR contains invalid file name ''" diff --git a/tests/functional/nul.nar b/tests/functional/nul.nar new file mode 100644 index 0000000000000000000000000000000000000000..9ae48baf6fa658433e37afdb6cae18b2d056a06c GIT binary patch literal 288 zcmd;OfPlQr3f;t_5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOTfxnOgcq3Y8Z^1&>aJ$d;lV0m2nVeVju>Mu%7FU?6TV&H)Clk@XRfNm&e WgYq*{b8_;z5dPAHio?vu#RmYtiX0vQ literal 0 HcmV?d00001 diff --git a/tests/functional/slash.nar b/tests/functional/slash.nar new file mode 100644 index 0000000000000000000000000000000000000000..118a60216cc12d33df202aec4c5dd8fbfbcfce75 GIT binary patch literal 288 zcmd;OfPlQr3f;t_5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOTfxnOgcq3SF2Atu1=$;(dx%j41ya|b(Ae^F|BX-;Ah0|%6!oS#<$bVD&4 Vl%J8BlatSd@Ruf39A-W)J^<^_9BBXm literal 0 HcmV?d00001 From 4de9587e50644b59ca7fcf0e1ad940d0dddcb89c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 15:27:55 +0200 Subject: [PATCH 70/77] Improve badArchive() --- src/libutil/archive.cc | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index e26b7eb935b9..d9a3afd415a8 100644 --- 
a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -128,9 +128,10 @@ void dumpString(std::string_view s, Sink & sink) } -static SerialisationError badArchive(const std::string & s) +template +static SerialisationError badArchive(std::string_view s, const Args & ... args) { - return SerialisationError("bad archive: " + s); + return SerialisationError("bad archive: " + s, args...); } @@ -223,7 +224,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath std::string name; s = getString(); - if (s != "(") throw badArchive("expected open tag"); + if (s != "(") throw badArchive("expected open tag '%s'", s); while (1) { s = getString(); @@ -233,9 +234,9 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath } else if (s == "name") { name = getString(); if (name.empty() || name == "." || name == ".." || name.find('/') != std::string::npos || name.find((char) 0) != std::string::npos) - throw Error("NAR contains invalid file name '%1%'", name); + throw badArchive("NAR contains invalid file name '%1%'", name); if (name <= prevName) - throw Error("NAR directory is not sorted"); + throw badArchive("NAR directory is not sorted"); prevName = name; if (archiveSettings.useCaseHack) { auto i = names.find(name); @@ -245,7 +246,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath name += std::to_string(++i->second); auto j = names.find(name); if (j != names.end()) - throw Error("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); + throw badArchive("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); } else names[name] = 0; } @@ -253,7 +254,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath if (name.empty()) throw badArchive("entry name missing"); parse(sink, source, path / name); } else - throw badArchive("unknown field " + s); + throw badArchive("unknown field '%s'", 
s); } } @@ -265,7 +266,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath s = getString(); if (s != "target") - throw badArchive("expected 'target' got " + s); + throw badArchive("expected 'target', got '%s'", s); std::string target = getString(); sink.createSymlink(path, target); @@ -274,12 +275,12 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath s = getString(); } - else throw badArchive("unknown file type " + t); + else throw badArchive("unknown file type '%s'", t); } else - throw badArchive("unknown field " + s); + throw badArchive("unknown field '%s'", s); } } From 69bf9947c78ea80b761dae81053ad50ad7ad8532 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 15:29:54 +0200 Subject: [PATCH 71/77] Put 'names' in the right scope --- src/libutil/archive.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index d9a3afd415a8..9ddec44cd168 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -173,8 +173,6 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath s = readString(source); if (s != "(") throw badArchive("expected open tag"); - std::map names; - auto getString = [&]() { checkInterrupt(); return readString(source); @@ -215,6 +213,8 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath else if (t == "directory") { sink.createDirectory(path); + std::map names; + std::string prevName; while (1) { From 27ec0def740208f6884a1aad862f4e2e181ca0a9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 15:33:55 +0200 Subject: [PATCH 72/77] Typo --- src/libutil/archive.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 9ddec44cd168..c9362d6cb516 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -82,7 +82,7 @@ void SourceAccessor::dumpPath( 
name.erase(pos); } if (!unhacked.emplace(name, i.first).second) - throw Error("file name collision in between '%s' and '%s'", + throw Error("file name collision between '%s' and '%s'", (path / unhacked[name]), (path / i.first)); } else From 7aa3e7e3a5281acf350eff0fe039656cd4986e2c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 15:57:46 +0200 Subject: [PATCH 73/77] Make the NAR parser much stricter wrt field order We really want to enforce a canonical representation since NAR hashing/signing/deduplication depends on that. --- src/libutil/archive.cc | 157 ++++++++---------- .../functional/executable-after-contents.nar | Bin 0 -> 320 bytes tests/functional/name-after-node.nar | Bin 0 -> 288 bytes tests/functional/nars.sh | 8 + 4 files changed, 75 insertions(+), 90 deletions(-) create mode 100644 tests/functional/executable-after-contents.nar create mode 100644 tests/functional/name-after-node.nar diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index c9362d6cb516..20d8a1e09bec 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -168,120 +168,97 @@ struct CaseInsensitiveCompare static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath & path) { - std::string s; - - s = readString(source); - if (s != "(") throw badArchive("expected open tag"); - auto getString = [&]() { checkInterrupt(); return readString(source); }; - // For first iteration - s = getString(); + auto expectTag = [&](std::string_view expected) { + auto tag = getString(); + if (tag != expected) + throw badArchive("expected tag '%s', got '%s'", expected, tag); + }; - while (1) { + expectTag("("); - if (s == ")") { - break; - } + expectTag("type"); - else if (s == "type") { - std::string t = getString(); + auto type = getString(); - if (t == "regular") { - sink.createRegularFile(path, [&](auto & crf) { - while (1) { - s = getString(); + if (type == "regular") { + sink.createRegularFile(path, [&](auto & crf) { + auto tag = getString(); - 
if (s == "contents") { - parseContents(crf, source); - } + if (tag == "executable") { + auto s2 = getString(); + if (s2 != "") throw badArchive("executable marker has non-empty value"); + crf.isExecutable(); + tag = getString(); + } - else if (s == "executable") { - auto s2 = getString(); - if (s2 != "") throw badArchive("executable marker has non-empty value"); - crf.isExecutable(); - } + if (tag == "contents") + parseContents(crf, source); - else break; - } - }); - } + expectTag(")"); + }); + } - else if (t == "directory") { - sink.createDirectory(path); - - std::map names; - - std::string prevName; - - while (1) { - s = getString(); - - if (s == "entry") { - std::string name; - - s = getString(); - if (s != "(") throw badArchive("expected open tag '%s'", s); - - while (1) { - s = getString(); - - if (s == ")") { - break; - } else if (s == "name") { - name = getString(); - if (name.empty() || name == "." || name == ".." || name.find('/') != std::string::npos || name.find((char) 0) != std::string::npos) - throw badArchive("NAR contains invalid file name '%1%'", name); - if (name <= prevName) - throw badArchive("NAR directory is not sorted"); - prevName = name; - if (archiveSettings.useCaseHack) { - auto i = names.find(name); - if (i != names.end()) { - debug("case collision between '%1%' and '%2%'", i->first, name); - name += caseHackSuffix; - name += std::to_string(++i->second); - auto j = names.find(name); - if (j != names.end()) - throw badArchive("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); - } else - names[name] = 0; - } - } else if (s == "node") { - if (name.empty()) throw badArchive("entry name missing"); - parse(sink, source, path / name); - } else - throw badArchive("unknown field '%s'", s); - } - } + else if (type == "directory") { + sink.createDirectory(path); - else break; - } - } + std::map names; + + std::string prevName; + + while (1) { + auto tag = getString(); - else if (t == "symlink") { - s = 
getString(); + if (tag == ")") break; - if (s != "target") - throw badArchive("expected 'target', got '%s'", s); + if (tag != "entry") + throw badArchive("expected tag 'entry' or ')', got '%s'", tag); - std::string target = getString(); - sink.createSymlink(path, target); + expectTag("("); - // for the next iteration - s = getString(); + expectTag("name"); + + auto name = getString(); + if (name.empty() || name == "." || name == ".." || name.find('/') != std::string::npos || name.find((char) 0) != std::string::npos) + throw badArchive("NAR contains invalid file name '%1%'", name); + if (name <= prevName) + throw badArchive("NAR directory is not sorted"); + prevName = name; + if (archiveSettings.useCaseHack) { + auto i = names.find(name); + if (i != names.end()) { + debug("case collision between '%1%' and '%2%'", i->first, name); + name += caseHackSuffix; + name += std::to_string(++i->second); + auto j = names.find(name); + if (j != names.end()) + throw badArchive("NAR contains file name '%s' that collides with case-hacked file name '%s'", prevName, j->first); + } else + names[name] = 0; } - else throw badArchive("unknown file type '%s'", t); + expectTag("node"); + parse(sink, source, path / name); + + expectTag(")"); } + } + + else if (type == "symlink") { + expectTag("target"); - else - throw badArchive("unknown field '%s'", s); + auto target = getString(); + sink.createSymlink(path, target); + + expectTag(")"); } + + else throw badArchive("unknown file type '%s'", type); } diff --git a/tests/functional/executable-after-contents.nar b/tests/functional/executable-after-contents.nar new file mode 100644 index 0000000000000000000000000000000000000000..f8c003480d786957a141aca605ae3e78819f61d9 GIT binary patch literal 320 zcmah@Ne;p=3=Co|690fh4?HQPhDHiD3NC7YPiiK|3d_=X#>@ERe!+2UeGYy6P5|HVR1lLB%1_BGN=+`wFRFy{S)p`l zUI|zXmpOU)DPVJO$;0enhniQEnqHcdSj4~qqH1W`puGQgd?hxe)Hwgo?x5 YotKykwvQPqo|c~vX2I--sYmAn07Q};jQ{`u literal 0 HcmV?d00001 diff --git a/tests/functional/nars.sh 
b/tests/functional/nars.sh index 68b9b45d946b..87fd7beec00f 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -112,3 +112,11 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < slash.nar | grepQuiet "NAR # Likewise for an empty filename. rm -rf "$TEST_ROOT/out" expectStderr 1 nix-store --restore "$TEST_ROOT/out" < empty.nar | grepQuiet "NAR contains invalid file name ''" + +# Test that the 'executable' field cannot come before the 'contents' field. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < executable-after-contents.nar | grepQuiet "expected tag ')', got 'executable'" + +# Test that the 'name' field cannot come before the 'node' field in a directory entry. +rm -rf "$TEST_ROOT/out" +expectStderr 1 nix-store --restore "$TEST_ROOT/out" < name-after-node.nar | grepQuiet "expected tag 'name'" From 5737d31d4ea7afceaebde0a44fbf188bfe39783b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 12 Sep 2024 17:25:25 +0200 Subject: [PATCH 74/77] Test the case hack a bit more --- tests/functional/nars.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index 87fd7beec00f..28876e497c77 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -67,6 +67,12 @@ expectStderr 1 nix-store --restore "$TEST_ROOT/out" < "$TEST_ROOT/tmp.nar" | gre rm -rf "$TEST_ROOT/case" opts=("--option" "use-case-hack" "true") nix-store "${opts[@]}" --restore "$TEST_ROOT/case" < case.nar +[[ -e "$TEST_ROOT/case/xt_CONNMARK.h" ]] +[[ -e "$TEST_ROOT/case/xt_CONNmark.h~nix~case~hack~1" ]] +[[ -e "$TEST_ROOT/case/xt_connmark.h~nix~case~hack~2" ]] +[[ -e "$TEST_ROOT/case/x/FOO" ]] +[[ -d "$TEST_ROOT/case/x/Foo~nix~case~hack~1" ]] +[[ -e "$TEST_ROOT/case/x/foo~nix~case~hack~2/a~nix~case~hack~1/foo" ]] nix-store "${opts[@]}" --dump "$TEST_ROOT/case" > "$TEST_ROOT/case.nar" cmp case.nar "$TEST_ROOT/case.nar" [ "$(nix-hash "${opts[@]}" --type sha256 "$TEST_ROOT/case")" = 
"$(nix-hash --flat --type sha256 case.nar)" ] From 2226f9864ed1b529bb300bf444a57bdd6c3d22fa Mon Sep 17 00:00:00 2001 From: Bryan Honof Date: Thu, 12 Sep 2024 03:05:41 +0200 Subject: [PATCH 75/77] feat(run): inherit from MixEnvironment --- src/nix/run.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/nix/run.cc b/src/nix/run.cc index 63ae8a195795..9565635917aa 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -75,7 +75,7 @@ void execProgramInStore(ref store, } -struct CmdRun : InstallableValueCommand +struct CmdRun : InstallableValueCommand, MixEnvironment { using InstallableCommand::run; @@ -135,6 +135,8 @@ struct CmdRun : InstallableValueCommand // we are about to exec out of this process without running C++ destructors. state->evalCaches.clear(); + setEnviron(); + execProgramInStore(store, UseLookupPath::DontUse, app.program, allArgs); } }; From 976f539f7dd2ee6345b605def105883d16b32c60 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 28 Aug 2024 02:35:46 +0200 Subject: [PATCH 76/77] Make Repo::flush interruptible --- src/libfetchers/git-utils.cc | 115 ++++++++++++++++++++++++++--------- 1 file changed, 85 insertions(+), 30 deletions(-) diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 70391a2878bb..265eb35d2272 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -166,6 +166,45 @@ static Object peelToTreeOrBlob(git_object * obj) return peelObject(obj, GIT_OBJECT_TREE); } +struct PackBuilderContext { + std::exception_ptr exception; + + void handleException(const char * activity, int errCode) + { + switch (errCode) { + case GIT_OK: + break; + case GIT_EUSER: + if (!exception) + panic("PackBuilderContext::handleException: user error, but exception was not set"); + + std::rethrow_exception(exception); + default: + throw Error("%s: %i, %s", Uncolored(activity), errCode, git_error_last()->message); + } + } +}; + +extern "C" { + +/** + * A `git_packbuilder_progress` 
implementation that aborts the pack building if needed. + */ +static int packBuilderProgressCheckInterrupt(int stage, uint32_t current, uint32_t total, void *payload) +{ + PackBuilderContext & args = * (PackBuilderContext *) payload; + try { + checkInterrupt(); + return GIT_OK; + } catch (const std::exception & e) { + args.exception = std::current_exception(); + return GIT_EUSER; + } +}; +static git_packbuilder_progress PACKBUILDER_PROGRESS_CHECK_INTERRUPT = &packBuilderProgressCheckInterrupt; + +} // extern "C" + struct GitRepoImpl : GitRepo, std::enable_shared_from_this { /** Location of the repository on disk. */ @@ -213,42 +252,58 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this } void flush() override { + checkInterrupt(); + git_buf buf = GIT_BUF_INIT; - try { - PackBuilder packBuilder; - git_packbuilder_new(Setter(packBuilder), *this); - checkInterrupt(); - git_mempack_write_thin_pack(mempack_backend, packBuilder.get()); - checkInterrupt(); - // TODO make git_packbuilder_write_buf() interruptible - git_packbuilder_write_buf(&buf, packBuilder.get()); - checkInterrupt(); + Finally _disposeBuf { [&] { git_buf_dispose(&buf); } }; + PackBuilder packBuilder; + PackBuilderContext packBuilderContext; + git_packbuilder_new(Setter(packBuilder), *this); + git_packbuilder_set_callbacks(packBuilder.get(), PACKBUILDER_PROGRESS_CHECK_INTERRUPT, &packBuilderContext); + git_packbuilder_set_threads(packBuilder.get(), 0 /* autodetect */); + + packBuilderContext.handleException( + "preparing packfile", + git_mempack_write_thin_pack(mempack_backend, packBuilder.get()) + ); + checkInterrupt(); + packBuilderContext.handleException( + "writing packfile", + git_packbuilder_write_buf(&buf, packBuilder.get()) + ); + checkInterrupt(); - std::string repo_path = std::string(git_repository_path(repo.get())); - while (!repo_path.empty() && repo_path.back() == '/') - repo_path.pop_back(); - std::string pack_dir_path = repo_path + "/objects/pack"; - - // TODO: could the indexing 
be done in a separate thread? - Indexer indexer; - git_indexer_progress stats; - if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr)) - throw Error("creating git packfile indexer: %s", git_error_last()->message); - // TODO: feed buf in (fairly large) chunk to make this interruptible - if (git_indexer_append(indexer.get(), buf.ptr, buf.size, &stats)) + std::string repo_path = std::string(git_repository_path(repo.get())); + while (!repo_path.empty() && repo_path.back() == '/') + repo_path.pop_back(); + std::string pack_dir_path = repo_path + "/objects/pack"; + + // TODO (performance): could the indexing be done in a separate thread? + // we'd need a more streaming variation of + // git_packbuilder_write_buf, or incur the cost of + // copying parts of the buffer to a separate thread. + // (synchronously on the git_packbuilder_write_buf thread) + Indexer indexer; + git_indexer_progress stats; + if (git_indexer_new(Setter(indexer), pack_dir_path.c_str(), 0, nullptr, nullptr)) + throw Error("creating git packfile indexer: %s", git_error_last()->message); + + // TODO: provide index callback for checkInterrupt() termination + // though this is about an order of magnitude faster than the packbuilder + // expect up to 1 sec latency due to uninterruptible git_indexer_append. 
+ constexpr size_t chunkSize = 128 * 1024; + for (size_t offset = 0; offset < buf.size; offset += chunkSize) { + if (git_indexer_append(indexer.get(), buf.ptr + offset, std::min(chunkSize, buf.size - offset), &stats)) throw Error("appending to git packfile index: %s", git_error_last()->message); checkInterrupt(); - if (git_indexer_commit(indexer.get(), &stats)) - throw Error("committing git packfile index: %s", git_error_last()->message); + } - if (git_mempack_reset(mempack_backend)) - throw Error("resetting git mempack backend: %s", git_error_last()->message); + if (git_indexer_commit(indexer.get(), &stats)) + throw Error("committing git packfile index: %s", git_error_last()->message); + + if (git_mempack_reset(mempack_backend)) + throw Error("resetting git mempack backend: %s", git_error_last()->message); - git_buf_dispose(&buf); - } catch (...) { - git_buf_dispose(&buf); - throw; - } checkInterrupt(); } From 459d02672c5ea9c6b909c0ace6b11141af79421e Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Wed, 28 Aug 2024 11:13:03 +0200 Subject: [PATCH 77/77] fix Windows build --- packaging/dependencies.nix | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 0182f29c0d05..e5f4c0f91807 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -116,20 +116,25 @@ scope: { cmakeFlags = attrs.cmakeFlags or [] ++ [ "-DUSE_SSH=exec" ]; nativeBuildInputs = attrs.nativeBuildInputs or [] - ++ [ + # gitMinimal does not build on Windows. See packbuilder patch. 
+ ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ # Needed for `git apply`; see `prePatch` pkgs.buildPackages.gitMinimal ]; # Only `git apply` can handle git binary patches - prePatch = '' - patch() { - git apply - } - ''; + prePatch = attrs.prePatch or "" + + lib.optionalString (!stdenv.hostPlatform.isWindows) '' + patch() { + git apply + } + ''; patches = attrs.patches or [] ++ [ ./patches/libgit2-mempack-thin-packfile.patch - + ] + # gitMinimal does not build on Windows, but fortunately this patch only + # impacts interruptibility + ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ # binary patch; see `prePatch` ./patches/libgit2-packbuilder-callback-interruptible.patch ];