author    Eelco Dolstra <edolstra@gmail.com>  2023-02-13 13:02:19 +0100
committer GitHub <noreply@github.com>         2023-02-13 13:02:19 +0100
commit    c205d10c669137da90c176669838a6c6d9158939 (patch)
tree      43221498c1da8329073b60f90ce4ef48075d9446 /src
parent    2037f8a3ee55406e743d81b4d09fed24fe846082 (diff)
parent    19b495a48a45ac4f04d9b362c0bbaa6fc122c404 (diff)
Merge pull request #7616 from hercules-ci/fix-3898
Fix foreign key error inserting into NARs #3898
Diffstat (limited to 'src')
-rw-r--r--  src/libstore/http-binary-cache-store.cc   |   2
-rw-r--r--  src/libstore/nar-info-disk-cache.cc        |  49
-rw-r--r--  src/libstore/nar-info-disk-cache.hh        |   7
-rw-r--r--  src/libstore/s3-binary-cache-store.cc      |   2
-rw-r--r--  src/libstore/sqlite.cc                     |  14
-rw-r--r--  src/libstore/tests/nar-info-disk-cache.cc  | 123
6 files changed, 181 insertions, 16 deletions
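
The substance of the fix is the new upsert statement in nar-info-disk-cache.cc below. A rough SQL sketch of why the old statement broke things and the new one does not (the tables here are a simplified assumption, not the exact schema defined in nar-info-disk-cache.cc):

    -- Simplified, hypothetical tables for illustration only.
    pragma foreign_keys = 1;

    create table BinaryCaches (
        id       integer primary key autoincrement not null,
        url      text unique not null,
        priority integer not null
    );

    create table NARs (
        cache    integer not null,
        hashPart text not null,
        foreign key (cache) references BinaryCaches(id) on delete cascade
    );

    insert into BinaryCaches(url, priority) values ('https://cache.example.org', 50);   -- row gets id 1

    -- Old statement: "or replace" resolves the url conflict by deleting the existing
    -- row and inserting a fresh one, so the same url silently ends up under a new id.
    insert or replace into BinaryCaches(url, priority) values ('https://cache.example.org', 50);

    -- Anything still holding the old id now trips the foreign key on NARs:
    insert into NARs(cache, hashPart) values (1, 'abcdef');   -- FOREIGN KEY constraint failed

    -- New statement: a real upsert keeps the existing row (and its id) and returns it,
    -- so previously obtained ids stay valid and NARs inserts keep working.
    insert into BinaryCaches(url, priority) values ('https://cache.example.org', 50)
        on conflict (url) do update set priority = excluded.priority
        returning id;

The returning id clause is also what lets createCache hand a stable id back to its callers, as exercised by the new test at the bottom of this diff.
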
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 73bcd6e81..1479822a9 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -56,7 +56,7 @@ public:
void init() override
{
// FIXME: do this lazily?
- if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
+ if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 3e0689534..2645f468b 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -84,11 +84,10 @@ public:
Sync<State> _state;
- NarInfoDiskCacheImpl()
+ NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
{
auto state(_state.lock());
- Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
createDirs(dirOf(dbPath));
state->db = SQLite(dbPath);
@@ -98,7 +97,7 @@ public:
state->db.exec(schema);
state->insertCache.create(state->db,
- "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+ "insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
@@ -166,6 +165,8 @@ public:
return i->second;
}
+private:
+
std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
@@ -173,15 +174,21 @@ public:
auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
if (!queryCache.next())
return std::nullopt;
- state.caches.emplace(uri,
- Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+ auto cache = Cache {
+ .id = (int) queryCache.getInt(0),
+ .storeDir = queryCache.getStr(1),
+ .wantMassQuery = queryCache.getInt(2) != 0,
+ .priority = (int) queryCache.getInt(3),
+ };
+ state.caches.emplace(uri, cache);
}
return getCache(state, uri);
}
- void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+public:
+ int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
- retrySQLite<void>([&]() {
+ return retrySQLite<int>([&]() {
auto state(_state.lock());
SQLiteTxn txn(state->db);
@@ -190,17 +197,29 @@ public:
auto cache(queryCacheRaw(*state, uri));
if (cache)
- return;
+ return cache->id;
- state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
- assert(sqlite3_changes(state->db) == 1);
- state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+ Cache ret {
+ .id = -1, // set below
+ .storeDir = storeDir,
+ .wantMassQuery = wantMassQuery,
+ .priority = priority,
+ };
+
+ {
+ auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
+ assert(r.next());
+ ret.id = (int) r.getInt(0);
+ }
+
+ state->caches[uri] = ret;
txn.commit();
+ return ret.id;
});
}
- std::optional<CacheInfo> cacheExists(const std::string & uri) override
+ std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
@@ -208,6 +227,7 @@ public:
if (!cache)
return std::nullopt;
return CacheInfo {
+ .id = cache->id,
.wantMassQuery = cache->wantMassQuery,
.priority = cache->priority
};
@@ -371,4 +391,9 @@ ref<NarInfoDiskCache> getNarInfoDiskCache()
return cache;
}
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
+{
+ return make_ref<NarInfoDiskCacheImpl>(dbPath);
+}
+
}
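
(Note: the on conflict ... do update ... returning id form above relies on SQLite's UPSERT and RETURNING support; RETURNING in particular was only added in SQLite 3.35.0. It is also why the prepared statement is now stepped with r.next() and read with r.getInt(0) rather than consulting sqlite3_last_insert_rowid(), which does not reflect the row's id when the conflicting row is updated instead of replaced.)
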
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 2dcaa76a4..4877f56d8 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -13,16 +13,17 @@ public:
virtual ~NarInfoDiskCache() { }
- virtual void createCache(const std::string & uri, const Path & storeDir,
+ virtual int createCache(const std::string & uri, const Path & storeDir,
bool wantMassQuery, int priority) = 0;
struct CacheInfo
{
+ int id;
bool wantMassQuery;
int priority;
};
- virtual std::optional<CacheInfo> cacheExists(const std::string & uri) = 0;
+ virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) = 0;
@@ -45,4 +46,6 @@ public:
multiple threads. */
ref<NarInfoDiskCache> getNarInfoDiskCache();
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
+
}
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 844553ad3..8d76eee99 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -238,7 +238,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
void init() override
{
- if (auto cacheInfo = diskCache->cacheExists(getUri())) {
+ if (auto cacheInfo = diskCache->upToDateCacheExists(getUri())) {
wantMassQuery.setDefault(cacheInfo->wantMassQuery);
priority.setDefault(cacheInfo->priority);
} else {
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 353dff9fa..871f2f3be 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -41,6 +41,15 @@ SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int ex
throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
}
+static void traceSQL(void * x, const char * sql)
+{
+ // wacky delimiters:
+ // so that we're quite unambiguous without escaping anything
+ // notice instead of trace:
+ // so that this can be enabled without getting the firehose in our face.
+ notice("SQL<[%1%]>", sql);
+};
+
SQLite::SQLite(const Path & path, bool create)
{
// useSQLiteWAL also indicates what virtual file system we need. Using
@@ -58,6 +67,11 @@ SQLite::SQLite(const Path & path, bool create)
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
SQLiteError::throw_(db, "setting timeout");
+ if (getEnv("NIX_DEBUG_SQLITE_TRACES") == "1") {
+ // To debug sqlite statements; trace all of them
+ sqlite3_trace(db, &traceSQL, nullptr);
+ }
+
exec("pragma foreign_keys = 1");
}
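
With this hook in place, setting NIX_DEBUG_SQLITE_TRACES=1 in the environment of a Nix process logs every statement executed on such a connection at notice verbosity in the SQL<[...]> form shown above, so statement-level problems like the one fixed here can be inspected without recompiling or attaching a debugger.
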
diff --git a/src/libstore/tests/nar-info-disk-cache.cc b/src/libstore/tests/nar-info-disk-cache.cc
new file mode 100644
index 000000000..b4bdb8329
--- /dev/null
+++ b/src/libstore/tests/nar-info-disk-cache.cc
@@ -0,0 +1,123 @@
+#include "nar-info-disk-cache.hh"
+
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+#include "sqlite.hh"
+#include <sqlite3.h>
+
+
+namespace nix {
+
+TEST(NarInfoDiskCacheImpl, create_and_read) {
+ // This is a large single test to avoid some setup overhead.
+
+ int prio = 12345;
+ bool wantMassQuery = true;
+
+ Path tmpDir = createTempDir();
+ AutoDelete delTmpDir(tmpDir);
+ Path dbPath(tmpDir + "/test-narinfo-disk-cache.sqlite");
+
+ int savedId;
+ int barId;
+ SQLite db;
+ SQLiteStmt getIds;
+
+ {
+ auto cache = getTestNarInfoDiskCache(dbPath);
+
+ // Set up "background noise" and check that different caches receive different ids
+ {
+ auto bc1 = cache->createCache("https://bar", "/nix/storedir", wantMassQuery, prio);
+ auto bc2 = cache->createCache("https://xyz", "/nix/storedir", false, 12);
+ ASSERT_NE(bc1, bc2);
+ barId = bc1;
+ }
+
+ // Check that the fields are saved and returned correctly. This does not test
+ // the select statement yet, because of in-memory caching.
+ savedId = cache->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+ {
+ auto r = cache->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ ASSERT_EQ(savedId, r->id);
+ }
+
+ // We're going to pay special attention to the id field because we had a bug
+ // that changed it.
+ db = SQLite(dbPath);
+ getIds.create(db, "select id from BinaryCaches where url = 'http://foo'");
+
+ {
+ auto q(getIds.use());
+ ASSERT_TRUE(q.next());
+ ASSERT_EQ(savedId, q.getInt(0));
+ ASSERT_FALSE(q.next());
+ }
+
+ // Pretend that the caches are older, but keep one up to date, as "background noise"
+ db.exec("update BinaryCaches set timestamp = timestamp - 1 - 7 * 24 * 3600 where url <> 'https://xyz';");
+
+ // This shows that the in-memory cache works
+ {
+ auto r = cache->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ }
+ }
+
+ {
+ // We can't clear the in-memory cache, so we use a new cache object. This is
+ // more realistic anyway.
+ auto cache2 = getTestNarInfoDiskCache(dbPath);
+
+ {
+ auto r = cache2->upToDateCacheExists("http://foo");
+ ASSERT_FALSE(r);
+ }
+
+ // "Update", same data, check that the id number is reused
+ cache2->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+
+ {
+ auto r = cache2->upToDateCacheExists("http://foo");
+ ASSERT_TRUE(r);
+ ASSERT_EQ(r->priority, prio);
+ ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ ASSERT_EQ(r->id, savedId);
+ }
+
+ {
+ auto q(getIds.use());
+ ASSERT_TRUE(q.next());
+ auto currentId = q.getInt(0);
+ ASSERT_FALSE(q.next());
+ ASSERT_EQ(currentId, savedId);
+ }
+
+ // Check that the fields can be modified, and the id remains the same
+ {
+ auto r0 = cache2->upToDateCacheExists("https://bar");
+ ASSERT_FALSE(r0);
+
+ cache2->createCache("https://bar", "/nix/storedir", !wantMassQuery, prio + 10);
+ auto r = cache2->upToDateCacheExists("https://bar");
+ ASSERT_EQ(r->wantMassQuery, !wantMassQuery);
+ ASSERT_EQ(r->priority, prio + 10);
+ ASSERT_EQ(r->id, barId);
+ }
+
+ // // Force update (no use case yet; we only retrieve cache metadata when stale based on timestamp)
+ // {
+ // cache2->createCache("https://bar", "/nix/storedir", wantMassQuery, prio + 20);
+ // auto r = cache2->upToDateCacheExists("https://bar");
+ // ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+ // ASSERT_EQ(r->priority, prio + 20);
+ // }
+ }
+}
+
+}
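
The test builds its cache through the new getTestNarInfoDiskCache hook with a database path inside a temporary directory, so it exercises the real NarInfoDiskCacheImpl and its SQL while leaving the user's default database (getCacheDir() + "/nix/binary-cache-v6.sqlite", now expressed as the constructor's default argument) untouched.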