diff --git a/librepomgr/serversetup.cpp b/librepomgr/serversetup.cpp
index e3dddb0..72d9c95 100644
--- a/librepomgr/serversetup.cpp
+++ b/librepomgr/serversetup.cpp
@@ -656,10 +656,12 @@ void ServiceSetup::Locks::clear()
 {
     auto log = LogContext();
     const auto lock = std::lock_guard(m_mutex);
-    for (auto i = m_locksByName.begin(), end = m_locksByName.end(); i != end; ++i) {
+    for (auto i = m_locksByName.begin(), end = m_locksByName.end(); i != end;) {
         if (auto lock2 = i->second.tryLockToWrite(log, std::string(i->first)); lock2.lock()) { // check whether nobody holds the lock anymore
             lock2.lock().unlock(); // ~shared_mutex(): The behavior is undefined if the mutex is owned by any thread [...].
-            m_locksByName.erase(i); // we can be sure no other thead aquires i->second in the meantime because we're holding m_mutex
+            m_locksByName.erase(i++); // we can be sure no other thread acquires i->second in the meantime because we're holding m_mutex
+        } else {
+            ++i;
         }
     }
 }
diff --git a/librepomgr/serversetup.h b/librepomgr/serversetup.h
index a169d6c..e110e6b 100644
--- a/librepomgr/serversetup.h
+++ b/librepomgr/serversetup.h
@@ -122,15 +122,18 @@
     } auth;
 
     struct LIBREPOMGR_EXPORT Locks {
+        using LockTable = std::unordered_map<std::string, GlobalLockable>;
+
         [[nodiscard]] SharedLoggingLock acquireToRead(LogContext &log, std::string &&lockName);
         [[nodiscard]] UniqueLoggingLock acquireToWrite(LogContext &log, std::string &&lockName);
+        [[nodiscard]] std::pair<LockTable *, std::unique_lock<std::mutex>> acquireLockTable();
         void clear();
         static std::string forDatabase(std::string_view dbName, std::string_view dbArch);
         static std::string forDatabase(const LibPkg::Database &db);
 
     private:
         std::mutex m_mutex;
-        std::unordered_map<std::string, GlobalLockable> m_locksByName;
+        LockTable m_locksByName;
     } locks;
 
     void loadConfigFiles(bool restoreStateAndDiscardDatabases);
@@ -161,6 +164,11 @@ inline UniqueLoggingLock ServiceSetup::Locks::acquireToWrite(LogContext &log, st
     return m_locksByName[lockName].lockToWrite(log, std::move(lockName));
 }
 
+inline std::pair<ServiceSetup::Locks::LockTable *, std::unique_lock<std::mutex>> ServiceSetup::Locks::acquireLockTable()
+{
+    return std::make_pair(&m_locksByName, std::unique_lock(m_mutex));
+}
+
 struct LIBREPOMGR_EXPORT ServiceStatus : public ReflectiveRapidJSON::JsonSerializable<ServiceStatus> {
     ServiceStatus(const ServiceSetup &setup);
 
diff --git a/librepomgr/tests/utils.cpp b/librepomgr/tests/utils.cpp
index ac407a6..a1626ee 100644
--- a/librepomgr/tests/utils.cpp
+++ b/librepomgr/tests/utils.cpp
@@ -1,5 +1,6 @@
 #include "../globallock.h"
 #include "../logging.h"
+#include "../serversetup.h"
 
 #include
 #include
@@ -20,9 +21,11 @@ using namespace LibRepoMgr;
 class UtilsTests : public TestFixture {
     CPPUNIT_TEST_SUITE(UtilsTests);
     CPPUNIT_TEST(testGlobalLock);
+    CPPUNIT_TEST(testLockTable);
     CPPUNIT_TEST_SUITE_END();
 
     void testGlobalLock();
+    void testLockTable();
 
 public:
     UtilsTests();
@@ -69,3 +72,17 @@ void UtilsTests::testGlobalLock()
     CPPUNIT_ASSERT_MESSAGE("try_lock() possible if mutex not locked", mutex.try_lock());
     mutex.unlock();
 }
+
+void UtilsTests::testLockTable()
+{
+    auto log = LogContext();
+    auto locks = ServiceSetup::Locks();
+    auto readLock = locks.acquireToRead(log, "foo");
+    locks.clear(); // should not deadlock (and simply ignore the still acquired lock)
+    readLock.lock().unlock();
+    auto lockTable = locks.acquireLockTable();
+    CPPUNIT_ASSERT_EQUAL_MESSAGE("read lock still present", 1_st, lockTable.first->size());
+    lockTable.second.unlock();
+    locks.clear(); // should free up all locks now
+    CPPUNIT_ASSERT_EQUAL_MESSAGE("read lock cleared", 0_st, lockTable.first->size());
+}
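
Illustrative usage (not part of the patch): acquireLockTable() returns a pointer to the lock table together with a std::unique_lock on the mutex guarding it, so a caller can inspect the currently tracked locks for as long as it keeps that guard alive — the same pattern the new testLockTable() test relies on. A minimal sketch of such a caller follows; the helper name listTrackedLocks, the include path, and the assumption that the caller can reach a LibRepoMgr::ServiceSetup instance are hypothetical.

// Illustrative sketch only (not part of this patch): list the names of all
// currently tracked locks. The helper name and include path are hypothetical.
#include <string>
#include <vector>

#include "serversetup.h" // declares ServiceSetup::Locks::acquireLockTable(); path depends on the consuming target

std::vector<std::string> listTrackedLocks(LibRepoMgr::ServiceSetup &setup)
{
    auto names = std::vector<std::string>();
    // first: pointer to the lock table, second: std::unique_lock guarding it;
    // the table must only be accessed while that unique_lock is held
    auto lockTable = setup.locks.acquireLockTable();
    names.reserve(lockTable.first->size());
    for (const auto &entry : *lockTable.first) {
        names.emplace_back(entry.first);
    }
    return names; // the unique_lock is released when lockTable goes out of scope
}

Returning the guard by value keeps the locking RAII-scoped: the table stays protected exactly as long as the returned pair is alive.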