Fix clearing locks
This commit is contained in:
parent
f147dc3179
commit
4416922397
|
@ -656,10 +656,12 @@ void ServiceSetup::Locks::clear()
|
|||
{
|
||||
auto log = LogContext();
|
||||
const auto lock = std::lock_guard(m_mutex);
|
||||
for (auto i = m_locksByName.begin(), end = m_locksByName.end(); i != end; ++i) {
|
||||
for (auto i = m_locksByName.begin(), end = m_locksByName.end(); i != end;) {
|
||||
if (auto lock2 = i->second.tryLockToWrite(log, std::string(i->first)); lock2.lock()) { // check whether nobody holds the lock anymore
|
||||
lock2.lock().unlock(); // ~shared_mutex(): The behavior is undefined if the mutex is owned by any thread [...].
|
||||
m_locksByName.erase(i); // we can be sure no other thread acquires i->second in the meantime because we're holding m_mutex
|
||||
m_locksByName.erase(i++); // we can be sure no other thread acquires i->second in the meantime because we're holding m_mutex
|
||||
} else {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -122,15 +122,18 @@ struct LIBREPOMGR_EXPORT ServiceSetup : public LibPkg::Lockable {
|
|||
} auth;
|
||||
|
||||
struct LIBREPOMGR_EXPORT Locks {
|
||||
using LockTable = std::unordered_map<std::string, GlobalLockable>;
|
||||
|
||||
[[nodiscard]] SharedLoggingLock acquireToRead(LogContext &log, std::string &&lockName);
|
||||
[[nodiscard]] UniqueLoggingLock acquireToWrite(LogContext &log, std::string &&lockName);
|
||||
[[nodiscard]] std::pair<LockTable *, std::unique_lock<std::mutex>> acquireLockTable();
|
||||
void clear();
|
||||
static std::string forDatabase(std::string_view dbName, std::string_view dbArch);
|
||||
static std::string forDatabase(const LibPkg::Database &db);
|
||||
|
||||
private:
|
||||
std::mutex m_mutex;
|
||||
std::unordered_map<std::string, GlobalLockable> m_locksByName;
|
||||
LockTable m_locksByName;
|
||||
} locks;
|
||||
|
||||
void loadConfigFiles(bool restoreStateAndDiscardDatabases);
|
||||
|
@ -161,6 +164,11 @@ inline UniqueLoggingLock ServiceSetup::Locks::acquireToWrite(LogContext &log, st
|
|||
return m_locksByName[lockName].lockToWrite(log, std::move(lockName));
|
||||
}
|
||||
|
||||
inline std::pair<ServiceSetup::Locks::LockTable *, std::unique_lock<std::mutex>> ServiceSetup::Locks::acquireLockTable()
|
||||
{
|
||||
return std::make_pair(&m_locksByName, std::unique_lock(m_mutex));
|
||||
}
|
||||
|
||||
struct LIBREPOMGR_EXPORT ServiceStatus : public ReflectiveRapidJSON::JsonSerializable<ServiceStatus> {
|
||||
ServiceStatus(const ServiceSetup &setup);
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#include "../globallock.h"
|
||||
#include "../logging.h"
|
||||
#include "../serversetup.h"
|
||||
|
||||
#include <c++utilities/conversion/stringbuilder.h>
|
||||
#include <c++utilities/io/misc.h>
|
||||
|
@ -20,9 +21,11 @@ using namespace LibRepoMgr;
|
|||
class UtilsTests : public TestFixture {
|
||||
CPPUNIT_TEST_SUITE(UtilsTests);
|
||||
CPPUNIT_TEST(testGlobalLock);
|
||||
CPPUNIT_TEST(testLockTable);
|
||||
CPPUNIT_TEST_SUITE_END();
|
||||
|
||||
void testGlobalLock();
|
||||
void testLockTable();
|
||||
|
||||
public:
|
||||
UtilsTests();
|
||||
|
@ -69,3 +72,17 @@ void UtilsTests::testGlobalLock()
|
|||
CPPUNIT_ASSERT_MESSAGE("try_lock() possible if mutex not locked", mutex.try_lock());
|
||||
mutex.unlock();
|
||||
}
|
||||
|
||||
void UtilsTests::testLockTable()
|
||||
{
|
||||
auto log = LogContext();
|
||||
auto locks = ServiceSetup::Locks();
|
||||
auto readLock = locks.acquireToRead(log, "foo");
|
||||
locks.clear(); // should not deadlock (and simply ignore the still acquired lock)
|
||||
readLock.lock().unlock();
|
||||
auto lockTable = locks.acquireLockTable();
|
||||
CPPUNIT_ASSERT_EQUAL_MESSAGE("read lock still present", 1_st, lockTable.first->size());
|
||||
lockTable.second.unlock();
|
||||
locks.clear(); // should free up all locks now
|
||||
CPPUNIT_ASSERT_EQUAL_MESSAGE("read lock cleared", 0_st, lockTable.first->size());
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue