From 9455632c5f016e26aefaa7617525d47a83cfea6f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Jan 2016 22:11:43 +0000 Subject: [PATCH] Add S3 store locking using Amazon SimpleDB. --- lib/backupclient/BackupDaemonConfigVerify.cpp | 11 +- lib/backupstore/BackupFileSystem.cpp | 265 +++++++++++++++++- lib/backupstore/BackupFileSystem.h | 57 +++- lib/httpserver/SimpleDBClient.h | 13 + modules.txt | 2 +- test/backupstore/testbackupstore.cpp | 154 +++++++++- test/backupstore/testfiles/bbackupd.conf | 65 +++++ test/backupstore/testfiles/s3simulator.conf | 10 + 8 files changed, 552 insertions(+), 25 deletions(-) create mode 100644 test/backupstore/testfiles/bbackupd.conf create mode 100644 test/backupstore/testfiles/s3simulator.conf diff --git a/lib/backupclient/BackupDaemonConfigVerify.cpp b/lib/backupclient/BackupDaemonConfigVerify.cpp index 865ee4132..e0650cfe2 100644 --- a/lib/backupclient/BackupDaemonConfigVerify.cpp +++ b/lib/backupclient/BackupDaemonConfigVerify.cpp @@ -50,9 +50,16 @@ static const ConfigurationVerifyKey verifys3keys[] = // These values are only required for Amazon S3-compatible stores ConfigurationVerifyKey("HostName", ConfigTest_Exists), ConfigurationVerifyKey("Port", ConfigTest_Exists | ConfigTest_IsInt, 80), - ConfigurationVerifyKey("BasePath", ConfigTest_Exists), + ConfigurationVerifyKey("BasePath", 0, "/"), ConfigurationVerifyKey("AccessKey", ConfigTest_Exists), - ConfigurationVerifyKey("SecretKey", ConfigTest_Exists | ConfigTest_LastEntry) + ConfigurationVerifyKey("SecretKey", ConfigTest_Exists), + ConfigurationVerifyKey("SimpleDBHostName", 0, "sdb.amazonaws.com"), + ConfigurationVerifyKey("SimpleDBPort", ConfigTest_IsInt, 80), + ConfigurationVerifyKey("SimpleDBEndpoint", 0, ""), + ConfigurationVerifyKey("SimpleDBDomain", 0, "boxbackup_locks"), + ConfigurationVerifyKey("SimpleDBLockName", 0), + ConfigurationVerifyKey("SimpleDBLockValue", 0), + ConfigurationVerifyKey("CacheDirectory", ConfigTest_Exists | ConfigTest_LastEntry) }; static 
const ConfigurationVerify verifyserver[] = diff --git a/lib/backupstore/BackupFileSystem.cpp b/lib/backupstore/BackupFileSystem.cpp index 440892c0d..a9b73ea24 100644 --- a/lib/backupstore/BackupFileSystem.cpp +++ b/lib/backupstore/BackupFileSystem.cpp @@ -10,6 +10,9 @@ #include "Box.h" +#include +#include +#include #include #include "autogen_BackupStoreException.h" @@ -500,6 +503,42 @@ void RaidBackupFileSystem::DeleteFile(int64_t ObjectID) deleteFile.Delete(); } +S3BackupFileSystem::S3BackupFileSystem(const Configuration& config, + const std::string& BasePath, S3Client& rClient) +: mrConfig(config), + mBasePath(BasePath), + mrClient(rClient), + mHaveLock(false) +{ + const Configuration s3config = config.GetSubConfiguration("S3Store"); + const std::string& s3_hostname(s3config.GetKeyValue("HostName")); + const std::string& s3_base_path(s3config.GetKeyValue("BasePath")); + mSimpleDBDomain = s3config.GetKeyValue("SimpleDBDomain"); + + // The lock name should be the same for all hosts/files/daemons potentially + // writing to the same region of the S3 store. The default is the Amazon S3 bucket + // name and path, concatenated. + mLockName = s3config.GetKeyValueDefault("SimpleDBLockName", + s3_hostname + s3_base_path); + + // The lock value should be unique for each host potentially accessing the same + // region of the store, and should help you to identify which one is currently + // holding the lock. The default is username@hostname(pid). 
+ mCurrentUserName = getpwuid(getuid())->pw_name; + + char hostname_buf[1024]; + if(gethostname(hostname_buf, sizeof(hostname_buf)) != 0) + { + THROW_SYS_ERROR("Failed to get hostname", CommonException, Internal); + } + mCurrentHostName = hostname_buf; + + std::ostringstream lock_value_buf; + lock_value_buf << mCurrentUserName << "@" << hostname_buf << "(" << getpid() << ")"; + mLockValue = s3config.GetKeyValueDefault("SimpleDBLockValue", + lock_value_buf.str()); +} + int S3BackupFileSystem::GetBlockSize() { return S3_NOTIONAL_BLOCK_SIZE; @@ -588,13 +627,13 @@ void S3BackupFileSystem::PutBackupStoreInfo(BackupStoreInfo& rInfo) //! revision ID, which for a RaidFile is based on its timestamp and file size. bool S3BackupFileSystem::ObjectExists(int64_t ObjectID, int64_t *pRevisionID) { - std::string uri = GetDirectoryURI(ObjectID); + std::string uri = GetObjectURI(GetDirectoryURI(ObjectID)); HTTPResponse response = mrClient.HeadObject(uri); if(response.GetResponseCode() == HTTPResponse::Code_NotFound) { // A file might exist, check that too. 
- uri = GetFileURI(ObjectID); + uri = GetObjectURI(GetFileURI(ObjectID)); response = mrClient.HeadObject(uri); } @@ -648,7 +687,6 @@ void S3BackupFileSystem::GetDirectory(int64_t ObjectID, BackupStoreDirectory& rD rDirOut.ReadFromStream(response, mrClient.GetNetworkTimeout()); rDirOut.SetRevisionID(GetRevisionID(uri, response)); - ASSERT(false); // set the size in blocks rDirOut.SetUserInfo1_SizeInBlocks(GetSizeInBlocks(response.GetContentLength())); } @@ -669,3 +707,224 @@ void S3BackupFileSystem::PutDirectory(BackupStoreDirectory& rDir) rDir.SetUserInfo1_SizeInBlocks(GetSizeInBlocks(out.GetSize())); } +void S3BackupFileSystem::ReportLockMismatches(str_map_diff_t mismatches) +{ + if(!mismatches.empty()) + { + std::ostringstream error_buf; + bool first_item = true; + for(str_map_diff_t::iterator i = mismatches.begin(); + i != mismatches.end(); i++) + { + if(!first_item) + { + error_buf << ", "; + } + first_item = false; + const std::string& name(i->first); + const std::string& expected(i->second.first); + const std::string& actual(i->second.second); + + error_buf << name << " was not '" << expected << "' but '" << + actual << "'"; + } + THROW_EXCEPTION_MESSAGE(BackupStoreException, + CouldNotLockStoreAccount, "Lock on '" << mLockName << + "' was concurrently modified: " << error_buf.str()); + } +} + +void S3BackupFileSystem::TryGetLock() +{ + if(mHaveLock) + { + return; + } + + const Configuration s3config = mrConfig.GetSubConfiguration("S3Store"); + + if(!mapSimpleDBClient.get()) + { + mapSimpleDBClient.reset(new SimpleDBClient(s3config)); + // timeout left at the default 300 seconds. + } + + // Create the domain, to ensure that it exists. This is idempotent. 
+ mapSimpleDBClient->CreateDomain(mSimpleDBDomain); + SimpleDBClient::str_map_t conditional; + + // Check to see whether someone already holds the lock + try + { + SimpleDBClient::str_map_t attributes; + { + HideSpecificExceptionGuard hex(HTTPException::ExceptionType, + HTTPException::SimpleDBItemNotFound); + attributes = mapSimpleDBClient->GetAttributes(mSimpleDBDomain, + mLockName); + } + + // This succeeded, which means that someone once held the lock. If the + // locked attribute is empty, then they released it cleanly, and we can + // access the account safely. + box_time_t since_time = strtoull(attributes["since"].c_str(), NULL, 10); + + if(attributes["locked"] == "") + { + // The account was locked, but no longer. Make sure it stays that + // way, to avoid a race condition. + conditional = attributes; + } + // Otherwise, someone holds the lock right now. If the lock is held by + // this computer (same hostname) and the PID is no longer running, then + // it's reasonable to assume that we can override it because the original + // process is dead. 
+ else if(attributes["hostname"] == mCurrentHostName) + { + char* end_ptr; + int locking_pid = strtol(attributes["pid"].c_str(), &end_ptr, 10); + if(*end_ptr != 0) + { + THROW_EXCEPTION_MESSAGE(BackupStoreException, + CouldNotLockStoreAccount, "Failed to parse PID " + "from existing lock: " << attributes["pid"]); + } + + if(kill(locking_pid, 0) == 0) + { + THROW_EXCEPTION_MESSAGE(BackupStoreException, + CouldNotLockStoreAccount, "Lock on '" << + mLockName << "' is held by '" << + attributes["locker"] << "' (process " << + locking_pid << " on this host, " << + mCurrentHostName << ", which is still running), " + "since " << FormatTime(since_time, + true)); // includeDate + } + else + { + BOX_WARNING( + "Lock on '" << mLockName << "' was held by '" << + attributes["locker"] << "' (process " << + locking_pid << " on this host, " << + mCurrentHostName << ", which appears to have ended) " + "since " << FormatTime(since_time, + true) // includeDate + << ", overriding it"); + conditional = attributes; + } + } + else + { + // If the account is locked by a process on a different host, then + // we have no way to check whether it is still running, so we can + // only give up. + THROW_EXCEPTION_MESSAGE(BackupStoreException, + CouldNotLockStoreAccount, "Lock on '" << mLockName << + "' is held by '" << attributes["locker"] << "' since " << + FormatTime(since_time, true)); // includeDate + } + } + catch(HTTPException &e) + { + if(EXCEPTION_IS_TYPE(e, HTTPException, SimpleDBItemNotFound)) + { + // The lock doesn't exist, so it's safe to create it. We can't + // make this request conditional, so there is a race condition + // here! We deal with that by reading back the attributes with + // a ConsistentRead after writing them. + } + else + { + // Something else went wrong. 
+ throw; + } + } + + mLockAttributes["locked"] = "true"; + mLockAttributes["locker"] = mLockValue; + mLockAttributes["hostname"] = mCurrentHostName; + { + std::ostringstream pid_buf; + pid_buf << getpid(); + mLockAttributes["pid"] = pid_buf.str(); + } + { + std::ostringstream since_buf; + since_buf << GetCurrentBoxTime(); + mLockAttributes["since"] = since_buf.str(); + } + + // This will throw an exception if the conditional PUT fails: + mapSimpleDBClient->PutAttributes(mSimpleDBDomain, mLockName, mLockAttributes, + conditional); + + // To avoid the race condition, read back the attribute values with a consistent + // read, to check that nobody else sneaked in at the same time: + SimpleDBClient::str_map_t attributes_read = mapSimpleDBClient->GetAttributes( + mSimpleDBDomain, mLockName, true); // consistent_read + + str_map_diff_t mismatches = compare_str_maps(mLockAttributes, attributes_read); + + // This should throw an exception if there are any mismatches: + ReportLockMismatches(mismatches); + ASSERT(mismatches.empty()); + + // Now we have the lock! + mHaveLock = true; +} + +void S3BackupFileSystem::ReleaseLock() +{ + // Releasing is so much easier! + if(!mHaveLock) + { + return; + } + + // If we have a lock, we should also have the SimpleDBClient that we used to + // acquire it! + ASSERT(mapSimpleDBClient.get()); + + // Read the current values, and check that they match what we expected, i.e. 
that + // nobody stole the lock from under us + SimpleDBClient::str_map_t attributes_read = mapSimpleDBClient->GetAttributes( + mSimpleDBDomain, mLockName, true); // consistent_read + str_map_diff_t mismatches = compare_str_maps(mLockAttributes, attributes_read); + + // This should throw an exception if there are any mismatches: + ReportLockMismatches(mismatches); + ASSERT(mismatches.empty()); + + // Now write the same values back, except with "locked" = "" + mLockAttributes["locked"] = ""; + + // Conditional PUT, using the values that we just read, to ensure that nobody + // changes it under our feet right now. This will throw an exception if the + // conditional PUT fails: + mapSimpleDBClient->PutAttributes(mSimpleDBDomain, mLockName, mLockAttributes, + attributes_read); + + // Read back, to check that we unlocked successfully: + attributes_read = mapSimpleDBClient->GetAttributes(mSimpleDBDomain, mLockName, + true); // consistent_read + mismatches = compare_str_maps(mLockAttributes, attributes_read); + + // This should throw an exception if there are any mismatches: + ReportLockMismatches(mismatches); + ASSERT(mismatches.empty()); + + // Now we no longer have the lock! + mHaveLock = false; +} + +S3BackupFileSystem::~S3BackupFileSystem() +{ + // This needs to be in the source file, not inline, as long as we don't include + // the whole of SimpleDBClient.h in BackupFileSystem.h. 
+ if(mHaveLock) + { + ReleaseLock(); + } +} + diff --git a/lib/backupstore/BackupFileSystem.h b/lib/backupstore/BackupFileSystem.h index d555d216a..4db159de5 100644 --- a/lib/backupstore/BackupFileSystem.h +++ b/lib/backupstore/BackupFileSystem.h @@ -13,10 +13,10 @@ #include -#include "autogen_BackupStoreException.h" #include "HTTPResponse.h" #include "NamedLock.h" #include "S3Client.h" +#include "SimpleDBClient.h" class BackupStoreDirectory; class BackupStoreInfo; @@ -87,6 +87,13 @@ class RaidBackupFileSystem : public BackupFileSystem mAccountRootDir(AccountRootDir), mStoreDiscSet(discSet) { } + ~RaidBackupFileSystem() + { + if(mWriteLock.GotLock()) + { + ReleaseLock(); + } + } virtual void TryGetLock(); virtual void ReleaseLock() { @@ -121,23 +128,23 @@ class S3BackupFileSystem : public BackupFileSystem const Configuration& mrConfig; std::string mBasePath; S3Client& mrClient; - std::string GetDirectoryURI(int64_t ObjectID); - std::string GetFileURI(int64_t ObjectID); + std::auto_ptr mapSimpleDBClient; int64_t GetRevisionID(const std::string& uri, HTTPResponse& response) const; - int GetSizeInBlocks(int64_t bytes) - { - return (bytes + S3_NOTIONAL_BLOCK_SIZE - 1) / S3_NOTIONAL_BLOCK_SIZE; - } + bool mHaveLock; + std::string mSimpleDBDomain, mLockName, mLockValue, mCurrentUserName, + mCurrentHostName; + SimpleDBClient::str_map_t mLockAttributes; + void ReportLockMismatches(str_map_diff_t mismatches); + + S3BackupFileSystem(const S3BackupFileSystem& forbidden); // no copying + S3BackupFileSystem& operator=(const S3BackupFileSystem& forbidden); // no assignment public: S3BackupFileSystem(const Configuration& config, const std::string& BasePath, - S3Client& rClient) - : mrConfig(config), - mBasePath(BasePath), - mrClient(rClient) - { } - virtual void TryGetLock() { THROW_EXCEPTION(BackupStoreException, CouldNotLockStoreAccount); } - virtual void ReleaseLock() { } + S3Client& rClient); + ~S3BackupFileSystem(); + virtual void TryGetLock(); + virtual void ReleaseLock(); 
virtual int GetBlockSize(); virtual std::auto_ptr GetBackupStoreInfo(int32_t AccountID, bool ReadOnly); @@ -196,6 +203,28 @@ class S3BackupFileSystem : public BackupFileSystem return mrClient.PutObject(GetObjectURI(ObjectPath), rStreamToSend, pContentType); } + + // These should not really be APIs, but they are public to make them testable: + const std::string& GetSimpleDBDomain() const { return mSimpleDBDomain; } + const std::string& GetSimpleDBLockName() const { return mLockName; } + const std::string& GetSimpleDBLockValue() const { return mLockValue; } + const std::string& GetCurrentUserName() const { return mCurrentUserName; } + const std::string& GetCurrentHostName() const { return mCurrentHostName; } + const box_time_t GetSinceTime() const + { + // Unfortunately operator[] is not const, so use a const_iterator to + // get the value that we want. + const std::string& since(mLockAttributes.find("since")->second); + return strtoull(since.c_str(), NULL, 10); + } + + // And these are public to help with writing tests ONLY: + std::string GetDirectoryURI(int64_t ObjectID); + std::string GetFileURI(int64_t ObjectID); + int GetSizeInBlocks(int64_t bytes) + { + return (bytes + S3_NOTIONAL_BLOCK_SIZE - 1) / S3_NOTIONAL_BLOCK_SIZE; + } }; #define S3_INFO_FILE_NAME "boxbackup.info" diff --git a/lib/httpserver/SimpleDBClient.h b/lib/httpserver/SimpleDBClient.h index 4bdff1306..871c910ea 100644 --- a/lib/httpserver/SimpleDBClient.h +++ b/lib/httpserver/SimpleDBClient.h @@ -17,6 +17,7 @@ #include #include "BoxTime.h" +#include "Configuration.h" #include "HTTPRequest.h" using boost::property_tree::ptree; @@ -60,6 +61,18 @@ class SimpleDBClient mTimeout(timeout) { } + SimpleDBClient(const Configuration& s3config) + : mHostName(s3config.GetKeyValue("SimpleDBHostName")), + mEndpoint(s3config.GetKeyValue("SimpleDBEndpoint")), + mAccessKey(s3config.GetKeyValue("AccessKey")), + mSecretKey(s3config.GetKeyValue("SecretKey")), + mFixedTimestamp(0), + mOffsetMinutes(0), + 
mPort(s3config.GetKeyValueInt("SimpleDBPort")), + // Set a default timeout of 300 seconds to make debugging easier + mTimeout(300) + { } + typedef std::vector list_t; typedef std::map str_map_t; diff --git a/modules.txt b/modules.txt index 829268567..42d229c69 100644 --- a/modules.txt +++ b/modules.txt @@ -35,7 +35,7 @@ bin/bbackupd lib/backupclient qdbm bin/bbackupquery lib/backupclient bin/bbackupctl bin/bbackupd lib/backupclient qdbm -test/backupstore bin/bbstored bin/bbstoreaccounts lib/server lib/backupstore lib/backupclient lib/raidfile +test/backupstore bin/bbstored bin/bbstoreaccounts lib/server lib/backupstore lib/backupclient lib/raidfile bin/s3simulator test/backupstorefix bin/bbstored bin/bbstoreaccounts lib/backupstore lib/raidfile bin/bbackupquery bin/bbackupd bin/bbackupctl test/backupstorepatch bin/bbstored bin/bbstoreaccounts lib/backupclient test/backupdiff lib/backupclient diff --git a/test/backupstore/testbackupstore.cpp b/test/backupstore/testbackupstore.cpp index 44c1eff0b..576cbea6c 100644 --- a/test/backupstore/testbackupstore.cpp +++ b/test/backupstore/testbackupstore.cpp @@ -13,8 +13,10 @@ #include #include "Archive.h" +#include "BackupAccountControl.h" #include "BackupClientCryptoKeys.h" #include "BackupClientFileAttributes.h" +#include "BackupDaemonConfigVerify.h" #include "BackupProtocol.h" #include "BackupStoreAccountDatabase.h" #include "BackupStoreAccounts.h" @@ -38,8 +40,10 @@ #include "RaidFileException.h" #include "RaidFileRead.h" #include "RaidFileWrite.h" +#include "S3Simulator.h" #include "SSLLib.h" #include "ServerControl.h" +#include "SimpleDBClient.h" #include "Socket.h" #include "SocketStreamTLS.h" #include "StoreStructure.h" @@ -53,12 +57,14 @@ #define ENCFILE_SIZE 2765 // Make some test attributes -#define ATTR1_SIZE 245 -#define ATTR2_SIZE 23 -#define ATTR3_SIZE 122 +#define ATTR1_SIZE 245 +#define ATTR2_SIZE 23 +#define ATTR3_SIZE 122 #define SHORT_TIMEOUT 5000 +#define DEFAULT_BBACKUPD_CONFIG_FILE 
"testfiles/bbackupd.conf" + int attr1[ATTR1_SIZE]; int attr2[ATTR2_SIZE]; int attr3[ATTR3_SIZE]; @@ -141,6 +147,32 @@ static const char *uploads_filenames[] = {"49587fds", "cvhjhj324", "sdfcscs324", #define UNLINK_IF_EXISTS(filename) \ if (FileExists(filename)) { TEST_THAT(unlink(filename) == 0); } +int s3simulator_pid = 0; + +bool StartSimulator() +{ + s3simulator_pid = StartDaemon(s3simulator_pid, + "../../bin/s3simulator/s3simulator " + bbstored_args + + " testfiles/s3simulator.conf", "testfiles/s3simulator.pid"); + return s3simulator_pid != 0; +} + +bool StopSimulator() +{ + bool result = StopDaemon(s3simulator_pid, "testfiles/s3simulator.pid", + "s3simulator.memleaks", true); + s3simulator_pid = 0; + return result; +} + +bool kill_running_daemons() +{ + TEST_THAT_OR(::system("test ! -r testfiles/s3simulator.pid || " + "kill `cat testfiles/s3simulator.pid`") == 0, FAIL); + TEST_THAT_OR(::system("rm -f testfiles/s3simulator.pid") == 0, FAIL); + return true; +} + //! Simplifies calling setUp() with the current function name in each test. #define SETUP_TEST_BACKUPSTORE() \ SETUP(); \ @@ -2336,7 +2368,7 @@ bool test_encoding() size_t file_size = enc.GetPosition(); TEST_EQUAL(file_size, contents.GetSize()); - for(int buffer_size = 1; ; buffer_size <<= 1) + for(size_t buffer_size = 1; ; buffer_size <<= 1) { enc.Seek(0, IOStream::SeekType_Absolute); BackupStoreFile::VerifyStream verifier(enc); @@ -3242,6 +3274,109 @@ bool test_read_write_attr_streamformat() TEARDOWN_TEST_BACKUPSTORE(); } +// Test that the S3 backend correctly locks and unlocks the store using SimpleDB. +bool test_simpledb_locking(Configuration& config, S3BackupAccountControl& s3control) +{ + SETUP_TEST_BACKUPSTORE(); + + const Configuration s3config = config.GetSubConfiguration("S3Store"); + S3Client s3client(s3config); + SimpleDBClient client(s3config); + + // There should be no locks at the beginning. In fact the domain should not even + // exist: the client should create it itself. 
+ std::vector expected_domains; + TEST_THAT(compare_lists(expected_domains, client.ListDomains())); + + SimpleDBClient::str_map_t expected; + + // Create a client in a scope, so it will be destroyed when the scope ends. + { + S3BackupFileSystem fs(config, "/foo/", s3client); + + // Check that it hasn't acquired a lock yet. + TEST_CHECK_THROWS( + client.GetAttributes("boxbackup_locks", "localhost/subdir/"), + HTTPException, SimpleDBItemNotFound); + + box_time_t before = GetCurrentBoxTime(); + // If this fails, it will throw an exception: + fs.GetLock(); + box_time_t after = GetCurrentBoxTime(); + + // Check that it has now acquired a lock. + SimpleDBClient::str_map_t attributes = + client.GetAttributes("boxbackup_locks", "localhost/subdir/"); + expected["locked"] = "true"; + + std::ostringstream locker_buf; + locker_buf << fs.GetCurrentUserName() << "@" << fs.GetCurrentHostName() << + "(" << getpid() << ")"; + TEST_EQUAL(locker_buf.str(), fs.GetSimpleDBLockValue()); + expected["locker"] = locker_buf.str(); + + std::ostringstream pid_buf; + pid_buf << getpid(); + expected["pid"] = pid_buf.str(); + + char hostname_buf[1024]; + TEST_EQUAL(0, gethostname(hostname_buf, sizeof(hostname_buf))); + TEST_EQUAL(hostname_buf, fs.GetCurrentHostName()); + expected["hostname"] = hostname_buf; + + TEST_THAT(fs.GetSinceTime() >= before); + TEST_THAT(fs.GetSinceTime() <= after); + std::ostringstream since_buf; + since_buf << fs.GetSinceTime(); + expected["since"] = since_buf.str(); + + TEST_THAT(test_equal_maps(expected, attributes)); + + // Try to acquire another one, check that it fails. 
+ S3BackupFileSystem fs2(config, "/foo/", s3client); + TEST_CHECK_THROWS( + fs2.GetLock(), + BackupStoreException, CouldNotLockStoreAccount); + + // And that the lock was not disturbed + TEST_THAT(test_equal_maps(expected, attributes)); + } + + // Check that when the S3BackupFileSystem went out of scope, it released the lock + expected["locked"] = ""; + { + SimpleDBClient::str_map_t attributes = + client.GetAttributes("boxbackup_locks", "localhost/subdir/"); + TEST_THAT(test_equal_maps(expected, attributes)); + } + + // And that we can acquire it again: + { + S3BackupFileSystem fs(config, "/foo/", s3client); + fs.GetLock(); + + expected["locked"] = "true"; + std::ostringstream since_buf; + since_buf << fs.GetSinceTime(); + expected["since"] = since_buf.str(); + + SimpleDBClient::str_map_t attributes = + client.GetAttributes("boxbackup_locks", "localhost/subdir/"); + TEST_THAT(test_equal_maps(expected, attributes)); + } + + // And release it again: + expected["locked"] = ""; + { + SimpleDBClient::str_map_t attributes = + client.GetAttributes("boxbackup_locks", "localhost/subdir/"); + TEST_THAT(test_equal_maps(expected, attributes)); + } + + TEARDOWN_TEST_BACKUPSTORE(); +} + + int test(int argc, const char *argv[]) { TEST_THAT(test_open_files_with_limited_win32_permissions()); @@ -3289,7 +3424,16 @@ int test(int argc, const char *argv[]) TEST_THAT(test_bbstoreaccounts_create()); TEST_THAT(test_bbstoreaccounts_delete()); TEST_THAT(test_backupstore_directory()); - TEST_THAT(test_directory_parent_entry_tracks_directory_size()); + + std::auto_ptr s3config = load_config_file( + DEFAULT_BBACKUPD_CONFIG_FILE, BackupDaemonConfigVerify); + S3BackupAccountControl s3control(*s3config); + + TEST_THAT(kill_running_daemons()); + TEST_THAT(StartSimulator()); + TEST_THAT(test_simpledb_locking(*s3config, s3control)); + TEST_THAT(StopSimulator()); + TEST_THAT(test_cannot_open_multiple_writable_connections()); TEST_THAT(test_encoding()); TEST_THAT(test_symlinks()); diff --git 
a/test/backupstore/testfiles/bbackupd.conf b/test/backupstore/testfiles/bbackupd.conf new file mode 100644 index 000000000..a7354db09 --- /dev/null +++ b/test/backupstore/testfiles/bbackupd.conf @@ -0,0 +1,65 @@ + +CertificateFile = testfiles/clientCerts.pem +PrivateKeyFile = testfiles/clientPrivKey.pem +TrustedCAsFile = testfiles/clientTrustedCAs.pem + +KeysFile = testfiles/bbackupd.keys + +DataDirectory = testfiles/bbackupd-data + +S3Store +{ + HostName = localhost + Port = 22080 + BasePath = /subdir/ + AccessKey = 0PN5J17HBGZHT7JJ3X82 + SecretKey = uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o + SimpleDBHostName = localhost + SimpleDBPort = 22080 + SimpleDBEndpoint = sdb.localhost + CacheDirectory = testfiles/cache +} + +UpdateStoreInterval = 3 +BackupErrorDelay = 10 +MinimumFileAge = 4 +MaxUploadWait = 24 +DeleteRedundantLocationsAfter = 10 + +FileTrackingSizeThreshold = 1024 +DiffingUploadSizeThreshold = 1024 + +MaximumDiffingTime = 3 +KeepAliveTime = 1 + +ExtendedLogging = no +ExtendedLogFile = testfiles/bbackupd.log + +CommandSocket = testfiles/bbackupd.sock + +NotifyScript = /usr/bin/perl testfiles/notifyscript.pl +SyncAllowScript = /usr/bin/perl testfiles/syncallowscript.pl + +Server +{ + PidFile = testfiles/bbackupd.pid +} + +BackupLocations +{ + Test1 + { + Path = testfiles/TestDir1 + + ExcludeFile = testfiles/TestDir1/excluded_1 + ExcludeFile = testfiles/TestDir1/excluded_2 + ExcludeFilesRegex = \.excludethis$ + ExcludeFilesRegex = EXCLUDE + AlwaysIncludeFile = testfiles/TestDir1/dont.excludethis + ExcludeDir = testfiles/TestDir1/exclude_dir + ExcludeDir = testfiles/TestDir1/exclude_dir_2 + ExcludeDirsRegex = not_this_dir + AlwaysIncludeDirsRegex = ALWAYSINCLUDE + } +} + diff --git a/test/backupstore/testfiles/s3simulator.conf b/test/backupstore/testfiles/s3simulator.conf new file mode 100644 index 000000000..c9895e9ff --- /dev/null +++ b/test/backupstore/testfiles/s3simulator.conf @@ -0,0 +1,10 @@ +AccessKey = 0PN5J17HBGZHT7JJ3X82 +SecretKey = 
uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o +StoreDirectory = testfiles/store +AddressPrefix = http://localhost:22080 + +Server +{ + PidFile = testfiles/s3simulator.pid + ListenAddresses = inet:localhost:22080 +}