
Commit 3b37a6d

Add basic spinlock tests to regression tests.
As s_lock_test, the already existing test for spinlocks, isn't run in an automated fashion (and doesn't test a normal backend environment), adding tests that are run as part of a normal regression run is a good idea. Particularly in light of several recent and upcoming spinlock related fixes.

Currently the new tests are run as part of the pre-existing test_atomic_ops() test. That perhaps can be quibbled about, but for now seems ok.

The only operations that s_lock_test tests but the new tests don't are the detection of a stuck spinlock and S_LOCK_FREE (which is otherwise unused, not implemented on all platforms, and will be removed).

This currently contains a test for more than INT_MAX spinlocks (only run with --disable-spinlocks), to ensure the recent commit fixing a bug with more than INT_MAX spinlock initializations is correct. That test is somewhat slow, so we might want to disable it after a few days.

It might be worth retiring s_lock_test after this. The added coverage of a stuck spinlock probably isn't worth the added complexity?

Author: Andres Freund
Discussion: https://postgr.es/m/20200606023103.avzrctgv7476xj7i@alap3.anarazel.de
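A side note on why more than INT_MAX initializations is a hazard at all: with --disable-spinlocks, each "spinlock" is emulated by mapping it onto one of a fixed pool of semaphores via a 32-bit counter, as the commit message says. The sketch below is illustrative only, with assumed names and a deliberately simplified shape (loosely inspired by the emulation in src/backend/storage/lmgr/spin.c), not the actual implementation:

#include <stdint.h>

/*
 * Illustrative sketch, not PostgreSQL code: an emulated spinlock is just an
 * index into a fixed pool of semaphores, assigned round-robin at init time.
 * If the counter below were a signed int, ++counter would overflow after
 * INT_MAX initializations and the modulo could yield a negative,
 * out-of-range index -- hence the new regression test that performs close
 * to INT32_MAX initializations.
 */
#define NUM_EMULATION_SEMAPHORES 128	/* assumed pool size */

static uint32_t init_counter = 0;	/* unsigned, so wraparound is well
									 * defined and stays in range */

static void
emulated_spinlock_init(volatile int *lock)
{
	*lock = (int) ((++init_counter) % NUM_EMULATION_SEMAPHORES);
}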
1 parent a3235a5 commit 3b37a6d

File tree

1 file changed: +109 −0 lines changed

src/test/regress/regress.c

Lines changed: 109 additions & 0 deletions
@@ -34,6 +34,7 @@
 #include "optimizer/optimizer.h"
 #include "optimizer/plancat.h"
 #include "port/atomics.h"
+#include "storage/spin.h"
 #include "utils/builtins.h"
 #include "utils/geo_decls.h"
 #include "utils/memutils.h"
@@ -794,6 +795,108 @@ test_atomic_uint64(void)
 	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
 }
 
+/*
+ * Perform, fairly minimal, testing of the spinlock implementation.
+ *
+ * It's likely worth expanding these to actually test concurrency etc, but
+ * having some regularly run tests is better than none.
+ */
+static void
+test_spinlock(void)
+{
+	/*
+	 * Basic tests for spinlocks, as well as the underlying operations.
+	 *
+	 * We embed the spinlock in a struct with other members to test that the
+	 * spinlock operations don't perform too wide writes.
+	 */
+	{
+		struct test_lock_struct
+		{
+			char		data_before[4];
+			slock_t		lock;
+			char		data_after[4];
+		} struct_w_lock;
+
+		memcpy(struct_w_lock.data_before, "abcd", 4);
+		memcpy(struct_w_lock.data_after, "ef12", 4);
+
+		/* test basic operations via the SpinLock* API */
+		SpinLockInit(&struct_w_lock.lock);
+		SpinLockAcquire(&struct_w_lock.lock);
+		SpinLockRelease(&struct_w_lock.lock);
+
+		/* test basic operations via underlying S_* API */
+		S_INIT_LOCK(&struct_w_lock.lock);
+		S_LOCK(&struct_w_lock.lock);
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/* and that "contended" acquisition works */
+		s_lock(&struct_w_lock.lock, "testfile", 17, "testfunc");
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/*
+		 * Check, using TAS directly, that a single spin cycle doesn't block
+		 * when acquiring an already acquired lock.
+		 */
+#ifdef TAS
+		S_LOCK(&struct_w_lock.lock);
+
+		if (!TAS(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+
+#ifdef TAS_SPIN
+		if (!TAS_SPIN(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+#endif							/* defined(TAS_SPIN) */
+
+		S_UNLOCK(&struct_w_lock.lock);
+#endif							/* defined(TAS) */
+
+		/*
+		 * Verify that after all of this the non-lock contents are still
+		 * correct.
+		 */
+		if (memcmp(struct_w_lock.data_before, "abcd", 4) != 0)
+			elog(ERROR, "padding before spinlock modified");
+		if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0)
+			elog(ERROR, "padding after spinlock modified");
+	}
+
+	/*
+	 * Ensure that allocating more than INT32_MAX emulated spinlocks works.
+	 * That's interesting because the spinlock emulation uses a 32bit integer
+	 * to map spinlocks onto semaphores. There've been bugs...
+	 */
+#ifndef HAVE_SPINLOCKS
+	{
+		/*
+		 * Initialize enough spinlocks to advance counter close to
+		 * wraparound. It's too expensive to perform acquire/release for
+		 * each, as those may be syscalls when the spinlock emulation is
+		 * used (and even just atomic TAS would be expensive).
+		 */
+		for (uint32 i = 0; i < INT32_MAX - 100000; i++)
+		{
+			slock_t		lock;
+
+			SpinLockInit(&lock);
+		}
+
+		for (uint32 i = 0; i < 200000; i++)
+		{
+			slock_t		lock;
+
+			SpinLockInit(&lock);
+
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+		}
+	}
+#endif
+}
 
 PG_FUNCTION_INFO_V1(test_atomic_ops);
 Datum
@@ -805,6 +908,12 @@ test_atomic_ops(PG_FUNCTION_ARGS)
 
 	test_atomic_uint64();
 
+	/*
+	 * Arguably this shouldn't be tested as part of this function, but it's
+	 * closely enough related that that seems ok for now.
+	 */
+	test_spinlock();
+
 	PG_RETURN_BOOL(true);
 }
 
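For readers unfamiliar with the S_LOCK layer exercised above: by the convention documented in src/include/storage/s_lock.h, TAS() attempts to acquire the lock without waiting and returns zero on success, which is why a second TAS() on an already-held lock is expected to return nonzero. A toy, non-atomic sketch of that contract (plain C, not any of the platform-specific implementations):

#include <stdio.h>

/*
 * Toy test-and-set over a plain int, mimicking the zero-on-success
 * convention the regression test relies on. Not atomic; for illustration
 * only.
 */
static int
toy_tas(volatile int *lock)
{
	int		old = *lock;

	*lock = 1;
	return old;					/* 0 iff the lock was free */
}

int
main(void)
{
	volatile int lock = 0;

	if (toy_tas(&lock) != 0)
		printf("bug: could not acquire free lock\n");
	if (toy_tas(&lock) == 0)
		printf("bug: acquired already held lock\n");
	lock = 0;					/* the S_UNLOCK step */
	return 0;
}

In a regression run, test_spinlock() is reached via the SQL-callable test_atomic_ops() function shown in the last hunk, e.g. with SELECT test_atomic_ops(); from the regression SQL scripts.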
