@@ -425,82 +425,107 @@ pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline void
 pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
+	/*
+	 * Can't necessarily enforce alignment - and don't need it - when using
+	 * the spinlock based fallback implementation. Therefore only assert when
+	 * not using it.
+	 */
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
-
+#endif
 	pg_atomic_init_u64_impl(ptr, val);
 }
 
 static inline uint64
 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_read_u64_impl(ptr);
 }
 
 static inline void
 pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	pg_atomic_write_u64_impl(ptr, val);
 }
 
 static inline uint64
 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
-
+#endif
 	return pg_atomic_exchange_u64_impl(ptr, newval);
 }
 
 static inline bool
 pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
 							   uint64 *expected, uint64 newval)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
 	AssertPointerAlignment(expected, 8);
+#endif
 	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
 }
 
 static inline uint64
 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_add_u64_impl(ptr, add_);
 }
 
 static inline uint64
 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	Assert(sub_ != PG_INT64_MIN);
 	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
 }
 
 static inline uint64
 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_and_u64_impl(ptr, and_);
 }
 
 static inline uint64
 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_or_u64_impl(ptr, or_);
 }
 
 static inline uint64
 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_add_fetch_u64_impl(ptr, add_);
 }
 
 static inline uint64
 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	Assert(sub_ != PG_INT64_MIN);
 	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
 }
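The guarded assertions above only affect assert-enabled builds; the calling convention of the 64-bit API is unchanged, and on platforms that define PG_HAVE_ATOMIC_U64_SIMULATION the same callers work unchanged because the spinlock-based fallback does not require natural alignment. As a minimal sketch (not part of this patch), the following shows how a caller typically drives these functions with a compare-and-swap retry loop; the shared counter and the clamp_add_u64() helper are hypothetical names introduced only for illustration, and the variable is assumed to live in suitably aligned shared memory.

#include "postgres.h"
#include "port/atomics.h"

/* hypothetical shared-memory counter, assumed 8-byte aligned */
static pg_atomic_uint64 counter;

static void
counter_init(void)
{
	/* asserts 8-byte alignment unless the spinlock fallback is in use */
	pg_atomic_init_u64(&counter, 0);
}

/* hypothetical helper: add to the counter but never exceed 'limit' */
static uint64
clamp_add_u64(uint64 limit, uint64 add)
{
	uint64		old;
	uint64		new_val;

	old = pg_atomic_read_u64(&counter);
	do
	{
		new_val = Min(old + add, limit);
		/*
		 * On failure, pg_atomic_compare_exchange_u64() stores the current
		 * value into 'old', so the loop simply recomputes and retries.
		 */
	} while (!pg_atomic_compare_exchange_u64(&counter, &old, new_val));

	return new_val;
}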