Ruby 3.3.2p78 (2024-05-30 revision e5a195edf62fe1bf7146a191da13fa1c4fecbd71)
atomic.h
1#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
2#define RUBY_ATOMIC_H
27#include "ruby/internal/config.h"
28
29#ifdef STDC_HEADERS
30# include <stddef.h> /* size_t */
31#endif
32
33#ifdef HAVE_SYS_TYPES_H
34# include <sys/types.h> /* ssize_t */
35#endif
36
37#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
38# pragma intrinsic(_InterlockedOr)
39#elif defined(__sun) && defined(HAVE_ATOMIC_H)
40# include <atomic.h>
41#endif
42
43#include "ruby/assert.h"
44#include "ruby/backward/2/limits.h"
49#include "ruby/internal/cast.h"
50#include "ruby/internal/value.h"
53
54/*
55 * Asserts that your environment supports more than one atomic type. These
56 * days most systems have this property (C11 is well over a decade old, after
57 * all), but we still support older ones.
58 */
59#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
60# define RUBY_ATOMIC_GENERIC_MACRO 1
61#endif
62
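/*
 * Illustration (a hypothetical downstream check; `MY_EXT_WIDE_ATOMICS` is not
 * something this header or Ruby defines): code built on top of this header
 * can key type-generic fast paths off the macro.
 *
 *     #ifdef RUBY_ATOMIC_GENERIC_MACRO
 *     # define MY_EXT_WIDE_ATOMICS 1  // size_t/pointer/VALUE ops are native
 *     #endif
 */
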
68#if defined(__DOXYGEN__)
69using rb_atomic_t = std::atomic<unsigned>;
70#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
71typedef unsigned int rb_atomic_t;
72#elif defined(HAVE_GCC_SYNC_BUILTINS)
73typedef unsigned int rb_atomic_t;
74#elif defined(_WIN32)
75typedef LONG rb_atomic_t;
76#elif defined(__sun) && defined(HAVE_ATOMIC_H)
77typedef unsigned int rb_atomic_t;
78#else
79# error No atomic operation found
80#endif
81
91#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))
92
102#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))
103
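/*
 * Usage sketch (illustrative only; `pending` is a hypothetical counter, not
 * something this header provides).  The FETCH_ variants return the value the
 * variable held *before* the operation:
 *
 *     static rb_atomic_t pending;
 *
 *     rb_atomic_t before = RUBY_ATOMIC_FETCH_ADD(pending, 1);
 *     if (before == 0) {
 *         // this thread observed the 0 -> 1 transition
 *     }
 *     RUBY_ATOMIC_FETCH_SUB(pending, 1);
 */
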
114#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))
115
125#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))
126
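/*
 * Usage sketch (illustrative only; `lock_word` is hypothetical): a crude
 * test-and-set lock.  The old value tells you whether the lock was already
 * held when you tried to grab it.
 *
 *     static rb_atomic_t lock_word;            // 0: free, 1: held
 *
 *     while (RUBY_ATOMIC_EXCHANGE(lock_word, 1) != 0) {
 *         ;                                    // spin until we flip 0 -> 1
 *     }
 *     // ... critical section ...
 *     RUBY_ATOMIC_EXCHANGE(lock_word, 0);      // release
 */
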
138#define RUBY_ATOMIC_CAS(var, oldval, newval) \
139 rbimpl_atomic_cas(&(var), (oldval), (newval))
140
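/*
 * Usage sketch (illustrative only; `initialized` is hypothetical).  The macro
 * returns the value it found, so comparing that against `oldval` tells you
 * whether your swap actually happened:
 *
 *     static rb_atomic_t initialized;          // 0: not yet, 1: done
 *
 *     if (RUBY_ATOMIC_CAS(initialized, 0, 1) == 0) {
 *         // the stored value really was 0; this thread won the race and
 *         // is the one responsible for performing the initialization
 *     }
 */
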
148#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))
149
158#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))
159
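/*
 * Usage sketch (illustrative only; `high_water` and `sample` are
 * hypothetical): the classic load / compare-and-swap retry loop, here
 * keeping track of a maximum.
 *
 *     static rb_atomic_t high_water;
 *
 *     rb_atomic_t cur = RUBY_ATOMIC_LOAD(high_water);
 *     while (cur < sample) {
 *         rb_atomic_t prev = RUBY_ATOMIC_CAS(high_water, cur, sample);
 *         if (prev == cur) break;              // our sample is the new max
 *         cur = prev;                          // lost the race; retry
 *     }
 *     RUBY_ATOMIC_SET(high_water, 0);          // later: reset it atomically
 */
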
168#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))
169
178#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))
179
187#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))
188
196#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))
197
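/*
 * Usage sketch (illustrative only; `refcnt` is hypothetical).  INC/DEC throw
 * the result away, so a release path that needs to detect "last reference
 * dropped" uses FETCH_SUB instead:
 *
 *     static rb_atomic_t refcnt = 1;
 *
 *     RUBY_ATOMIC_INC(refcnt);                      // take a reference
 *     if (RUBY_ATOMIC_FETCH_SUB(refcnt, 1) == 1) {  // drop a reference
 *         // that was the last one; safe to tear the object down
 *     }
 */
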
207#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))
208
218#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))
219
231#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
232 rbimpl_atomic_size_exchange(&(var), (val))
233
245#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
246 rbimpl_atomic_size_cas(&(var), (oldval), (newval))
247
258#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))
259
270#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))
271
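/*
 * Usage sketch (illustrative only; `heap_bytes` and `len` are hypothetical).
 * The SIZE_ variants exist for counters that are naturally `size_t`-wide,
 * such as byte totals, which may be wider than `rb_atomic_t`:
 *
 *     static size_t heap_bytes;
 *
 *     RUBY_ATOMIC_SIZE_ADD(heap_bytes, len);   // on allocation
 *     RUBY_ATOMIC_SIZE_SUB(heap_bytes, len);   // on deallocation
 */
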
288#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
289 RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val))
290
299#define RUBY_ATOMIC_PTR_LOAD(var) \
300 RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&(var)))
301
313#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
314 RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (oldval), (newval)))
315
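/*
 * Usage sketch (illustrative only; `struct node`, `shared` and `fresh` are
 * hypothetical): publish a pointer exactly once, then read it back
 * atomically.
 *
 *     static struct node *shared;
 *
 *     if (RUBY_ATOMIC_PTR_CAS(shared, NULL, fresh) != NULL) {
 *         // somebody else published first; discard `fresh`
 *     }
 *     struct node *now = RUBY_ATOMIC_PTR_LOAD(shared);
 */
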
327#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
328 rbimpl_atomic_value_exchange(&(var), (val))
329
341#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
342 rbimpl_atomic_value_cas(&(var), (oldval), (newval))
343
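/*
 * Usage sketch (illustrative only; `cache` and `built` are hypothetical, and
 * using 0 for "unset" is just this example's convention): whichever thread
 * wins the CAS has its object kept, and the loser adopts the winner's.
 *
 *     static VALUE cache;                      // 0: not yet initialized
 *
 *     VALUE prev = RUBY_ATOMIC_VALUE_CAS(cache, 0, built);
 *     if (prev != 0) {
 *         built = prev;                        // lost the race
 *     }
 */
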
348static inline rb_atomic_t
349rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
350{
351#if 0
352
353#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
354 return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
355
356#elif defined(HAVE_GCC_SYNC_BUILTINS)
357 return __sync_fetch_and_add(ptr, val);
358
359#elif defined(_WIN32)
360 return InterlockedExchangeAdd(ptr, val);
361
362#elif defined(__sun) && defined(HAVE_ATOMIC_H)
363 /*
364 * `atomic_add_int_nv` takes its second argument as a signed `int`, while
365 * our `rb_atomic_t` is unsigned; we cannot pass `val` as-is and must check
366 * for overflow manually. It also returns the *new* value, hence the `- val`.
367 */
368 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
369 return atomic_add_int_nv(ptr, val) - val;
370
371#else
372# error Unsupported platform.
373#endif
374}
375
379static inline void
380rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
381{
382#if 0
383
384#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
385 /*
386 * GCC on amd64 is smart enough to detect that the return value of this
387 * `__atomic_add_fetch` is unused, and compiles it into a single `LOCK ADD`
388 * instruction.
389 */
390 __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
391
392#elif defined(HAVE_GCC_SYNC_BUILTINS)
393 __sync_add_and_fetch(ptr, val);
394
395#elif defined(_WIN32)
396 /*
397 * `InterlockedExchangeAdd` is `LOCK XADD`. There also seems to be an
398 * `_InterlockedAdd` intrinsic on ARM Windows but apparently not for x86,
399 * so we stick to `InterlockedExchangeAdd` for better portability.
400 */
401 InterlockedExchangeAdd(ptr, val);
402
403#elif defined(__sun) && defined(HAVE_ATOMIC_H)
404 /* Ditto for `atomic_add_int_nv`. */
405 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
406 atomic_add_int(ptr, val);
407
408#else
409# error Unsupported platform.
410#endif
411}
412
416static inline void
417rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
418{
419#if 0
420
421#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
422 __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
423
424#elif defined(HAVE_GCC_SYNC_BUILTINS)
425 __sync_add_and_fetch(ptr, val);
426
427#elif defined(_WIN32) && defined(_M_AMD64)
428 /* Ditto for `InterlockedExchangeAdd`. */
429 InterlockedExchangeAdd64(ptr, val);
430
431#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
432 /* Ditto for `atomic_add_int_nv`. */
433 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
434 atomic_add_long(ptr, val);
435
436#else
437 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
438
439 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
440 rbimpl_atomic_add(tmp, val);
441
442#endif
443}
444
448static inline void
449rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
450{
451#if 0
452
453#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
454 rbimpl_atomic_add(ptr, 1);
455
456#elif defined(_WIN32)
457 InterlockedIncrement(ptr);
458
459#elif defined(__sun) && defined(HAVE_ATOMIC_H)
460 atomic_inc_uint(ptr);
461
462#else
463 rbimpl_atomic_add(ptr, 1);
464
465#endif
466}
467
471static inline void
472rbimpl_atomic_size_inc(volatile size_t *ptr)
473{
474#if 0
475
476#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
477 rbimpl_atomic_size_add(ptr, 1);
478
479#elif defined(_WIN32) && defined(_M_AMD64)
480 InterlockedIncrement64(ptr);
481
482#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
483 atomic_inc_ulong(ptr);
484
485#else
486 rbimpl_atomic_size_add(ptr, 1);
487
488#endif
489}
490
494static inline rb_atomic_t
495rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
496{
497#if 0
498
499#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
500 return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);
501
502#elif defined(HAVE_GCC_SYNC_BUILTINS)
503 return __sync_fetch_and_sub(ptr, val);
504
505#elif defined(_WIN32)
506 /* rb_atomic_t is signed here! Safe to do `-val`. */
507 return InterlockedExchangeAdd(ptr, -val);
508
509#elif defined(__sun) && defined(HAVE_ATOMIC_H)
510 /* Ditto for `rbimpl_atomic_fetch_add`. */
511 const signed neg = -1;
512 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
513 return atomic_add_int_nv(ptr, neg * val) + val;
514
515#else
516# error Unsupported platform.
517#endif
518}
519
523static inline void
524rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
525{
526#if 0
527
528#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
529 __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
530
531#elif defined(HAVE_GCC_SYNC_BUILTINS)
532 __sync_sub_and_fetch(ptr, val);
533
534#elif defined(_WIN32)
535 InterlockedExchangeAdd(ptr, -val);
536
537#elif defined(__sun) && defined(HAVE_ATOMIC_H)
538 const signed neg = -1;
539 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
540 atomic_add_int(ptr, neg * val);
541
542#else
543# error Unsupported platform.
544#endif
545}
546
550static inline void
551rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
552{
553#if 0
554
555#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
556 __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
557
558#elif defined(HAVE_GCC_SYNC_BUILTINS)
559 __sync_sub_and_fetch(ptr, val);
560
561#elif defined(_WIN32) && defined(_M_AMD64)
562 const ssize_t neg = -1;
563 InterlockedExchangeAdd64(ptr, neg * val);
564
565#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
566 const signed neg = -1;
567 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
568 atomic_add_long(ptr, neg * val);
569
570#else
571 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
572
573 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
574 rbimpl_atomic_sub(tmp, val);
575
576#endif
577}
578
582static inline void
583rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
584{
585#if 0
586
587#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
588 rbimpl_atomic_sub(ptr, 1);
589
590#elif defined(_WIN32)
591 InterlockedDecrement(ptr);
592
593#elif defined(__sun) && defined(HAVE_ATOMIC_H)
594 atomic_dec_uint(ptr);
595
596#else
597 rbimpl_atomic_sub(ptr, 1);
598
599#endif
600}
601
605static inline void
606rbimpl_atomic_size_dec(volatile size_t *ptr)
607{
608#if 0
609
610#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
611 rbimpl_atomic_size_sub(ptr, 1);
612
613#elif defined(_WIN32) && defined(_M_AMD64)
614 InterlockedDecrement64(ptr);
615
616#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
617 atomic_dec_ulong(ptr);
618
619#else
620 rbimpl_atomic_size_sub(ptr, 1);
621
622#endif
623}
624
628static inline void
629rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
630{
631#if 0
632
633#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
634 __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);
635
636#elif defined(HAVE_GCC_SYNC_BUILTINS)
637 __sync_or_and_fetch(ptr, val);
638
639#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
640 _InterlockedOr(ptr, val);
641
642#elif defined(_WIN32) && defined(__GNUC__)
643 /* This was for old MinGW. Maybe not needed any longer? */
644 __asm__(
645 "lock\n\t"
646 "orl\t%1, %0"
647 : "+m"(*ptr)
648 : "Ir"(val));
649
650#elif defined(_WIN32) && defined(_M_IX86)
651 __asm mov eax, ptr;
652 __asm mov ecx, val;
653 __asm lock or [eax], ecx;
654
655#elif defined(__sun) && defined(HAVE_ATOMIC_H)
656 atomic_or_uint(ptr, val);
657
658#else
659# error Unsupported platform.
660#endif
661}
662
663/* Nobody uses this but for theoretical backwards compatibility... */
664#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
665static inline rb_atomic_t
666rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
667{
668 return rbimpl_atomic_or(var, val);
669}
670#endif
671
675static inline rb_atomic_t
676rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
677{
678#if 0
679
680#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
681 return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
682
683#elif defined(HAVE_GCC_SYNC_BUILTINS)
684 return __sync_lock_test_and_set(ptr, val);
685
686#elif defined(_WIN32)
687 return InterlockedExchange(ptr, val);
688
689#elif defined(__sun) && defined(HAVE_ATOMIC_H)
690 return atomic_swap_uint(ptr, val);
691
692#else
693# error Unsupported platform.
694#endif
695}
696
700static inline size_t
701rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
702{
703#if 0
704
705#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
706 return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
707
708#elif defined(HAVE_GCC_SYNC_BUILTINS)
709 return __sync_lock_test_and_set(ptr, val);
710
711#elif defined(_WIN32) && defined(_M_AMD64)
712 return InterlockedExchange64(ptr, val);
713
714#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
715 return atomic_swap_ulong(ptr, val);
716
717#else
718 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
719
720 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
721 const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
722 return RBIMPL_CAST((size_t)ret);
723
724#endif
725}
726
730static inline void *
731rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
732{
733#if 0
734
735#elif defined(InterlockedExchangePointer)
736 /* const_cast */
737 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
738 PVOID pval = RBIMPL_CAST((PVOID)val);
739 return InterlockedExchangePointer(pptr, pval);
740
741#elif defined(__sun) && defined(HAVE_ATOMIC_H)
742 return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));
743
744#else
745 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
746
747 const size_t sval = RBIMPL_CAST((size_t)val);
748 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
749 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
750 return RBIMPL_CAST((void *)sret);
751
752#endif
753}
754
758static inline VALUE
759rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
760{
761 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
762
763 const size_t sval = RBIMPL_CAST((size_t)val);
764 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
765 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
766 return RBIMPL_CAST((VALUE)sret);
767}
768
772static inline rb_atomic_t
773rbimpl_atomic_load(volatile rb_atomic_t *ptr)
774{
775#if 0
776
777#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
778 return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
779#else
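    /* Adding zero changes nothing but still hands back the current value,
     * which gives us an atomic read. */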
780 return rbimpl_atomic_fetch_add(ptr, 0);
781#endif
782}
783
787static inline void
788rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
789{
790#if 0
791
792#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
793 __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
794
795#else
796 /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
797 rbimpl_atomic_exchange(ptr, val);
798
799#endif
800}
801
805static inline rb_atomic_t
806rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
807{
808#if 0
809
810#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
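    /* On failure the builtin writes the value it actually found into
     * `oldval`; on success `oldval` already holds the previous value.
     * Returning `oldval` therefore matches the other branches. */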
811 __atomic_compare_exchange_n(
812 ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
813 return oldval;
814
815#elif defined(HAVE_GCC_SYNC_BUILTINS)
816 return __sync_val_compare_and_swap(ptr, oldval, newval);
817
818#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
819 return InterlockedCompareExchange(ptr, newval, oldval);
820
821#elif defined(_WIN32)
822 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
823 PVOID pold = RBIMPL_CAST((PVOID)oldval);
824 PVOID pnew = RBIMPL_CAST((PVOID)newval);
825 PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
826 return RBIMPL_CAST((rb_atomic_t)pret);
827
828#elif defined(__sun) && defined(HAVE_ATOMIC_H)
829 return atomic_cas_uint(ptr, oldval, newval);
830
831#else
832# error Unsupported platform.
833#endif
834}
835
836/* Nobody uses this but for theoretical backwards compatibility... */
837#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
838static inline rb_atomic_t
839rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
840{
841 return rbimpl_atomic_cas(var, oldval, newval);
842}
843#endif
844
848static inline size_t
849rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
850{
851#if 0
852
853#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
854 __atomic_compare_exchange_n(
855 ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
856 return oldval;
857
858#elif defined(HAVE_GCC_SYNC_BUILTINS)
859 return __sync_val_compare_and_swap(ptr, oldval, newval);
860
861#elif defined(_WIN32) && defined(_M_AMD64)
862 return InterlockedCompareExchange64(ptr, newval, oldval);
863
864#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
865 return atomic_cas_ulong(ptr, oldval, newval);
866
867#else
868 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
869
870 volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
871 return rbimpl_atomic_cas(tmp, oldval, newval);
872
873#endif
874}
875
879static inline void *
880rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
881{
882#if 0
883
884#elif defined(InterlockedExchangePointer)
885 /* Can we say that InterlockedCompareExchangePointer surely exists when
886 * InterlockedExchangePointer is defined? It seems so, but...? */
887 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
888 PVOID pold = RBIMPL_CAST((PVOID)oldval);
889 PVOID pnew = RBIMPL_CAST((PVOID)newval);
890 return InterlockedCompareExchangePointer(pptr, pnew, pold);
891
892#elif defined(__sun) && defined(HAVE_ATOMIC_H)
893 void *pold = RBIMPL_CAST((void *)oldval);
894 void *pnew = RBIMPL_CAST((void *)newval);
895 return atomic_cas_ptr(ptr, pold, pnew);
896
897
898#else
899 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
900
901 const size_t snew = RBIMPL_CAST((size_t)newval);
902 const size_t sold = RBIMPL_CAST((size_t)oldval);
903 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
904 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
905 return RBIMPL_CAST((void *)sret);
906
907#endif
908}
909
913static inline void *
914rbimpl_atomic_ptr_load(void **ptr)
915{
916#if 0
917
918#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
919 return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
920#else
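    /* A compare-and-swap whose old and new values are identical never
     * changes `*ptr`, yet still reports what was really stored there, so it
     * doubles as an atomic read. */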
921 void *val = *ptr;
922 return rbimpl_atomic_ptr_cas(ptr, val, val);
923#endif
924}
925
929static inline VALUE
930rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
931{
932 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
933
934 const size_t snew = RBIMPL_CAST((size_t)newval);
935 const size_t sold = RBIMPL_CAST((size_t)oldval);
936 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
937 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
938 return RBIMPL_CAST((VALUE)sret);
939}
941#endif /* RUBY_ATOMIC_H */