Source listing: vm_insnhelper.h, from Ruby 3.3.2p78 (2024-05-30 revision e5a195edf62fe1bf7146a191da13fa1c4fecbd71).
1#ifndef RUBY_INSNHELPER_H
2#define RUBY_INSNHELPER_H
3/**********************************************************************
4
5 insnhelper.h - helper macros to implement each instructions
6
7 $Author$
8 created at: 04/01/01 15:50:34 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
/* Process-global VM counters/serials defined elsewhere in the VM.
 * (Consumers are outside this header — see their definitions for details.) */
RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_invalidations;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_misses;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;

#if USE_YJIT || USE_RJIT // We want vm_insns_count on any JIT-enabled build.
// Increment vm_insns_count for --yjit-stats. We increment this even when
// --yjit or --yjit-stats is not used because branching to skip it is slower.
// We also don't use ATOMIC_INC for performance, allowing inaccuracy on Ractors.
#define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
#else
#define JIT_COLLECT_USAGE_INSN(insn) // none
#endif

#if VM_COLLECT_USAGE_DETAILS
/* Detailed usage analysis: forward every executed instruction, operand
 * access and register access to the vm_collect_usage_* hooks. */
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#else
/* Detailed collection disabled: only the cheap JIT insn counter remains. */
#define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) // none
#define COLLECT_USAGE_REGISTER(reg, s) // none
#endif
37
38/**********************************************************/
39/* deal with stack */
40/**********************************************************/
41
/* Value-stack primitives. SP points one past the current top of stack
 * (PUSH writes at SP then bumps it; TOPN(0) reads *(SP-1)). */
#define PUSH(x) (SET_SV(x), INC_SP(1))        /* push x */
#define TOPN(n) (*(GET_SP()-(n)-1))           /* peek n-th from top; 0 == top */
#define POPN(n) (DEC_SP(n))                   /* drop n values */
#define POP() (DEC_SP(1))                     /* drop the top value */
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n)) /* address of the n-th slot below SP */
47
48/**********************************************************/
49/* deal with registers */
50/**********************************************************/
51
/* Aliases for the VM registers held in the current control frame.
 * `reg_cfp` (and `ec` for RESTORE_REGS) are expected to be locals in
 * scope wherever these macros expand. */
#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC (VM_REG_CFP->pc)
#define VM_REG_SP (VM_REG_CFP->sp)
#define VM_REG_EP (VM_REG_CFP->ep)

/* Reload the cached control-frame pointer from ec after ec->cfp may
 * have changed (e.g. across a call that pushed/popped frames). */
#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)
60
#if VM_COLLECT_USAGE_DETAILS
/* Which VM register an access refers to (reported to
 * vm_collect_usage_register()). */
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
/* Whether the recorded register access was a read or a write. */
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

/* Record a register access and evaluate to v, so the helper can wrap an
 * arbitrary register expression without changing its value. */
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
  (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
/* Collection disabled: pass the value through untouched. */
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif
80
/* PC — program counter. All accessors funnel through
 * COLLECT_USAGE_REGISTER_HELPER so usage analysis can observe them. */
#define GET_PC() (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x) (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())        /* instruction word at PC */
#define GET_OPERAND(n) (GET_PC()[(n)])        /* n-th operand word after PC */
#define ADD_PC(n) (SET_PC(VM_REG_PC + (n)))   /* advance PC by n words */
#define JUMP(dst) (SET_PC(VM_REG_PC + (dst))) /* PC-relative jump by dst words */

/* frame pointer, environment pointer */
#define GET_CFP() (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP() (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x) (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP() (VM_EP_LEP(GET_EP()))       /* local EP, via VM_EP_LEP() */

/* SP — stack pointer */
#define GET_SP() (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
    /* set current stack value as x (after a Ractor-ownership check) */
102
/* instruction sequence C struct of the current frame */
#define GET_ISEQ() (GET_CFP()->iseq)

/**********************************************************/
/* deal with variables */
/**********************************************************/

/* Follow the environment chain one level up: the specval slot holds the
 * previous EP with the low two bits masked off. */
#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))

/**********************************************************/
/* deal with values */
/**********************************************************/

/* self object of the current frame */
#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))
117
118/**********************************************************/
119/* deal with control flow 2: method/iterator */
120/**********************************************************/
121
122/* set fastpath when cached method is *NOT* protected
123 * because inline method cache does not care about receiver.
124 */
125
126static inline void
127CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
128{
129 if (LIKELY(enabled)) {
130 vm_cc_call_set(cc, func);
131 }
132}
133
134#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
135
136/**********************************************************/
137/* deal with control flow 3: exception */
138/**********************************************************/
139
140
141/**********************************************************/
142/* deal with stack canary */
143/**********************************************************/
144
#if VM_CHECK_MODE > 0
/* Write a canary VALUE into the slot just above the stack top so that
 * CHECK_CANARY can later detect an instruction that clobbered it.
 * When cond is false the slot is still overwritten (with Qfalse) so no
 * stale canary survives.  Note: declares a local `canary` that
 * CHECK_CANARY in the same scope relies on. */
#define SETUP_CANARY(cond) \
    VALUE *canary = 0; \
    if (cond) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else {\
        SET_SV(Qfalse); /* cleanup */ \
    }
/* Verify the canary placed by SETUP_CANARY is intact; report a dead
 * canary via rb_vm_canary_is_found_dead() otherwise. */
#define CHECK_CANARY(cond, insn) \
    if (cond) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            rb_vm_canary_is_found_dead(insn, *canary); \
        } \
    }
#else
/* Checks disabled: keep the same statement shape so call sites parse. */
#define SETUP_CANARY(cond) if (cond) {} else {}
#define CHECK_CANARY(cond, insn) if (cond) {(void)(insn);}
#endif
168
169/**********************************************************/
170/* others */
171/**********************************************************/
172
/* Fall back from a specialized instruction to a plain
 * opt_send_without_block: rewind PC to the instruction head and
 * re-dispatch the original instruction body. */
#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t insn_width = attr_width_opt_send_without_block(0); \
    ADD_PC(-insn_width); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)

/* Global class-variable state serial; incremented to signal a change
 * (presumably invalidating cvar caches — see users of the serial). */
#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)
181
182static inline struct vm_throw_data *
183THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
184{
185 struct vm_throw_data *obj = (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, 0, 0);
186 obj->throw_state = st;
187 return obj;
188}
189
190static inline VALUE
191THROW_DATA_VAL(const struct vm_throw_data *obj)
192{
193 VM_ASSERT(THROW_DATA_P(obj));
194 return obj->throw_obj;
195}
196
197static inline const rb_control_frame_t *
198THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj)
199{
200 VM_ASSERT(THROW_DATA_P(obj));
201 return obj->catch_frame;
202}
203
204static inline int
205THROW_DATA_STATE(const struct vm_throw_data *obj)
206{
207 VM_ASSERT(THROW_DATA_P(obj));
208 return obj->throw_state;
209}
210
/* Non-zero iff the CONSUMED flag is set on obj.
 * Note: returns the raw masked flag bits, not a normalized 0/1. */
static inline int
THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->flags & THROW_DATA_CONSUMED;
}
217
/* Set the control frame that should catch this throw. */
static inline void
THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->catch_frame = cfp;
}
224
/* Set the throw state of obj. */
static inline void
THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->throw_state = st;
}
231
232static inline void
233THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
234{
235 if (THROW_DATA_P(obj) &&
236 THROW_DATA_STATE(obj) == TAG_BREAK) {
237 obj->flags |= THROW_DATA_CONSUMED;
238 }
239}
240
/* Call-info flag predicates.  Each evaluates to the raw masked flag
 * bits of ci (non-zero means the flag is set). */
#define IS_ARGS_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)
246
247static inline bool
248vm_call_cacheable(const struct rb_callinfo *ci, const struct rb_callcache *cc)
249{
250 return (vm_ci_flag(ci) & VM_CALL_FCALL) ||
251 METHOD_ENTRY_VISI(vm_cc_cme(cc)) != METHOD_VISI_PROTECTED;
252}
253/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
254 can be used as a fastpath. */
255static inline bool
256vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
257{
258 return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && vm_call_cacheable(ci, cc);
259}
260
261#endif /* RUBY_INSNHELPER_H */
Cross-references (from the Doxygen listing):
- RUBY_EXTERN — declaration of externally visible global variables (dllexport.h:45)
- THROW_DATA — definition at imemo.h:61
- VALUE — uintptr_t; type that represents a Ruby object (value.h:40)