1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2018-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Description: Process signal queue implementation.
23 *
24 * Author: Rickard Green
25 */
26
27
28 #ifdef HAVE_CONFIG_H
29 # include "config.h"
30 #endif
31
32 #include "sys.h"
33 #include "global.h"
34 #include "dist.h"
35 #include "erl_process.h"
36 #include "erl_port_task.h"
37 #include "erl_trace.h"
38 #include "beam_bp.h"
39 #include "erl_binary.h"
40 #include "big.h"
41 #include "erl_gc.h"
42 #include "bif.h"
43 #include "erl_bif_unique.h"
44 #include "erl_proc_sig_queue.h"
45 #include "dtrace-wrapper.h"
46
47 #define ERTS_SIG_REDS_CNT_FACTOR 4
48 #define ERTS_PROC_SIG_TRACE_COUNT_LIMIT 200
49
50 #define ERTS_SIG_IS_GEN_EXIT(sig) \
51 (ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag) == ERTS_SIG_Q_TYPE_GEN_EXIT)
52 #define ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig) \
53 (ASSERT(ERTS_SIG_IS_GEN_EXIT(sig)),is_non_value(get_exit_signal_data(sig)->reason))
54
55
56 #define ERTS_SIG_LNK_X_FLAG_NORMAL_KILLS (((Uint32) 1) << 0)
57 #define ERTS_SIG_LNK_X_FLAG_CONNECTION_LOST (((Uint32) 1) << 1)
58
59 #define ERTS_PROC_SIG_ADJ_MSGQ_SCAN_FACTOR \
60 (ERTS_CLA_SCAN_WORDS_PER_RED / ERTS_SIG_REDS_CNT_FACTOR)
61 #define ERTS_PROC_SIG_ADJ_MSGQ_COPY_FACTOR \
62 (ERTS_PROC_SIG_ADJ_MSGQ_SCAN_FACTOR / 8)
63 #define ERTS_PROC_SIG_ADJ_MSGQ_MSGS_FACTOR \
64 25
65
66 Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler);
67 Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_high);
68 Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_max);
69
70 #ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
71 Eterm erts_old_recv_marker_id;
72 #endif
73
void
erts_proc_sig_queue_init(void)
{
    /*
     * One-time initialization of the signal-queue implementation.
     *
     * The compile-time asserts verify that the signal tag encoding
     * has room for all defined signal operations and types.
     */
    ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MASK > ERTS_SIG_Q_OP_MAX);
    ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK > ERTS_SIG_Q_OP_MAX);
    ERTS_CT_ASSERT(ERTS_SIG_Q_TYPE_MASK >= ERTS_SIG_Q_TYPE_MAX);

#ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
    {
        /*
         * Create the globally shared receive-marker reference used by
         * the old receive-mark instructions. It is allocated as literal
         * data since it lives for the entire lifetime of the emulator.
         */
        Eterm *hp = erts_alloc(ERTS_ALC_T_LITERAL,
                               ERTS_REF_THING_SIZE*sizeof(Eterm));
        erts_old_recv_marker_id = erts_make_ref_in_buffer(hp);
        erts_set_literal_tag(&erts_old_recv_marker_id, hp, ERTS_REF_THING_SIZE);
    }
#endif

}
91
/*
 * Per-invocation state used while handling the signal queue when
 * receive tracing may be enabled for the process.
 */
typedef struct {
    int active;                 /* any tracing currently active? */
    int procs;                  /* process ('procs') tracing enabled? */
    struct {
        int active;             /* message tracing active? */
#if defined(USE_VM_PROBES)
        int vm_probes;          /* VM probes (dtrace/systemtap) enabled? */
        char receiver_name[DTRACE_TERM_BUF_SIZE]; /* buffer for probe output */
#endif
        int receive_trace;      /* 'receive' trace flag set? */
        int bp_ix;              /* breakpoint index used for tracing */
        ErtsMessage **next;     /* next message to consider for tracing */
        ErtsTracingEvent *event;
    } messages;
} ErtsSigRecvTracing;
107
/*
 * Payload of exit/'DOWN' signals. Stored directly after the used part
 * of the signal's combined heap fragment (see get_exit_signal_data()).
 */
typedef struct {
    Eterm message;              /* complete message term of the signal */
    Eterm from;                 /* sender of the signal */
    Eterm reason;               /* exit reason; THE_NON_VALUE when the
                                 * reason is still in external (dist)
                                 * format (see
                                 * ERTS_SIG_IS_GEN_EXIT_EXTERNAL()) */
    union {
        Eterm ref;              /* monitor reference ('DOWN' signals) */
        struct {
            Uint32 flags;       /* ERTS_SIG_LNK_X_FLAG_* flags */
            /*
             * connection_id is only set when the
             * ERTS_SIG_LNK_X_FLAG_CONNECTION_LOST
             * flag has been set...
             */
            Uint32 connection_id;
        } link;
    } u;
} ErtsExitSignalData;
125
/* Payload of a persistent monitor message signal. */
typedef struct {
    Eterm message;              /* complete message term */
    Eterm key;                  /* key identifying the monitor */
} ErtsPersistMonMsg;

/*
 * Signal used for distributed unlink operations. The external pid of
 * the remote process is copied into the trailing 'heap' area (see
 * make_sig_dist_unlink_op()).
 */
typedef struct {
    ErtsSignalCommon common;
    Eterm nodename;             /* name of the remote node */
    Uint32 connection_id;       /* identifies the connection instance */
    Eterm local;                /* internal pid (immediate) */
    Eterm remote;               /* external pid (heap for it follows) */
    Uint64 id;                  /* unlink operation identifier */
    Eterm heap[EXTERNAL_PID_HEAP_SIZE]; /* storage for 'remote' */
} ErtsSigDistUnlinkOp;
140
/* Payload of a distributed spawn-reply signal. */
typedef struct {
    Eterm message;              /* complete reply message term */
    Eterm ref;                  /* spawn request reference */
    Eterm result;               /* result term of the spawn request */
    ErtsLink *link;             /* link to set up, if any */
    Eterm *patch_point;         /* heap position to patch with the result
                                 * -- NOTE(review): verify against the
                                 * spawn-reply handling code */
} ErtsDistSpawnReplySigData;

/*
 * Trace-change-state signal; adjusts trace flags and tracer of the
 * receiving process (see handle_trace_change_state()).
 */
typedef struct {
    ErtsSignalCommon common;
    Uint flags_on;              /* trace flags to enable */
    Uint flags_off;             /* trace flags to disable */
    Eterm tracer;               /* tracer to install; checked with
                                 * is_value() before use */
} ErtsSigTraceInfo;
155
156 #define ERTS_SIG_GL_FLG_ACTIVE (((erts_aint_t) 1) << 0)
157 #define ERTS_SIG_GL_FLG_RECEIVER (((erts_aint_t) 1) << 1)
158 #define ERTS_SIG_GL_FLG_SENDER (((erts_aint_t) 1) << 2)
159
/*
 * Group-leader-change signal. The atomic 'flags' field carries the
 * ERTS_SIG_GL_FLG_* bits; destroy_sig_group_leader() releases the
 * off-heap data in 'oh' when the signal is destroyed.
 */
typedef struct {
    ErtsSignalCommon common;
    erts_atomic_t flags;        /* ERTS_SIG_GL_FLG_* state bits */
    Eterm group_leader;         /* the new group leader */
    Eterm reply_to;             /* who to send the reply to */
    Eterm ref;                  /* request reference */
    ErlOffHeap oh;              /* off-heap list for 'heap' contents */
    Eterm heap[1];              /* variable-sized heap area */
} ErtsSigGroupLeader;
169
/* Payload of an is_alive request signal. */
typedef struct {
    Eterm message;              /* complete message term */
    Eterm requester;            /* pid to reply to */
} ErtsIsAliveRequest;

/* Payload of a synchronous suspend request signal. */
typedef struct {
    Eterm message;              /* complete message term */
    Eterm requester;            /* pid to reply to */
    int async;                  /* non-zero for asynchronous request */
} ErtsSyncSuspendRequest;

/* Bookkeeping for a suspend request that could not complete yet. */
typedef struct {
    ErtsMonitorSuspend *mon;    /* suspend monitor */
    ErtsMessage *sync;          /* pending sync-suspend message */
} ErtsProcSigPendingSuspend;

/*
 * Marker kept first in the signal in-queue while message-queue length
 * bookkeeping is delayed; see erts_proc_sig_fetch_msgq_len_offs__()
 * and check_push_msgq_len_offs_marker().
 */
typedef struct {
    ErtsSignalCommon common;
    Sint refc;                  /* references from process-info signals */
    Sint delayed_len;           /* queue length not yet added to
                                 * sig_qs.len */
    Sint len_offset;            /* compensation offset for sig_inq.len */
} ErtsProcSigMsgQLenOffsetMarker;

/*
 * process_info request signal; executed in the context of the
 * inspected process.
 */
typedef struct {
    ErtsSignalCommon common;
    ErtsProcSigMsgQLenOffsetMarker marker; /* embedded len-offset marker */
    Sint msgq_len_offset;       /* ERTS_PROC_SIG_PI_MSGQ_LEN_* or offset */
    Eterm requester;            /* pid to reply to */
    Eterm ref;                  /* request reference */
    ErtsORefThing oref_thing;   /* storage for 'ref' */
    Uint reserve_size;          /* heap space to reserve for the reply */
    Uint len;                   /* number of entries in item_ix */
    int flags;                  /* request flags */
    int item_ix[1]; /* of len size in reality... */
} ErtsProcessInfoSig;
205
206 #define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1)
207 #define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2)
208
/*
 * Remote-procedure-call signal; 'func' is executed in the context of
 * the receiving process.
 */
typedef struct {
    ErtsSignalCommon common;
    Eterm requester;            /* pid to reply to */
    Eterm (*func)(Process *, void *, int *, ErlHeapFragment **); /* callback */
    void *arg;                  /* opaque argument passed to 'func' */
    Eterm ref;                  /* request reference */
    ErtsORefThing oref_thing;   /* storage for 'ref' */
} ErtsProcSigRPC;

/* Payload of a copy-literal-area (CLA) adjust-message-queue signal. */
typedef struct {
    Eterm requester;            /* pid to reply to */
    Eterm request_id;           /* reference identifying the request */
} ErtsCLAData;
222
223 static void wait_handle_signals(Process *c_p);
224 static void wake_handle_signals(Process *proc);
225
226 static int handle_msg_tracing(Process *c_p,
227 ErtsSigRecvTracing *tracing,
228 ErtsMessage ***next_nm_sig);
229 static int handle_trace_change_state(Process *c_p,
230 ErtsSigRecvTracing *tracing,
231 Uint16 type,
232 ErtsMessage *sig,
233 ErtsMessage ***next_nm_sig);
234 static void getting_unlinked(Process *c_p, Eterm unlinker);
235 static void getting_linked(Process *c_p, Eterm linker);
236 static void linking(Process *c_p, Eterm to);
237
238 static void group_leader_reply(Process *c_p, Eterm to,
239 Eterm ref, int success);
240 static int stretch_limit(Process *c_p, ErtsSigRecvTracing *tp,
241 int abs_lim, int *limp, int save_in_msgq);
242 static int
243 handle_cla(Process *c_p,
244 ErtsMessage *sig,
245 ErtsMessage ***next_nm_sig,
246 int exiting);
247 static int
248 handle_move_msgq_off_heap(Process *c_p,
249 ErtsMessage *sig,
250 ErtsMessage ***next_nm_sig,
251 int exiting);
252 static void
253 send_cla_reply(Process *c_p, ErtsMessage *sig, Eterm to,
254 Eterm req_id, Eterm result);
255 static void handle_missing_spawn_reply(Process *c_p, ErtsMonitor *omon);
256
257 #ifdef ERTS_PROC_SIG_HARD_DEBUG
258 #define ERTS_PROC_SIG_HDBG_PRIV_CHKQ(P, T, NMN) \
259 do { \
260 ErtsMessage **nm_next__ = *(NMN); \
261 ErtsMessage **nm_last__ = (P)->sig_qs.nmsigs.last; \
262 if (!nm_next__ || !*nm_next__) { \
263 nm_next__ = NULL; \
264 nm_last__ = NULL; \
265 } \
266 proc_sig_hdbg_check_queue((P), \
267 1, \
268 &(P)->sig_qs.cont, \
269 (P)->sig_qs.cont_last, \
270 nm_next__, \
271 nm_last__, \
272 (T), \
273 NULL, \
274 ERTS_PSFLG_FREE); \
275 } while (0);
276 static Sint
277 proc_sig_hdbg_check_queue(Process *c_p,
278 int privq,
279 ErtsMessage **sig_next,
280 ErtsMessage **sig_last,
281 ErtsMessage **sig_nm_next,
282 ErtsMessage **sig_nm_last,
283 ErtsSigRecvTracing *tracing,
284 int *found_saved_last_p,
285 erts_aint32_t sig_psflg);
286 #else
287 #define ERTS_PROC_SIG_HDBG_PRIV_CHKQ(P, T, NMN)
288 #endif
289
/*
 * Distributed demonitor signal; the (possibly external) reference is
 * stored in the trailing 'heap' area.
 */
typedef struct {
    ErtsSignalCommon common;
    Eterm ref;                  /* monitor reference */
    Eterm heap[1];              /* variable-sized storage for 'ref' */
} ErtsSigDistProcDemonitor;
295
296 static void
destroy_dist_proc_demonitor(ErtsSigDistProcDemonitor * dmon)297 destroy_dist_proc_demonitor(ErtsSigDistProcDemonitor *dmon)
298 {
299 Eterm ref = dmon->ref;
300 if (is_external(ref)) {
301 ExternalThing *etp = external_thing_ptr(ref);
302 erts_deref_node_entry(etp->node, ref);
303 }
304 erts_free(ERTS_ALC_T_DIST_DEMONITOR, dmon);
305 }
306
/*
 * Create a distributed unlink/unlink-ack signal for operation 'op'.
 *
 * The external pid 'remote' is copied into the signal's internal heap
 * area by STORE_NC(), which also takes a reference on its node entry;
 * the reference is released by destroy_sig_dist_unlink_op().
 */
static ERTS_INLINE ErtsSigDistUnlinkOp *
make_sig_dist_unlink_op(int op, Eterm nodename, Uint32 conn_id,
                        Eterm local, Eterm remote, Uint64 id)
{
    Eterm *hp;
    ErlOffHeap oh = {0};
    ErtsSigDistUnlinkOp *sdulnk = erts_alloc(ERTS_ALC_T_SIG_DATA,
                                             sizeof(ErtsSigDistUnlinkOp));
    ASSERT(is_internal_pid(local));
    ASSERT(is_external_pid(remote));

    hp = &sdulnk->heap[0];

    sdulnk->common.tag = ERTS_PROC_SIG_MAKE_TAG(op,
                                                ERTS_SIG_Q_TYPE_DIST_LINK,
                                                0);
    sdulnk->nodename = nodename;
    sdulnk->connection_id = conn_id;
    sdulnk->local = local;
    sdulnk->remote = STORE_NC(&hp, &oh, remote);
    sdulnk->id = id;

    /* The copied pid must fit exactly in the fixed-size heap area... */
    ASSERT(&sdulnk->heap[0] < hp);
    ASSERT(hp <= &sdulnk->heap[0] + sizeof(sdulnk->heap)/sizeof(sdulnk->heap[0]));
    ASSERT(boxed_val(sdulnk->remote) == &sdulnk->heap[0]);

    return sdulnk;
}
335
336 static ERTS_INLINE void
destroy_sig_dist_unlink_op(ErtsSigDistUnlinkOp * sdulnk)337 destroy_sig_dist_unlink_op(ErtsSigDistUnlinkOp *sdulnk)
338 {
339 ASSERT(is_external_pid(sdulnk->remote));
340 ASSERT(boxed_val(sdulnk->remote) == &sdulnk->heap[0]);
341 erts_deref_node_entry(((ExternalThing *) &sdulnk->heap[0])->node,
342 make_boxed(&sdulnk->heap[0]));
343 erts_free(ERTS_ALC_T_SIG_DATA, sdulnk);
344 }
345
/*
 * Return a pointer to the ErtsExitSignalData stored directly after the
 * used part of the signal's combined heap fragment. Only valid for
 * exit, exit-linked, and monitor-down signals.
 */
static ERTS_INLINE ErtsExitSignalData *
get_exit_signal_data(ErtsMessage *xsig)
{
    ASSERT(ERTS_SIG_IS_NON_MSG(xsig));
    ASSERT((ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
            == ERTS_SIG_Q_OP_EXIT)
           || (ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
               == ERTS_SIG_Q_OP_EXIT_LINKED)
           || (ERTS_PROC_SIG_OP(((ErtsSignal *) xsig)->common.tag)
               == ERTS_SIG_Q_OP_MONITOR_DOWN));
    /* The payload lives in the unused tail of the heap fragment... */
    ASSERT(xsig->hfrag.alloc_size > xsig->hfrag.used_size);
    ASSERT((xsig->hfrag.alloc_size - xsig->hfrag.used_size)*sizeof(UWord)
           >= sizeof(ErtsExitSignalData));
    return (ErtsExitSignalData *) (char *) (&xsig->hfrag.mem[0]
                                            + xsig->hfrag.used_size);
}
362
/*
 * Return a pointer to the ErtsDistSpawnReplySigData stored directly
 * after the used part of the signal's combined heap fragment.
 */
static ERTS_INLINE ErtsDistSpawnReplySigData *
get_dist_spawn_reply_data(ErtsMessage *sig)
{
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));
    ASSERT(sig->hfrag.alloc_size > sig->hfrag.used_size);
    ASSERT((sig->hfrag.alloc_size - sig->hfrag.used_size)*sizeof(UWord)
           >= sizeof(ErtsDistSpawnReplySigData));
    return (ErtsDistSpawnReplySigData *) (char *) (&sig->hfrag.mem[0]
                                                   + sig->hfrag.used_size);
}
373
/*
 * Return a pointer to the ErtsCLAData stored directly after the used
 * part of the signal's combined heap fragment. Only valid for
 * adjust-message-queue signals of type CLA.
 */
static ERTS_INLINE ErtsCLAData *
get_cla_data(ErtsMessage *sig)
{
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));
    ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) sig)->common.tag)
           == ERTS_SIG_Q_OP_ADJ_MSGQ);
    ASSERT(ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag)
           == ERTS_SIG_Q_TYPE_CLA);
    return (ErtsCLAData *) (char *) (&sig->hfrag.mem[0]
                                     + sig->hfrag.used_size);
}
385
386 static ERTS_INLINE void
destroy_trace_info(ErtsSigTraceInfo * ti)387 destroy_trace_info(ErtsSigTraceInfo *ti)
388 {
389 if (is_value(ti->tracer))
390 erts_tracer_update(&ti->tracer, NIL);
391 erts_free(ERTS_ALC_T_SIG_DATA, ti);
392 }
393
/*
 * Free a group-leader signal, releasing any off-heap data referenced
 * from its heap area.
 */
static void
destroy_sig_group_leader(ErtsSigGroupLeader *sgl)
{
    erts_cleanup_offheap(&sgl->oh);
    erts_free(ERTS_ALC_T_SIG_DATA, sgl);
}
400
/*
 * Hook called while a signal is being prepared for enqueueing on the
 * receiver 'rp'. Depending on the signal operation 'op' it may prepend
 * additional (trace) signals before the signal at *sigp, updating
 * *last_next accordingly. 'c_p' is the sending process (may be NULL).
 */
static ERTS_INLINE void
sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op,
                  Process *rp, ErtsMessage ***last_next)
{
    switch (op) {
    case ERTS_SIG_Q_OP_LINK:
        if (c_p
            && ((!!IS_TRACED(c_p))
                & (ERTS_TRACE_FLAGS(c_p) & (F_TRACE_SOL
                                            | F_TRACE_SOL1)))) {
            ErtsSigTraceInfo *ti;
            Eterm tag;
            /*
             * Set on link enabled.
             *
             * Prepend a trace-change-state signal before the
             * link signal...
             */
            tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
                                         ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
                                         0);
            ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
            ti->common.next = *sigp;
            ti->common.specific.next = &ti->common.next;
            ti->common.tag = tag;
            ti->flags_on = ERTS_TRACE_FLAGS(c_p) & TRACEE_FLAGS;
            if (!(ti->flags_on & F_TRACE_SOL1))
                ti->flags_off = 0;
            else {
                /* Set-on-first-link: clear the flags on the sender... */
                ti->flags_off = F_TRACE_SOL1|F_TRACE_SOL;
                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                ERTS_TRACE_FLAGS(c_p) &= ~(F_TRACE_SOL1|F_TRACE_SOL);
                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
            }
            erts_tracer_update(&ti->tracer, ERTS_TRACER(c_p));
            *sigp = (ErtsMessage *) ti;
            if (!*last_next || *last_next == sigp)
                *last_next = &ti->common.next;
        }
        break;

#ifdef USE_VM_PROBES
    case ERTS_SIG_Q_OP_EXIT:
    case ERTS_SIG_Q_OP_EXIT_LINKED:

        if (DTRACE_ENABLED(process_exit_signal)) {
            ErtsMessage* sig = *sigp;
            Uint16 type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag);
            Eterm reason, from;
            ErtsExitSignalData *xsigd;

            ASSERT(type == ERTS_SIG_Q_TYPE_GEN_EXIT);

            xsigd = get_exit_signal_data(sig);
            reason = xsigd->reason;
            from = xsigd->from;

            if (is_pid(from)) {

                DTRACE_CHARBUF(sender_str, DTRACE_TERM_BUF_SIZE);
                DTRACE_CHARBUF(receiver_str, DTRACE_TERM_BUF_SIZE);
                DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);

                if (reason == am_kill) {
                    reason = am_killed;
                }

                dtrace_pid_str(from, sender_str);
                dtrace_proc_str(rp, receiver_str);
                erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
                DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
            }
        }
        break;

#endif

    default:
        break;
    }
}
482
/*
 * Destroy trace signals that sig_enqueue_trace() prepended before
 * 'sig' in the chain starting at 'first'. Called when enqueueing
 * failed (receiver terminated); 'sig' itself is not freed here.
 */
static void
sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig)
{
    ErtsMessage *tmp;

    /* The usual case; no tracing signals... */
    if (sig == (ErtsSignal *) first) {
        ASSERT(sig->common.next == NULL);
        return;
    }

    /* Got trace signals to clean up... */

    tmp = first;

    while (tmp) {
        ErtsMessage *tmp_free = tmp;
        tmp = tmp->next;
        if (sig != (ErtsSignal *) tmp_free) {
            switch (ERTS_PROC_SIG_OP(((ErtsSignal *) tmp_free)->common.tag)) {
            case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
                destroy_trace_info((ErtsSigTraceInfo *) tmp_free);
                break;
            case ERTS_SIG_Q_OP_MONITOR:
                break; /* ignore flushed pending signal */
            default:
                ERTS_INTERNAL_ERROR("Unexpected signal op");
                break;
            }
        }
    }
}
515
#ifdef DEBUG
/* Debug helper: count non-message signals in the chain at 'first'. */
static int dbg_count_nmsigs(ErtsMessage *first)
{
    int nm_cnt = 0;
    ErtsMessage *curr = first;

    while (curr) {
        if (ERTS_SIG_IS_NON_MSG(curr))
            nm_cnt++;
        curr = curr->next;
    }
    return nm_cnt;
}
#endif
529
/*
 * Append the signal chain first..*last (with *last == NULL) to the
 * signal in-queue of 'rp', and update the non-message-signal
 * bookkeeping (sig_inq.nmsigs.next/last and ERTS_PSFLG_SIG_IN_Q).
 *
 * 'last_next' points at the 'next' field preceding the last
 * non-message signal when the chain contains more than one such
 * signal; otherwise NULL. 'num_msgs' is the number of message signals
 * in the chain. Caller must hold the MSGQ lock on 'rp'.
 *
 * Returns a (possibly updated) snapshot of rp->state.
 */
static ERTS_INLINE erts_aint32_t
enqueue_signals(Process *rp, ErtsMessage *first,
                ErtsMessage **last, ErtsMessage **last_next,
                Uint num_msgs,
                erts_aint32_t in_state)
{
    erts_aint32_t state = in_state;
    ErtsMessage **this = rp->sig_inq.last;

    ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);

    ASSERT(!*this);
    *this = first;
    rp->sig_inq.last = last;

    if (!rp->sig_inq.nmsigs.next) {
        /* No non-message signals in the in-queue before this chain... */
        ASSERT(!rp->sig_inq.nmsigs.last);
        if (ERTS_SIG_IS_NON_MSG(first)) {
            rp->sig_inq.nmsigs.next = this;
        }
        else if (last_next) {
            ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
            rp->sig_inq.nmsigs.next = &first->next;
        }
        else
            goto no_nmsig;

        state = erts_atomic32_read_bor_nob(&rp->state,
                                           ERTS_PSFLG_SIG_IN_Q);
    no_nmsig:
        ASSERT(!(state & ERTS_PSFLG_SIG_IN_Q));
    }
    else {
        /* Link the new non-message signals after the existing ones... */
        ErtsSignal *sig;
        ASSERT(rp->sig_inq.nmsigs.last);

        sig = (ErtsSignal *) *rp->sig_inq.nmsigs.last;

        ASSERT(sig && !sig->common.specific.next);
        ASSERT(state & ERTS_PSFLG_SIG_IN_Q);
        if (ERTS_SIG_IS_NON_MSG(first)) {
            sig->common.specific.next = this;
        }
        else if (last_next) {
            ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
            sig->common.specific.next = &first->next;
        }
    }

    if (last_next) {
        ASSERT(dbg_count_nmsigs(first) >= 2);
        rp->sig_inq.nmsigs.last = last_next;
    }
    else if (ERTS_SIG_IS_NON_MSG(first)) {
        ASSERT(dbg_count_nmsigs(first) == 1);
        rp->sig_inq.nmsigs.last = this;
    }
    else
        ASSERT(dbg_count_nmsigs(first) == 0);

    rp->sig_inq.len += num_msgs;

    ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);

    return state;
}
596
erts_enqueue_signals(Process * rp,ErtsMessage * first,ErtsMessage ** last,ErtsMessage ** last_next,Uint num_msgs,erts_aint32_t in_state)597 erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first,
598 ErtsMessage **last, ErtsMessage **last_next,
599 Uint num_msgs,
600 erts_aint32_t in_state)
601 {
602 return enqueue_signals(rp, first, last, last_next, num_msgs, in_state);
603 }
604
605 void
erts_make_dirty_proc_handled(Eterm pid,erts_aint32_t state,erts_aint32_t prio)606 erts_make_dirty_proc_handled(Eterm pid,
607 erts_aint32_t state,
608 erts_aint32_t prio)
609 {
610 Eterm *hp;
611 ErtsMessage *mp;
612 Process *sig_handler;
613
614 ASSERT(state & ERTS_PSFLG_DIRTY_RUNNING);
615
616 if (prio < 0)
617 prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
618
619 switch (prio) {
620 case PRIORITY_MAX:
621 sig_handler = erts_dirty_process_signal_handler_max;
622 break;
623 case PRIORITY_HIGH:
624 sig_handler = erts_dirty_process_signal_handler_high;
625 break;
626 default:
627 sig_handler = erts_dirty_process_signal_handler;
628 break;
629 }
630
631 /* Make sure signals are handled... */
632 mp = erts_alloc_message(0, &hp);
633 erts_queue_message(sig_handler, 0, mp, pid, am_system);
634 }
635
636 static void
637 check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig);
638
639
/*
 * Send the signal 'sig' (operation 'op') from 'c_p' (may be NULL) to
 * the process identified by 'pid'.
 *
 * On a normal scheduler, proc-monitor signals may be buffered in
 * esdp->pending_signal instead of being delivered immediately; a later
 * signal to the same receiver (or an explicit flush, where the caller
 * passes the pending signal itself) forces delivery in order.
 *
 * Returns non-zero if the signal was delivered (or is pending);
 * zero if the receiver no longer exists.
 */
static int
proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
{
    int res;
    Process *rp;
    ErtsMessage *first, *last, **last_next, **sigp;
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
    erts_aint32_t state;
    ErtsSignal *pend_sig;

    if (is_normal_sched) {
        pend_sig = esdp->pending_signal.sig;
        if (op == ERTS_SIG_Q_OP_MONITOR
            && ((ErtsMonitor*)sig)->type == ERTS_MON_TYPE_PROC) {

            if (!pend_sig) {
                /* Buffer this monitor signal instead of sending now... */
                esdp->pending_signal.sig = sig;
                esdp->pending_signal.to = pid;
#ifdef DEBUG
                esdp->pending_signal.dbg_from = esdp->current_process;
#endif
                return 1;
            }
            ASSERT(esdp->pending_signal.dbg_from == esdp->current_process ||
                   esdp->pending_signal.dbg_from == esdp->free_process);
            if (pend_sig != sig) {
                /* Switch them and send previously pending signal instead */
                Eterm pend_to = esdp->pending_signal.to;
                esdp->pending_signal.sig = sig;
                esdp->pending_signal.to = pid;
                sig = pend_sig;
                pid = pend_to;
            }
            else {
                /* Caller wants to flush pending signal */
                ASSERT(pid == esdp->pending_signal.to);
                esdp->pending_signal.sig = NULL;
                esdp->pending_signal.to = THE_NON_VALUE;
#ifdef DEBUG
                esdp->pending_signal.dbg_from = NULL;
#endif
                pend_sig = NULL;
            }
            rp = erts_proc_lookup_raw(pid);
            if (!rp) {
                /* Receiver gone; deliver 'DOWN' with reason noproc... */
                erts_proc_sig_send_monitor_down((ErtsMonitor*)sig, am_noproc);
                return 1;
            }
        }
        else if (pend_sig && pid == esdp->pending_signal.to) {
            /* Flush pending signal to maintain signal order */
            esdp->pending_signal.sig = NULL;
            esdp->pending_signal.to = THE_NON_VALUE;

            rp = erts_proc_lookup_raw(pid);
            if (!rp) {
                erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
                return 0;
            }

            /* Prepend pending signal */
            pend_sig->common.next = (ErtsMessage*) sig;
            pend_sig->common.specific.next = &pend_sig->common.next;
            first = (ErtsMessage*) pend_sig;
            last = (ErtsMessage*) sig;
            sigp = last_next = &pend_sig->common.next;
            goto first_last_done;
        }
        else {
            pend_sig = NULL;
            rp = erts_proc_lookup_raw(pid);
            if (!rp)
                return 0;
        }
    }
    else {
        /* Not a normal scheduler; keep the receiver alive via refc... */
        rp = erts_proc_lookup_raw_inc_refc(pid);
        if (!rp)
            return 0;
        pend_sig = NULL;
    }

    first = last = (ErtsMessage *) sig;
    last_next = NULL;
    sigp = &first;

first_last_done:
    sig->common.specific.next = NULL;

    /* may add signals before sig */
    sig_enqueue_trace(c_p, sigp, op, rp, &last_next);

    last->next = NULL;

    erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);

    state = erts_atomic32_read_nob(&rp->state);

    if (ERTS_PSFLG_FREE & state)
        res = 0;
    else {
        state = enqueue_signals(rp, first, &last->next, last_next, 0, state);
        if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO))
            check_push_msgq_len_offs_marker(rp, sig);
        res = !0;
    }

    erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);

    if (res == 0) {
        /* Receiver was free; destroy any prepended trace signals... */
        sig_enqueue_trace_cleanup(first, sig);
        if (pend_sig) {
            erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
            if (sig == pend_sig) {
                /* We did a switch, callers signal is now pending (still ok) */
                ASSERT(esdp->pending_signal.sig);
                res = 1;
            }
        }
    }
    else
        erts_proc_notify_new_sig(rp, state, 0);

    if (!is_normal_sched)
        erts_proc_dec_refc(rp);

    return res;
}
769
erts_proc_sig_send_pending(ErtsSchedulerData * esdp)770 void erts_proc_sig_send_pending(ErtsSchedulerData* esdp)
771 {
772 ErtsSignal* sig = esdp->pending_signal.sig;
773 int op;
774
775 ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
776 ASSERT(sig);
777 ASSERT(is_internal_pid(esdp->pending_signal.to));
778
779 op = ERTS_SIG_Q_OP_MONITOR;
780 ASSERT(op == ERTS_PROC_SIG_OP(sig->common.tag));
781
782 if (!proc_queue_signal(NULL, esdp->pending_signal.to, sig, op)) {
783 ErtsMonitor* mon = (ErtsMonitor*)sig;
784 erts_proc_sig_send_monitor_down(mon, am_noproc);
785 }
786 }
787
/*
 * Elevate the signal-handling priority of process 'other' to that of
 * the calling process 'c_p' if the caller's priority is higher.
 */
static int
maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
{
    /*
     * returns:
     *  > 0 -> elevated prio; process alive or exiting
     *  < 0 -> no elevation needed; process alive or exiting
     *    0 -> process terminated (free)
     */
    int res;
    Process *rp;
    erts_aint32_t state, my_prio, other_prio;

    rp = erts_proc_lookup_raw(other);
    if (!rp)
        res = 0;
    else {
        res = -1;
        state = erts_atomic32_read_nob(&c_p->state);
        my_prio = ERTS_PSFLGS_GET_USR_PRIO(state);

        state = erts_atomic32_read_nob(&rp->state);
        other_prio = ERTS_PSFLGS_GET_USR_PRIO(state);

        /* Note: higher numeric value means lower priority... */
        if (other_prio > my_prio) {
            /* Others prio is lower than mine; elevate it... */
            res = !!erts_sig_prio(other, my_prio);
            if (res) {
                /* ensure handled if dirty executing... */
                state = erts_atomic32_read_nob(&rp->state);
                /*
                 * We ignore ERTS_PSFLG_DIRTY_RUNNING_SYS. For
                 * more info see erts_execute_dirty_system_task()
                 * in erl_process.c.
                 */
                if (state & ERTS_PSFLG_DIRTY_RUNNING)
                    erts_make_dirty_proc_handled(other, state, my_prio);
            }
        }
    }
    return res;
}
830
/*
 * Move all signals from the (outer) signal in-queue into the private
 * signal queues of 'proc', stitching the non-message-signal chain of
 * the in-queue onto the one in the private queue and updating the
 * ERTS_PSFLG_SIG_Q/ERTS_PSFLG_SIG_IN_Q state flags accordingly.
 *
 * Caller must hold the locks required by erts_proc_sig_fetch(); the
 * in-queue must be non-empty.
 */
void
erts_proc_sig_fetch__(Process *proc)
{
    ASSERT(proc->sig_inq.first);

    if (!proc->sig_inq.nmsigs.next) {
        /* Only message signals in the in-queue... */
        ASSERT(!(ERTS_PSFLG_SIG_IN_Q
                 & erts_atomic32_read_nob(&proc->state)));
        ASSERT(!proc->sig_inq.nmsigs.last);

        if (proc->sig_qs.cont || ERTS_MSG_RECV_TRACED(proc)) {
            /* Append to middle queue; signals must pass tracing etc... */
            *proc->sig_qs.cont_last = proc->sig_inq.first;
            proc->sig_qs.cont_last = proc->sig_inq.last;
        }
        else {
            /* Append directly to the inner message queue... */
            *proc->sig_qs.last = proc->sig_inq.first;
            proc->sig_qs.last = proc->sig_inq.last;
        }
    }
    else {
        erts_aint32_t s;
        ASSERT(proc->sig_inq.nmsigs.last);
        if (!proc->sig_qs.nmsigs.last) {
            /* No non-message signals in private queue yet... */
            ASSERT(!proc->sig_qs.nmsigs.next);
            if (proc->sig_inq.nmsigs.next == &proc->sig_inq.first)
                proc->sig_qs.nmsigs.next = proc->sig_qs.cont_last;
            else
                proc->sig_qs.nmsigs.next = proc->sig_inq.nmsigs.next;

            s = erts_atomic32_read_bset_nob(&proc->state,
                                            (ERTS_PSFLG_SIG_Q
                                             | ERTS_PSFLG_SIG_IN_Q),
                                            ERTS_PSFLG_SIG_Q);

            ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
                   == ERTS_PSFLG_SIG_IN_Q); (void)s;
        }
        else {
            /* Chain onto the last non-message signal already fetched... */
            ErtsSignal *sig;
            ASSERT(proc->sig_qs.nmsigs.next);
            sig = ((ErtsSignal *) *proc->sig_qs.nmsigs.last);
            ASSERT(ERTS_SIG_IS_NON_MSG(sig));
            ASSERT(!sig->common.specific.next);
            if (proc->sig_inq.nmsigs.next == &proc->sig_inq.first)
                sig->common.specific.next = proc->sig_qs.cont_last;
            else
                sig->common.specific.next = proc->sig_inq.nmsigs.next;

            s = erts_atomic32_read_band_nob(&proc->state,
                                            ~ERTS_PSFLG_SIG_IN_Q);

            ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
                   == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); (void)s;
        }
        if (proc->sig_inq.nmsigs.last == &proc->sig_inq.first)
            proc->sig_qs.nmsigs.last = proc->sig_qs.cont_last;
        else
            proc->sig_qs.nmsigs.last = proc->sig_inq.nmsigs.last;
        proc->sig_inq.nmsigs.next = NULL;
        proc->sig_inq.nmsigs.last = NULL;

        *proc->sig_qs.cont_last = proc->sig_inq.first;
        proc->sig_qs.cont_last = proc->sig_inq.last;
    }

    proc->sig_qs.len += proc->sig_inq.len;

    /* Reset the in-queue to empty... */
    proc->sig_inq.first = NULL;
    proc->sig_inq.last = &proc->sig_inq.first;
    proc->sig_inq.len = 0;

}
903
/*
 * Fetch signals while a message-queue length-offset marker sits first
 * in the in-queue (due to outstanding process-info requests). The
 * marker absorbs the fetched length into 'delayed_len' so that
 * sig_qs.len is updated via process-info signals instead; the marker
 * is temporarily unlinked around the fetch and then restored as the
 * sole element of the in-queue.
 *
 * Returns the accumulated delayed length.
 */
Sint
erts_proc_sig_fetch_msgq_len_offs__(Process *proc)
{
    ErtsProcSigMsgQLenOffsetMarker *marker
        = (ErtsProcSigMsgQLenOffsetMarker *) proc->sig_inq.first;

    ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);

    if (marker->common.next) {
        Sint len;

        proc->sig_qs.flags |= FS_DELAYED_PSIGQS_LEN;

        /*
         * Prevent update of sig_qs.len in fetch. These
         * updates are done via process-info signal(s)
         * instead...
         */
        len = proc->sig_inq.len;
        marker->delayed_len += len;
        marker->len_offset -= len;
        proc->sig_inq.len = 0;

        /*
         * Temporarily remove marker during fetch...
         */

        proc->sig_inq.first = marker->common.next;
        if (proc->sig_inq.last == &marker->common.next)
            proc->sig_inq.last = &proc->sig_inq.first;
        if (proc->sig_inq.nmsigs.next == &marker->common.next)
            proc->sig_inq.nmsigs.next = &proc->sig_inq.first;
        if (proc->sig_inq.nmsigs.last == &marker->common.next)
            proc->sig_inq.nmsigs.last = &proc->sig_inq.first;

        erts_proc_sig_fetch__(proc);

        /* Reinstall the marker as the only element of the in-queue... */
        marker->common.next = NULL;
        proc->sig_inq.first = (ErtsMessage *) marker;
        proc->sig_inq.last = &marker->common.next;

    }

    return marker->delayed_len;
}
949
/*
 * Return the number of message signals in the private queues of
 * 'c_p', compensating for any delayed length updates recorded in the
 * in-queue length-offset marker. Takes the MSGQ lock internally
 * unless the caller already holds it ('have_qlock').
 */
static ERTS_INLINE Sint
proc_sig_privqs_len(Process *c_p, int have_qlock)
{
    Sint res = c_p->sig_qs.len;

    ERTS_LC_ASSERT(!have_qlock
                   ? (ERTS_PROC_LOCK_MAIN
                      == erts_proc_lc_my_proc_locks(c_p))
                   : ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
                      == ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
                          & erts_proc_lc_my_proc_locks(c_p))));

    if (c_p->sig_qs.flags & FS_DELAYED_PSIGQS_LEN) {
        /* Length updates are delayed; add what the marker has absorbed... */
        ErtsProcSigMsgQLenOffsetMarker *marker;

        if (!have_qlock)
            erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);

        marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
        ASSERT(marker);
        ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);

        res += marker->delayed_len;

        if (!have_qlock)
            erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
    }

#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
    {
        /* Verify the bookkeeping against an actual count... */
        Sint len = 0;
        ERTS_FOREACH_SIG_PRIVQS(
            c_p, mp,
            {
                if (ERTS_SIG_IS_MSG(mp))
                    len++;
            });
        ERTS_ASSERT(res == len);
    }
#endif

    return res;
}
993
/* Exported wrapper around proc_sig_privqs_len() without the MSGQ lock held. */
Sint
erts_proc_sig_privqs_len(Process *c_p)
{
    return proc_sig_privqs_len(c_p, 0);
}
999
/* Free an unlink-operation signal. */
void
erts_proc_sig_destroy_unlink_op(ErtsSigUnlinkOp *sulnk)
{
    erts_free(ERTS_ALC_T_SIG_DATA, sulnk);
}
1005
/*
 * Return the external (distribution) data carried by a non-message
 * signal, or NULL if the signal carries none. Handles distributed
 * alias messages and generic exit signals whose reason is still in
 * external format.
 */
static ERTS_INLINE ErtsDistExternal *
get_external_non_msg_signal(ErtsMessage *sig)
{
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));

    if (ERTS_SIG_IS_DIST_ALIAS_MSG(sig)) {
        ErlHeapFragment *hfrag;
        /* alloc_size == 1 marks that the data is in a chained fragment
         * -- NOTE(review): confirm against the alias-message encoder */
        if (sig->hfrag.alloc_size != 1)
            hfrag = &sig->hfrag;
        else {
            hfrag = sig->hfrag.next;
            if (!hfrag)
                return NULL;
        }
        return erts_get_dist_ext(hfrag);
    }

    if (ERTS_SIG_IS_GEN_EXIT(sig) && ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig)) {
        ErtsExitSignalData *xsigd = get_exit_signal_data(sig);
        ASSERT(ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag)
               == ERTS_SIG_Q_TYPE_GEN_EXIT);
        ASSERT(is_non_value(xsigd->reason));
        /* Unfragmented: dist data lies directly after the payload... */
        if (sig->hfrag.next == NULL)
            return (ErtsDistExternal*)(xsigd + 1);
        return erts_get_dist_ext(sig->hfrag.next);
    }

    return NULL;
}
1035
/*
 * Return the external (distribution) data carried by a signal, or
 * NULL if it carries none.
 */
ErtsDistExternal *
erts_proc_sig_get_external(ErtsMessage *msgp)
{
    if (ERTS_SIG_IS_EXTERNAL_MSG(msgp))
        return erts_get_dist_ext(msgp->data.heap_frag);
    if (ERTS_SIG_IS_NON_MSG(msgp))
        return get_external_non_msg_signal(msgp);
    return NULL;
}
1045
1046 static void do_seq_trace_output(Eterm to, Eterm token, Eterm msg);
1047
1048 static void
send_gen_exit_signal(Process * c_p,Eterm from_tag,Eterm from,Eterm to,Sint16 op,Eterm reason,ErtsDistExternal * dist_ext,ErlHeapFragment * dist_ext_hfrag,Eterm ref,Eterm token,int normal_kills,Uint32 conn_lost,Uint32 conn_id)1049 send_gen_exit_signal(Process *c_p, Eterm from_tag,
1050 Eterm from, Eterm to,
1051 Sint16 op, Eterm reason, ErtsDistExternal *dist_ext,
1052 ErlHeapFragment *dist_ext_hfrag,
1053 Eterm ref, Eterm token, int normal_kills,
1054 Uint32 conn_lost, Uint32 conn_id)
1055 {
1056 ErtsExitSignalData *xsigd;
1057 Eterm *hp, *start_hp, s_reason, s_ref, s_message, s_token, s_from;
1058 ErtsMessage *mp;
1059 ErlHeapFragment *hfrag;
1060 ErlOffHeap *ohp;
1061 Uint hsz, from_sz, reason_sz, ref_sz, token_sz, dist_ext_sz = 0;
1062 int seq_trace;
1063 #ifdef USE_VM_PROBES
1064 Eterm s_utag, utag;
1065 Uint utag_sz;
1066 #endif
1067
1068 ASSERT((is_value(reason) && dist_ext == NULL) ||
1069 (is_non_value(reason) && dist_ext != NULL));
1070
1071 ASSERT(is_immed(from_tag));
1072
1073 hsz = sizeof(ErtsExitSignalData)/sizeof(Eterm);
1074
1075 seq_trace = c_p && have_seqtrace(token);
1076 if (seq_trace)
1077 seq_trace_update_serial(c_p);
1078
1079 #ifdef USE_VM_PROBES
1080 utag_sz = 0;
1081 utag = NIL;
1082 if (c_p && token != NIL && (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
1083 utag_sz = size_object(DT_UTAG(c_p));
1084 utag = DT_UTAG(c_p);
1085 }
1086 else if (token == am_have_dt_utag) {
1087 token = NIL;
1088 }
1089 hsz += utag_sz;
1090 #endif
1091
1092 token_sz = size_object(token);
1093 hsz += token_sz;
1094
1095 from_sz = size_object(from);
1096 hsz += from_sz;
1097
1098 ref_sz = size_object(ref);
1099 hsz += ref_sz;
1100
1101 reason_sz = 0; /* Set to silence gcc warning */
1102
    /* The reason was part of the control message;
       just copy it into the xsigd */
1105 if (is_value(reason)) {
1106 reason_sz = size_object(reason);
1107 hsz += reason_sz;
1108
1109 switch (op) {
1110 case ERTS_SIG_Q_OP_EXIT:
1111 case ERTS_SIG_Q_OP_EXIT_LINKED: {
1112 /* {'EXIT', From, Reason} */
1113 hsz += 4; /* 3-tuple */
1114 break;
1115 }
1116 case ERTS_SIG_Q_OP_MONITOR_DOWN: {
1117 /* {'DOWN', Ref, process, From, Reason} */
1118 hsz += 6; /* 5-tuple */
1119 break;
1120 }
1121 default:
1122 ERTS_INTERNAL_ERROR("Invalid exit signal op");
1123 break;
1124 }
1125 } else if (dist_ext != NULL && dist_ext_hfrag == NULL) {
1126 /* The message was not fragmented so we need to create space
1127 for a single dist_ext element */
1128 dist_ext_sz = erts_dist_ext_size(dist_ext) / sizeof(Eterm);
1129 hsz += dist_ext_sz;
1130 }
1131
1132 /*
1133 * Allocate message combined with heap fragment...
1134 */
1135 mp = erts_alloc_message(hsz, &hp);
1136 hfrag = &mp->hfrag;
1137 mp->next = NULL;
1138 ohp = &hfrag->off_heap;
1139 start_hp = hp;
1140
1141 s_token = copy_struct(token, token_sz, &hp, ohp);
1142 s_from = copy_struct(from, from_sz, &hp, ohp);
1143 s_ref = copy_struct(ref, ref_sz, &hp, ohp);
1144
1145 if (is_value(reason)) {
1146 s_reason = copy_struct(reason, reason_sz, &hp, ohp);
1147
1148 switch (op) {
1149 case ERTS_SIG_Q_OP_EXIT:
1150 case ERTS_SIG_Q_OP_EXIT_LINKED:
1151 /* {'EXIT', From, Reason} */
1152 s_message = TUPLE3(hp, am_EXIT, s_from, s_reason);
1153 hp += 4;
1154 break;
1155 case ERTS_SIG_Q_OP_MONITOR_DOWN:
1156 /* {'DOWN', Ref, process, From, Reason} */
1157 s_message = TUPLE5(hp, am_DOWN, s_ref, am_process, s_from, s_reason);
1158 hp += 6;
1159 break;
1160 default:
1161 /* This cannot happen, used to silence gcc warning */
1162 s_message = THE_NON_VALUE;
1163 break;
1164 }
1165 } else {
1166 s_message = THE_NON_VALUE;
1167 s_reason = THE_NON_VALUE;
1168 }
1169
1170 #ifdef USE_VM_PROBES
1171 s_utag = (is_immed(utag)
1172 ? utag
1173 : copy_struct(utag, utag_sz, &hp, ohp));
1174 ERL_MESSAGE_DT_UTAG(mp) = s_utag;
1175 #endif
1176
1177 ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(op,
1178 ERTS_SIG_Q_TYPE_GEN_EXIT,
1179 0);
1180 ERL_MESSAGE_TOKEN(mp) = s_token;
1181 ERL_MESSAGE_FROM(mp) = from_tag; /* immediate... */
1182
1183 hfrag->used_size = hp - start_hp;
1184
1185 xsigd = (ErtsExitSignalData *) hp;
1186
1187 xsigd->message = s_message;
1188 xsigd->from = s_from;
1189 xsigd->reason = s_reason;
1190 hfrag->next = dist_ext_hfrag;
1191
1192 if (is_not_nil(s_ref)) {
1193 ASSERT(is_ref(s_ref));
1194 xsigd->u.ref = s_ref;
1195 }
1196 else {
1197 xsigd->u.link.flags = 0;
1198 if (normal_kills)
1199 xsigd->u.link.flags |= ERTS_SIG_LNK_X_FLAG_NORMAL_KILLS;
1200 if (conn_lost)
1201 xsigd->u.link.flags |= ERTS_SIG_LNK_X_FLAG_CONNECTION_LOST;
1202 xsigd->u.link.connection_id = conn_id;
1203 }
1204
1205 hp += sizeof(ErtsExitSignalData)/sizeof(Eterm);
1206
1207 if (dist_ext != NULL && dist_ext_hfrag == NULL && is_non_value(reason)) {
1208 erts_make_dist_ext_copy(dist_ext, (ErtsDistExternal *) hp);
1209 hp += dist_ext_sz;
1210 }
1211
1212 ASSERT(hp == mp->hfrag.mem + mp->hfrag.alloc_size);
1213
1214 if (seq_trace)
1215 do_seq_trace_output(to, s_token, s_message);
1216
1217 if (!proc_queue_signal(c_p, to, (ErtsSignal *) mp, op)) {
1218 mp->next = NULL;
1219 erts_cleanup_messages(mp);
1220 }
1221 }
1222
1223 static void
do_seq_trace_output(Eterm to,Eterm token,Eterm msg)1224 do_seq_trace_output(Eterm to, Eterm token, Eterm msg)
1225 {
1226 /*
1227 * We could do this when enqueuing the signal and avoid some
1228 * locking. However, the enqueuing code would then always
1229 * have the penalty of this seq-tracing code which we do not
1230 * want...
1231 */
1232 ErtsSchedulerData *esdp = erts_get_scheduler_data();
1233 int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
1234 Process *rp;
1235
1236 if (is_normal_sched)
1237 rp = erts_proc_lookup_raw(to);
1238 else
1239 rp = erts_proc_lookup_raw_inc_refc(to);
1240
1241 if (rp) {
1242 erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
1243
1244 if (!ERTS_PROC_IS_EXITING(rp))
1245 seq_trace_output(token, msg, SEQ_TRACE_SEND, to, rp);
1246
1247 erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
1248
1249 if (!is_normal_sched)
1250 erts_proc_dec_refc(rp);
1251 }
1252 }
1253
/*
 * Unpack an alias-message signal created by erts_proc_sig_send_to_alias()
 * or erts_proc_sig_send_dist_to_alias().
 *
 * Out-parameters (each may be NULL when the caller is not interested):
 *   fromp     - sender term
 *   aliasp    - alias reference the message was sent to
 *   msgp      - message term (THE_NON_VALUE for dist signals, where the
 *               message is still externally encoded)
 *   attachedp - where the message data lives: NULL (on receiver heap),
 *               ERTS_MSG_COMBINED_HFRAG, or a heap fragment pointer
 *
 * Returns the signal type (ERTS_SIG_Q_TYPE_*).
 */
static ERTS_INLINE int
get_alias_msg_data(ErtsMessage *sig, Eterm *fromp, Eterm *aliasp,
                   Eterm *msgp, void **attachedp)
{
    int type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag);
    Eterm *tp;

    if (type == ERTS_SIG_Q_TYPE_DIST) {
        /* Dist signal: alias boxed pointer stored as first word of the
         * combined heap fragment; message not yet decoded. */
        if (fromp)
            *fromp = ERL_MESSAGE_FROM(sig);
        if (aliasp)
            *aliasp = sig->hfrag.mem[0];
        if (msgp)
            *msgp = THE_NON_VALUE;
        if (attachedp)
            *attachedp = ERTS_MSG_COMBINED_HFRAG;
        return type;
    }

    /* Local signal: from/alias/message packed in a tuple stored in the
     * 'from' field of the message struct (3-tuple, or 5-tuple when a
     * heap fragment address is carried as two small integers). */
    ASSERT(is_tuple_arity(ERL_MESSAGE_FROM(sig), 3)
           || is_tuple_arity(ERL_MESSAGE_FROM(sig), 5));

    tp = tuple_val(ERL_MESSAGE_FROM(sig));
    if (fromp)
        *fromp = tp[1];
    if (aliasp)
        *aliasp = tp[2];
    if (msgp)
        *msgp = tp[3];

    if (!attachedp)
        return type;

    if (is_tuple_arity(ERL_MESSAGE_FROM(sig), 3)) {
        if (type == ERTS_SIG_Q_TYPE_HEAP)
            *attachedp = NULL;
        else {
            ASSERT(type == ERTS_SIG_Q_TYPE_OFF_HEAP);
            *attachedp = ERTS_MSG_COMBINED_HFRAG;
        }
    }
    else {
        Uint low, high;
        ASSERT(type == ERTS_SIG_Q_TYPE_HEAP_FRAG);
        /*
         * Heap fragment pointer in element 4 and 5. See
         * erts_proc_sig_send_to_alias().
         */
        low = unsigned_val(tp[4]);
        high = unsigned_val(tp[5]);
        /* Reassemble the pointer from the two halfword smalls. */
#ifdef ARCH_64
        ASSERT((((Uint) 1) << 32) > low);
        ASSERT((((Uint) 1) << 32) > high);
        *attachedp = (void *) ((((Uint) high) << 32) | ((Uint) low));
#else /* ARCH_32 */
        ASSERT((((Uint) 1) << 16) > low);
        ASSERT((((Uint) 1) << 16) > high);
        *attachedp = (void *) ((((Uint) high) << 16) | ((Uint) low));
#endif
        ASSERT(*attachedp != NULL);
    }

    return type;
}
1318
/*
 * Prepare a non-message signal (allocated as an ErtsMessage) for
 * destruction: release any external-term copy and point
 * sig->data.heap_frag / sig->data.attached at the data that the
 * generic message cleanup should free.
 */
void
erts_proc_sig_cleanup_non_msg_signal(ErtsMessage *sig)
{
    ErlHeapFragment *hfrag;
    Eterm tag = ((ErtsSignal *) sig)->common.tag;

    /*
     * Heap alias message, heap frag alias message and
     * adjust message queue signals are the only non-message
     * signals, which are allocated as messages, which do not
     * use a combined message / heap fragment.
     */
    if (ERTS_SIG_IS_HEAP_ALIAS_MSG_TAG(tag)
        || tag == ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_ADJ_MSGQ,
                                         ERTS_SIG_Q_TYPE_OFF_HEAP,
                                         0)) {
        /* Nothing attached to free... */
        sig->data.heap_frag = NULL;
        return;
    }



    if(ERTS_SIG_IS_HEAP_FRAG_ALIAS_MSG_TAG(tag)) {
        /* Retrieve pointer to heap fragment (may not be NULL). */
        void *attached;
        (void) get_alias_msg_data(sig, NULL, NULL, NULL, &attached);
        sig->data.heap_frag = hfrag = (ErlHeapFragment *) attached;
        ASSERT(hfrag);
    }
    else {
        /*
         * Using a combined heap fragment...
         */
        ErtsDistExternal *edep = get_external_non_msg_signal(sig);
        if (edep)
            erts_free_dist_ext_copy(edep);

        sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
        hfrag = sig->hfrag.next;
        erts_cleanup_offheap(&sig->hfrag.off_heap);
    }

    if (hfrag)
        free_message_buffer(hfrag);
}
1364
/*
 * Send message 'msg' from 'c_p' to the alias reference 'to'. The message
 * is delivered as an alias-message signal to the process the alias
 * reference identifies; delivery is silently dropped when that process
 * no longer exists.
 *
 * NOTE(review): c_p is dereferenced unconditionally by IS_TRACED_FL
 * below, but later guarded with "c_p &&" -- presumably all callers pass
 * a non-NULL c_p (the dist case uses erts_proc_sig_send_dist_to_alias);
 * confirm against callers.
 */
void
erts_proc_sig_send_to_alias(Process *c_p, Eterm from, Eterm to, Eterm msg, Eterm token)
{
    Process *rp;
    ErlHeapFragment *hfrag;
    ErtsProcLocks rp_locks = 0;
    erts_aint32_t rp_state;
    ErtsMessage *mp;
    ErlOffHeap *ohp;
    Uint hsz, to_sz, token_sz, msg_sz;
    Eterm *hp, pid, to_copy, token_copy, msg_copy;
    int type;
#ifdef SHCOPY_SEND
    erts_shcopy_t info;
#else
    erts_literal_area_t litarea;
#endif
#ifdef USE_VM_PROBES
    Eterm utag_copy, utag;
    Uint utag_sz;
#endif

    ASSERT(is_ref(to));
    ASSERT(is_internal_pid(from) || is_atom(from));

    if (IS_TRACED_FL(c_p, F_TRACE_SEND))
        trace_send(c_p, to, msg);
    if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
        save_calls(c_p, &exp_send);

    /* The alias ref embeds the pid of the process that created it. */
    pid = erts_get_pid_of_ref(to);
    rp = erts_proc_lookup(pid);
    if (!rp)
        return;

    rp_locks = c_p == rp ? ERTS_PROC_LOCK_MAIN : 0;

    hsz = 0;

    if (c_p && have_seqtrace(token)) {
        seq_trace_update_serial(c_p);
        seq_trace_output(token, msg, SEQ_TRACE_SEND, to, c_p);
    }

#ifdef USE_VM_PROBES
    utag_sz = 0;
    utag = NIL;
    if (c_p && token != NIL && (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
        utag_sz = size_object(DT_UTAG(c_p));
        utag = DT_UTAG(c_p);
    }
    else if (token == am_have_dt_utag) {
        token = NIL;
    }
    hsz += utag_sz;
#endif

    to_sz = size_object(to);
    hsz += to_sz;

    token_sz = size_object(token);
    hsz += token_sz;

    /*
     * SHCOPY corrupts the heap between copy_shared_calculate(), and
     * copy_shared_perform() by inserting move-markers (like the gc).
     * Make sure we don't use the heap between those instances.
     */

    if (is_immed(msg))
        msg_sz = 0;
    else {
#ifdef SHCOPY_SEND
        INITIALIZE_SHCOPY(info);
        msg_sz = copy_shared_calculate(msg, &info);
#else
        INITIALIZE_LITERAL_PURGE_AREA(litarea);
        msg_sz = size_object_litopt(msg, &litarea);
#endif
        hsz += msg_sz;
    }

    /* Pick the signal representation based on the receiver's message
     * queue mode; this also decides where the terms are copied to. */
    rp_state = erts_atomic32_read_nob(&rp->state);
    if (rp_state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
        type = ERTS_SIG_Q_TYPE_OFF_HEAP;
        hsz += 4; /* 3-tuple containing from, alias, and message */
        mp = erts_alloc_message(hsz, &hp);
        ohp = &mp->hfrag.off_heap;
        hfrag = NULL;
    }
    else {
        int on_heap;
        hsz += 6; /*
                   * 5-tuple containing from, alias, message, high part
                   * of heap frag address, and low part of heap frag
                   * address. If we manage to allocate on the heap, we
                   * omit the heap frag address elements and use a
                   * 3-tuple instead.
                   */
        mp = erts_try_alloc_message_on_heap(rp, &rp_state, &rp_locks,
                                            hsz, &hp, &ohp, &on_heap);
        if (!on_heap) {
            type = ERTS_SIG_Q_TYPE_HEAP_FRAG;
            hfrag = mp->data.heap_frag;
            ASSERT(hfrag);
        }
        else {
            /* no need to save heap fragment pointer... */
            Eterm *tmp_hp, *end_hp;
            type = ERTS_SIG_Q_TYPE_HEAP;
            /* Give back the two words reserved for the heap frag
             * address (3-tuple used instead of 5-tuple). */
            end_hp = hp + hsz;
            tmp_hp = end_hp - 2;
            HRelease(rp, end_hp, tmp_hp);
            hfrag = NULL;
        }
    }

    mp->next = NULL;

    if (is_immed(msg))
        msg_copy = msg;
    else {
#ifdef SHCOPY_SEND
        msg_copy = copy_shared_perform(msg, msg_sz, &info, &hp, ohp);
        DESTROY_SHCOPY(info);
#else
        msg_copy = copy_struct_litopt(msg, msg_sz, &hp, ohp, &litarea);
#endif
    }
    to_copy = copy_struct(to, to_sz, &hp, ohp);
    token_copy = copy_struct(token, token_sz, &hp, ohp);
#ifdef USE_VM_PROBES
    utag_copy = (is_immed(utag)
                 ? utag
                 : copy_struct(utag, utag_sz, &hp, ohp));
    ERL_MESSAGE_DT_UTAG(mp) = utag_copy;
#endif

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_ALIAS_MSG,
                                                  type, 0);
    ERL_MESSAGE_TOKEN(mp) = token_copy;

    /* Pack from/alias/message (and, for heap fragments, the fragment
     * address) into a tuple stored in the 'from' field; unpacked by
     * get_alias_msg_data(). */
    if (type != ERTS_SIG_Q_TYPE_HEAP_FRAG) {
        /* 3-tuple containing from, alias, and message */
        ERL_MESSAGE_FROM(mp) = TUPLE3(hp, from, to_copy, msg_copy);
    }
    else {
        /*
         * 5-tuple containing from, alias, and message,
         * low halfword of heap frag address, and
         * high halfword of heap frag address.
         */
        Uint low, high;
        Eterm hfrag_low, hfrag_high;
#ifdef ARCH_64
        low = ((UWord) hfrag) & ((UWord) 0xffffffff);
        high = (((UWord) hfrag) >> 32) & ((UWord) 0xffffffff);
#else /* ARCH_32 */
        low = ((UWord) hfrag) & ((UWord) 0xffff);
        high = (((UWord) hfrag) >> 16) & ((UWord) 0xffff);
#endif
        hfrag_low = make_small(low);
        hfrag_high = make_small(high);
        ERL_MESSAGE_FROM(mp) = TUPLE5(hp, from, to_copy, msg_copy,
                                      hfrag_low, hfrag_high);
    }

    if (!proc_queue_signal(c_p, pid, (ErtsSignal *) mp,
                           ERTS_SIG_Q_OP_ALIAS_MSG)) {
        mp->next = NULL;
        erts_cleanup_messages(mp);
    }

    ERTS_LC_ASSERT(!(rp_locks & ERTS_PROC_LOCKS_ALL_MINOR));
    if (c_p != rp && rp_locks)
        erts_proc_unlock(rp, rp_locks);

    /* Charge the sender reductions proportional to the copy work. */
    if (c_p && hsz > ERTS_MSG_COPY_WORDS_PER_REDUCTION) {
        Uint reds = hsz / ERTS_MSG_COPY_WORDS_PER_REDUCTION;
        if (reds > CONTEXT_REDS)
            reds = CONTEXT_REDS;
        BUMP_REDS(c_p, (int) reds);
    }
}
1549
/*
 * Send an externally encoded message, received over the distribution,
 * to the alias reference 'alias'. The message stays encoded in the
 * signal; the receiver decodes it on reception. Silently dropped when
 * the alias does not identify a live local process.
 *
 * edep  - external encoding of the message (unfragmented case).
 * hfrag - heap fragment chain holding a fragmented message, or NULL.
 */
void
erts_proc_sig_send_dist_to_alias(Eterm alias, ErtsDistExternal *edep,
                                 ErlHeapFragment *hfrag, Eterm token)
{
    ErtsMessage* mp;
    Eterm token_copy;
    Eterm *hp;
    Eterm pid;

    ASSERT(is_ref(alias));
    pid = erts_get_pid_of_ref(alias);
    if (!is_internal_pid(pid))
        return;

    /*
     * The receiver can distinguish between these two scenarios by
     * size of combined heap fragment (1 and > 1).
     */

    if (hfrag) {
        /*
         * Fragmented message. Data already allocated in heap fragment
         * including 'token' and 'to' ref. Only need room for the
         * 'alias' boxed pointer and a pointer to the heap fragment...
         */
        mp = erts_alloc_message(1, &hp);
        ASSERT(mp->hfrag.alloc_size == 1);
        hp[0] = alias;
        mp->hfrag.next = hfrag;
        token_copy = token;
    } else {
        /* Un-fragmented message, allocate space for
           token and dist_ext in message. */
        Uint dist_ext_sz = erts_dist_ext_size(edep) / sizeof(Eterm);
        Uint token_sz = is_immed(token) ? 0 : size_object(token);
        Uint alias_sz = size_object(alias);
        Uint sz = 1 + alias_sz + token_sz + dist_ext_sz;
        Eterm *aliasp;

        mp = erts_alloc_message(sz, &hp);
        ASSERT(mp->hfrag.alloc_size > 1);
        /* First word of the fragment holds the alias (see
         * get_alias_msg_data()). */
        aliasp = hp++;
        *aliasp = copy_struct(alias, alias_sz, &hp, &mp->hfrag.off_heap);
        token_copy = (is_immed(token)
                      ? token
                      : copy_struct(token, token_sz, &hp,
                                    &mp->hfrag.off_heap));
        /* The encoded message lives after used_size, retrieved via
         * erts_get_dist_ext(). */
        mp->hfrag.used_size = 1 + alias_sz + token_sz;
        erts_make_dist_ext_copy(edep, erts_get_dist_ext(&mp->hfrag));
    }

    ERL_MESSAGE_FROM(mp) = edep->dep->sysname;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(mp) = NIL;
    if (token == am_have_dt_utag)
	ERL_MESSAGE_TOKEN(mp) = NIL;
    else
#endif
        ERL_MESSAGE_TOKEN(mp) = token_copy;

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_ALIAS_MSG,
                                                  ERTS_SIG_Q_TYPE_DIST,
                                                  0);

    /* Reclaim the signal if the receiver no longer exists. */
    if (!proc_queue_signal(NULL, pid, (ErtsSignal *) mp,
                           ERTS_SIG_Q_OP_ALIAS_MSG)) {
        mp->next = NULL;
        erts_cleanup_messages(mp);
    }

}
1621
1622
/*
 * Send a message generated by a "persistent" monitor (a monitor that is
 * not removed when it triggers): ERTS_MON_TYPE_NODES (nodes monitor) or
 * ERTS_MON_TYPE_TIME_OFFSET. The message term plus a trailing
 * ErtsPersistMonMsg struct are placed in a combined heap fragment;
 * get_persist_mon_msg() retrieves them on the receiving side.
 *
 * type   - monitor type (validated in the switch below).
 * key    - lookup key identifying the monitor at the receiver.
 * from   - sender; must be an immediate.
 * to     - receiving process.
 * msg    - message term; msg_sz must equal size_object(msg).
 */
void
erts_proc_sig_send_persistent_monitor_msg(Uint16 type, Eterm key,
                                          Eterm from, Eterm to,
                                          Eterm msg, Uint msg_sz)
{
    ErtsPersistMonMsg *prst_mon;
    ErtsMessage *mp;
    ErlHeapFragment *hfrag;
    Eterm *hp, *start_hp, message;
    ErlOffHeap *ohp;
    Uint hsz = sizeof(ErtsPersistMonMsg) + msg_sz;

    /*
     * Allocate message combined with heap fragment...
     */
    mp = erts_alloc_message(hsz, &hp);
    hfrag = &mp->hfrag;
    mp->next = NULL;
    ohp = &hfrag->off_heap;
    start_hp = hp;

    ASSERT(msg_sz == size_object(msg));
    message = copy_struct(msg, msg_sz, &hp, ohp);
    hfrag->used_size = hp - start_hp;

    /* The struct lives directly after the copied message term. */
    prst_mon = (ErtsPersistMonMsg *) (char *) hp;
    prst_mon->message = message;

    switch (type) {
    case ERTS_MON_TYPE_NODES:
        ASSERT(is_small(key));
        prst_mon->key = key;
        break;

    case ERTS_MON_TYPE_TIME_OFFSET:
        ASSERT(is_internal_ref(key));
        ASSERT(is_tuple_arity(message, 5));

        /* Use the ref inside the copied message so the stored key
         * refers to heap-fragment data (not the caller's copy). */
        prst_mon->key = tuple_val(message)[2];

        ASSERT(eq(prst_mon->key, key));
        break;

    default:
        ERTS_INTERNAL_ERROR("Invalid persistent monitor type");
        prst_mon->key = key;
        break;
    }

    ASSERT(is_immed(from));

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_PERSISTENT_MON_MSG,
                                                  type, 0);
    ERL_MESSAGE_FROM(mp) = from;
    ERL_MESSAGE_TOKEN(mp) = am_undefined;

    /* Reclaim the signal if the receiver no longer exists. */
    if (!proc_queue_signal(NULL, to, (ErtsSignal *) mp,
                           ERTS_SIG_Q_OP_PERSISTENT_MON_MSG)) {
        mp->next = NULL;
        erts_cleanup_messages(mp);
    }
}
1685
1686 static ERTS_INLINE Eterm
get_persist_mon_msg(ErtsMessage * sig,Eterm * msg)1687 get_persist_mon_msg(ErtsMessage *sig, Eterm *msg)
1688 {
1689 ErtsPersistMonMsg *prst_mon;
1690 prst_mon = ((ErtsPersistMonMsg *)
1691 (char *) (&sig->hfrag.mem[0]
1692 + sig->hfrag.used_size));
1693 *msg = prst_mon->message;
1694 return prst_mon->key;
1695 }
1696
1697 void
erts_proc_sig_send_exit(Process * c_p,Eterm from,Eterm to,Eterm reason,Eterm token,int normal_kills)1698 erts_proc_sig_send_exit(Process *c_p, Eterm from, Eterm to,
1699 Eterm reason, Eterm token,
1700 int normal_kills)
1701 {
1702 Eterm from_tag;
1703 ASSERT(!c_p || c_p->common.id == from);
1704 if (is_immed(from)) {
1705 ASSERT(is_internal_pid(from) || is_internal_port(from));
1706 from_tag = from;
1707 }
1708 else {
1709 DistEntry *dep;
1710 ASSERT(is_external_pid(from));
1711 dep = external_pid_dist_entry(from);
1712 from_tag = dep->sysname;
1713 }
1714 send_gen_exit_signal(c_p, from_tag, from, to, ERTS_SIG_Q_OP_EXIT,
1715 reason, NULL, NULL, NIL, token, normal_kills, 0, 0);
1716 }
1717
1718 void
erts_proc_sig_send_dist_exit(DistEntry * dep,Eterm from,Eterm to,ErtsDistExternal * dist_ext,ErlHeapFragment * hfrag,Eterm reason,Eterm token)1719 erts_proc_sig_send_dist_exit(DistEntry *dep,
1720 Eterm from, Eterm to,
1721 ErtsDistExternal *dist_ext,
1722 ErlHeapFragment *hfrag,
1723 Eterm reason, Eterm token)
1724 {
1725 send_gen_exit_signal(NULL, dep->sysname, from, to, ERTS_SIG_Q_OP_EXIT,
1726 reason, dist_ext, hfrag, NIL, token, 0, 0, 0);
1727
1728 }
1729
/*
 * Send a link-triggered exit signal over link 'lnk' to the linked
 * process. Used both when a linked local entity terminates ('from' is
 * its pid/port) and when a dist connection is lost ('from' is
 * THE_NON_VALUE and the reason is 'noconnection'). Releases the
 * caller's reference to 'lnk' before returning.
 */
void
erts_proc_sig_send_link_exit(Process *c_p, Eterm from, ErtsLink *lnk,
                             Eterm reason, Eterm token)
{
    Eterm to, from_tag, from_item;
    int conn_lost;
    Uint32 conn_id;
    ASSERT(!c_p || c_p->common.id == from);
    ASSERT(lnk);
    to = lnk->other.item;
    if (is_value(from)) {
        /* Local sender terminating... */
        ASSERT(is_internal_pid(from) || is_internal_port(from));
        from_tag = from_item = from;
        conn_id = 0;
        conn_lost = 0;
    }
    else {
        /* Connection to the remote linked process was lost; derive the
         * sender identity from the link structure itself. */
        ErtsLink *olnk;
        ErtsELink *elnk;

        ASSERT(reason == am_noconnection);
        ASSERT(lnk->flags & ERTS_ML_FLG_EXTENDED);
        ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);

        olnk = erts_link_to_other(lnk, &elnk);

        from_item = olnk->other.item;
        from_tag = elnk->dist->nodename;
        conn_id = elnk->dist->connection_id;
        conn_lost = !0;
    }
    send_gen_exit_signal(c_p, from_tag, from_item, to, ERTS_SIG_Q_OP_EXIT_LINKED,
                         reason, NULL, NULL, NIL, token, 0, conn_lost, conn_id);
    erts_link_release(lnk);
}
1765
1766 int
erts_proc_sig_send_link(Process * c_p,Eterm to,ErtsLink * lnk)1767 erts_proc_sig_send_link(Process *c_p, Eterm to, ErtsLink *lnk)
1768 {
1769 ErtsSignal *sig;
1770 Uint16 type = lnk->type;
1771
1772 ASSERT(!c_p || c_p->common.id == lnk->other.item);
1773 ASSERT(lnk);
1774 ASSERT(is_internal_pid(to));
1775
1776 sig = (ErtsSignal *) lnk;
1777 sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_LINK,
1778 type, 0);
1779
1780 return proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_LINK);
1781 }
1782
1783 ErtsSigUnlinkOp *
erts_proc_sig_make_unlink_op(Process * c_p,Eterm from)1784 erts_proc_sig_make_unlink_op(Process *c_p, Eterm from)
1785 {
1786 Uint64 id;
1787 ErtsSigUnlinkOp *sulnk;
1788 if (c_p)
1789 id = erts_proc_sig_new_unlink_id(c_p);
1790 else {
1791 /*
1792 * *Only* ports are allowed to call without current
1793 * process pointer...
1794 */
1795 ASSERT(is_internal_port(from));
1796 id = (Uint64) erts_raw_get_unique_monotonic_integer();
1797 if (id == 0)
1798 id = (Uint64) erts_raw_get_unique_monotonic_integer();
1799 }
1800
1801 ASSERT(id != 0);
1802
1803 sulnk = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigUnlinkOp));
1804 sulnk->from = from;
1805 sulnk->id = id;
1806
1807 return sulnk;
1808 }
1809
1810 Uint64
erts_proc_sig_send_unlink(Process * c_p,Eterm from,ErtsLink * lnk)1811 erts_proc_sig_send_unlink(Process *c_p, Eterm from, ErtsLink *lnk)
1812 {
1813 int res;
1814 ErtsSignal *sig;
1815 Eterm to;
1816 ErtsSigUnlinkOp *sulnk;
1817 Uint64 id;
1818
1819 ASSERT(lnk->type != ERTS_LNK_TYPE_PROC
1820 || lnk->type != ERTS_LNK_TYPE_PORT);
1821 ASSERT(lnk->flags & ERTS_ML_FLG_IN_TABLE);
1822
1823 sulnk = erts_proc_sig_make_unlink_op(c_p, from);
1824 id = sulnk->id;
1825 sig = (ErtsSignal *) sulnk;
1826 to = lnk->other.item;
1827 sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_UNLINK,
1828 lnk->type, 0);
1829
1830 ASSERT(is_internal_pid(to));
1831 res = proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_UNLINK);
1832 if (res == 0) {
1833 erts_proc_sig_destroy_unlink_op(sulnk);
1834 return 0;
1835 }
1836 return id;
1837 }
1838
1839 void
erts_proc_sig_send_unlink_ack(Process * c_p,Eterm from,ErtsSigUnlinkOp * sulnk)1840 erts_proc_sig_send_unlink_ack(Process *c_p, Eterm from, ErtsSigUnlinkOp *sulnk)
1841 {
1842 ErtsSignal *sig = (ErtsSignal *) sulnk;
1843 Eterm to = sulnk->from;
1844 Uint16 type;
1845
1846 ASSERT(is_internal_pid(to));
1847 ASSERT(is_internal_pid(from) || is_internal_port(from));
1848
1849 sulnk->from = from;
1850 type = is_internal_pid(from) ? ERTS_LNK_TYPE_PROC : ERTS_LNK_TYPE_PORT;
1851 sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_UNLINK_ACK,
1852 type, 0);
1853 if (!proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_UNLINK_ACK))
1854 erts_proc_sig_destroy_unlink_op(sulnk);
1855 }
1856
1857 void
erts_proc_sig_send_dist_link_exit(DistEntry * dep,Eterm from,Eterm to,ErtsDistExternal * dist_ext,ErlHeapFragment * hfrag,Eterm reason,Eterm token)1858 erts_proc_sig_send_dist_link_exit(DistEntry *dep,
1859 Eterm from, Eterm to,
1860 ErtsDistExternal *dist_ext,
1861 ErlHeapFragment *hfrag,
1862 Eterm reason, Eterm token)
1863 {
1864 send_gen_exit_signal(NULL, dep->sysname, from, to, ERTS_SIG_Q_OP_EXIT_LINKED,
1865 reason, dist_ext, hfrag, NIL, token, 0, 0, 0);
1866
1867 }
1868
1869 static void
1870 reply_dist_unlink_ack(Process *c_p, ErtsSigDistUnlinkOp *sdulnk);
1871
1872 void
erts_proc_sig_send_dist_unlink(DistEntry * dep,Uint32 conn_id,Eterm from,Eterm to,Uint64 id)1873 erts_proc_sig_send_dist_unlink(DistEntry *dep, Uint32 conn_id,
1874 Eterm from, Eterm to, Uint64 id)
1875 {
1876 /* Remote to local */
1877 ErtsSignal *sig;
1878
1879 ASSERT(is_internal_pid(to));
1880 ASSERT(is_external_pid(from));
1881 ASSERT(dep == external_pid_dist_entry(from));
1882
1883 sig = (ErtsSignal *) make_sig_dist_unlink_op(ERTS_SIG_Q_OP_UNLINK,
1884 dep->sysname, conn_id,
1885 to, from, id);
1886
1887 if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_UNLINK))
1888 reply_dist_unlink_ack(NULL, (ErtsSigDistUnlinkOp *) sig);
1889 }
1890
1891 void
erts_proc_sig_send_dist_unlink_ack(Process * c_p,DistEntry * dep,Uint32 conn_id,Eterm from,Eterm to,Uint64 id)1892 erts_proc_sig_send_dist_unlink_ack(Process *c_p, DistEntry *dep,
1893 Uint32 conn_id, Eterm from, Eterm to,
1894 Uint64 id)
1895 {
1896 /* Remote to local */
1897 ErtsSignal *sig;
1898
1899 ASSERT(is_internal_pid(to));
1900 ASSERT(is_external_pid(from));
1901 ASSERT(dep == external_pid_dist_entry(from));
1902
1903 sig = (ErtsSignal *) make_sig_dist_unlink_op(ERTS_SIG_Q_OP_UNLINK_ACK,
1904 dep->sysname, conn_id,
1905 to, from, id);
1906
1907 if (!proc_queue_signal(c_p, to, sig, ERTS_SIG_Q_OP_UNLINK_ACK))
1908 destroy_sig_dist_unlink_op((ErtsSigDistUnlinkOp *) sig);
1909 }
1910
/*
 * Send an unlink-ack for 'sdulnk' back to the remote process it came
 * from (local to remote direction), then destroy the operation struct.
 * The ack is only sent on the same dist connection the unlink arrived
 * on; if that connection is gone the ack is silently dropped.
 */
static void
reply_dist_unlink_ack(Process *c_p, ErtsSigDistUnlinkOp *sdulnk)
{
    /* Local to remote */
    ASSERT(is_external_pid(sdulnk->remote));

    /*
     * 'id' is zero if the other side does not understand
     * unlink-ack signals...
     */
    if (sdulnk->id) {
        DistEntry *dep = external_pid_dist_entry(sdulnk->remote);

        /*
         * Do not set up a new connection; only send unlink ack
         * on the same connection which the unlink operation was
         * received on...
         */
        if (dep != erts_this_dist_entry && sdulnk->nodename == dep->sysname) {
            ErtsDSigSendContext ctx;
            int code = erts_dsig_prepare(&ctx, dep, NULL, 0,
                                         ERTS_DSP_NO_LOCK, 1, 1, 0);
            switch (code) {
            case ERTS_DSIG_PREP_CONNECTED:
            case ERTS_DSIG_PREP_PENDING:
                /* Verify it is still the same connection instance... */
                if (sdulnk->connection_id == ctx.connection_id) {
                    code = erts_dsig_send_unlink_ack(&ctx,
                                                     sdulnk->local,
                                                     sdulnk->remote,
                                                     sdulnk->id);
                    ASSERT(code == ERTS_DSIG_SEND_OK);
                }
                break;
            default:
                break;
            }
        }
    }

    destroy_sig_dist_unlink_op(sdulnk);
}
1952
1953 void
erts_proc_sig_send_dist_monitor_down(DistEntry * dep,Eterm ref,Eterm from,Eterm to,ErtsDistExternal * dist_ext,ErlHeapFragment * hfrag,Eterm reason)1954 erts_proc_sig_send_dist_monitor_down(DistEntry *dep, Eterm ref,
1955 Eterm from, Eterm to,
1956 ErtsDistExternal *dist_ext,
1957 ErlHeapFragment *hfrag,
1958 Eterm reason)
1959 {
1960 Eterm monitored, heap[3];
1961 if (is_atom(from))
1962 monitored = TUPLE2(&heap[0], from, dep->sysname);
1963 else
1964 monitored = from;
1965 send_gen_exit_signal(NULL, dep->sysname, monitored,
1966 to, ERTS_SIG_Q_OP_MONITOR_DOWN,
1967 reason, dist_ext, hfrag, ref, NIL, 0, 0, 0);
1968 }
1969
/*
 * Send a monitor 'DOWN' signal to the local monitoring process
 * identified by the target monitor 'mon', with exit reason 'reason'.
 *
 * Fast path: for immediate reasons (and for suspend monitors) the
 * monitor structure itself is reused as the signal, avoiding any
 * allocation. Otherwise a full gen-exit signal is built. In either
 * case responsibility for 'mon' is handed over or released before
 * returning.
 */
void
erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
{
    Eterm to;

    ASSERT(erts_monitor_is_target(mon));
    ASSERT(!erts_monitor_is_in_table(mon));

    to = mon->other.item;
    ASSERT(is_internal_pid(to));

    if (is_immed(reason)) {
        /* Pass signal using old monitor structure... */
        ErtsSignal *sig;

    send_using_monitor_struct:

        mon->other.item = reason; /* Pass immed reason via other.item... */
        sig = (ErtsSignal *) mon;
        sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR_DOWN,
                                                 mon->type, 0);
        if (proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_MONITOR_DOWN))
            return; /* receiver will destroy mon structure */
        /* Receiver gone; fall through to the release at the end. */
    }
    else {
        ErtsMonitorData *mdp = erts_monitor_to_data(mon);
        Eterm from_tag, monitored, heap[3];

        if (mon->type == ERTS_MON_TYPE_SUSPEND) {
            /*
             * Set reason to 'undefined', since exit
             * reason is not used for suspend monitors,
             * and send using monitor structure. This
             * since we don't want to trigger
             * unnecessary memory allocation etc...
             */
            reason = am_undefined;
            goto send_using_monitor_struct;
        }

        if (!(mon->flags & ERTS_ML_FLG_NAME)) {
            /* Monitored by pid/port: 'DOWN' carries that identifier. */
            from_tag = monitored = mdp->origin.other.item;
            if (is_external_pid(from_tag)) {
                DistEntry *dep = external_pid_dist_entry(from_tag);
                from_tag = dep->sysname;
            }
        }
        else {
            /* Monitored by registered name: 'DOWN' carries
             * {Name, Node}; the tuple is built on the C stack and
             * copied by send_gen_exit_signal(). */
            ErtsMonitorDataExtended *mdep;
            Eterm name, node;
            mdep = (ErtsMonitorDataExtended *) mdp;
            name = mdep->u.name;
            ASSERT(is_atom(name));
            if (mdep->dist) {
                node = mdep->dist->nodename;
                from_tag = node;
            }
            else {
                node = erts_this_dist_entry->sysname;
                from_tag = mdp->origin.other.item;
            }
            ASSERT(is_internal_port(from_tag)
                   || is_internal_pid(from_tag)
                   || is_atom(from_tag));
            monitored = TUPLE2(&heap[0], name, node);
        }
        send_gen_exit_signal(NULL, from_tag, monitored,
                             to, ERTS_SIG_Q_OP_MONITOR_DOWN,
                             reason, NULL, NULL, mdp->ref, NIL,
                             0, 0, 0);
    }
    erts_monitor_release(mon);
}
2043
/*
 * Deliver a demonitor signal that arrived over the distribution to the
 * local monitored process 'to'. The monitor reference is copied into a
 * dedicated ErtsSigDistProcDemonitor allocation (variable-sized to hold
 * the ref's heap data).
 */
void
erts_proc_sig_send_dist_demonitor(Eterm to, Eterm ref)
{
    ErtsSigDistProcDemonitor *dmon;
    ErtsSignal *sig;
    Eterm *hp;
    ErlOffHeap oh;
    size_t size;

    ERTS_INIT_OFF_HEAP(&oh);

    ASSERT(is_internal_pid(to));

    /* Struct declares a one-Eterm 'heap' member; subtract it and add
     * the actual size needed for the ref. */
    size = sizeof(ErtsSigDistProcDemonitor) - sizeof(Eterm);
    ASSERT(is_ref(ref));
    size += NC_HEAP_SIZE(ref)*sizeof(Eterm);

    dmon = erts_alloc(ERTS_ALC_T_DIST_DEMONITOR, size);

    hp = &dmon->heap[0];
    dmon->ref = STORE_NC(&hp, &oh, ref);
    sig = (ErtsSignal *) dmon;

    sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_DEMONITOR,
                                             ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR,
                                             0);

    /* Destroy the signal here if the receiver no longer exists. */
    if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_DEMONITOR))
        destroy_dist_proc_demonitor(dmon);
}
2074
2075 void
erts_proc_sig_send_demonitor(ErtsMonitor * mon)2076 erts_proc_sig_send_demonitor(ErtsMonitor *mon)
2077 {
2078 ErtsSignal *sig = (ErtsSignal *) mon;
2079 Uint16 type = mon->type;
2080 Eterm to = mon->other.item;
2081
2082 ASSERT(is_internal_pid(to));
2083 ASSERT(erts_monitor_is_origin(mon));
2084 ASSERT(!erts_monitor_is_in_table(mon));
2085
2086 sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_DEMONITOR,
2087 type, 0);
2088
2089 if (!proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_DEMONITOR))
2090 erts_monitor_release(mon);
2091 }
2092
2093 int
erts_proc_sig_send_monitor(ErtsMonitor * mon,Eterm to)2094 erts_proc_sig_send_monitor(ErtsMonitor *mon, Eterm to)
2095 {
2096 ErtsSignal *sig = (ErtsSignal *) mon;
2097 Uint16 type = mon->type;
2098
2099 ASSERT(is_internal_pid(to) || to == am_undefined);
2100 ASSERT(erts_monitor_is_target(mon));
2101
2102 sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR,
2103 type, 0);
2104
2105 return proc_queue_signal(NULL, to, sig, ERTS_SIG_Q_OP_MONITOR);
2106 }
2107
2108 void
erts_proc_sig_send_trace_change(Eterm to,Uint on,Uint off,Eterm tracer)2109 erts_proc_sig_send_trace_change(Eterm to, Uint on, Uint off, Eterm tracer)
2110 {
2111 ErtsSigTraceInfo *ti;
2112 Eterm tag;
2113
2114 ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
2115 tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
2116 ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
2117 0);
2118
2119 ti->common.tag = tag;
2120 ti->flags_off = off;
2121 ti->flags_on = on;
2122 ti->tracer = NIL;
2123 if (is_not_nil(tracer))
2124 erts_tracer_update(&ti->tracer, tracer);
2125
2126 if (!proc_queue_signal(NULL, to, (ErtsSignal *) ti,
2127 ERTS_SIG_Q_OP_TRACE_CHANGE_STATE))
2128 destroy_trace_info(ti);
2129 }
2130
/*
 * Send a group-leader change signal to process 'to', setting its group
 * leader to 'gl'. When sent from a process ('c_p' non-NULL, 'ref' an
 * internal ref) the receiver replies to the sender; when sent by the
 * runtime ('c_p' NULL, 'ref' NIL) no reply is produced.
 *
 * The signal struct is shared between sender and receiver; the flag
 * word tracks which parties still reference it and whether it is still
 * active, so the last party to let go frees it.
 */
void
erts_proc_sig_send_group_leader(Process *c_p, Eterm to, Eterm gl, Eterm ref)
{
    int res;
    ErtsSigGroupLeader *sgl;
    Eterm *hp;
    Uint gl_sz, ref_sz, size;
    erts_aint_t init_flags = ERTS_SIG_GL_FLG_ACTIVE|ERTS_SIG_GL_FLG_RECEIVER;
    if (c_p)
        init_flags |= ERTS_SIG_GL_FLG_SENDER;

    ASSERT(c_p ? is_internal_ref(ref) : ref == NIL);

    gl_sz = is_immed(gl) ? 0 : size_object(gl);
    ref_sz = is_immed(ref) ? 0 : size_object(ref);

    size = sizeof(ErtsSigGroupLeader);

    /* NOTE(review): the "- 1" presumably accounts for a one-Eterm heap
     * array member already included in ErtsSigGroupLeader -- confirm
     * against the struct declaration. */
    size += (gl_sz + ref_sz - 1) * sizeof(Eterm);

    sgl = erts_alloc(ERTS_ALC_T_SIG_DATA, size);

    erts_atomic_init_nob(&sgl->flags, init_flags);

    ERTS_INIT_OFF_HEAP(&sgl->oh);

    hp = &sgl->heap[0];

    sgl->group_leader = is_immed(gl) ? gl : copy_struct(gl, gl_sz, &hp, &sgl->oh);
    sgl->reply_to = c_p ? c_p->common.id : NIL;
    sgl->ref = is_immed(ref) ? ref : copy_struct(ref, ref_sz, &hp, &sgl->oh);

    sgl->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_GROUP_LEADER,
                                             ERTS_SIG_Q_TYPE_UNDEFINED,
                                             0);

    res = proc_queue_signal(c_p, to, (ErtsSignal *) sgl,
                            ERTS_SIG_Q_OP_GROUP_LEADER);

    if (!res)
        destroy_sig_group_leader(sgl);
    else if (c_p) {
        /* Drop the sender's reference; if priority elevation failed we
         * also deactivate the signal, and if no references remain we
         * free it here. */
        erts_aint_t flags, rm_flags = ERTS_SIG_GL_FLG_SENDER;
        int prio_res = maybe_elevate_sig_handling_prio(c_p, to);
        if (!prio_res)
            rm_flags |= ERTS_SIG_GL_FLG_ACTIVE;
        flags = erts_atomic_read_band_nob(&sgl->flags, ~rm_flags);
        if (!prio_res && (flags & ERTS_SIG_GL_FLG_ACTIVE))
            res = 0; /* We deactivated signal... */
        if ((flags & ~rm_flags) == 0)
            destroy_sig_group_leader(sgl);
    }

    /* Signal never delivered (or deactivated): produce the failure
     * reply to the caller ourselves. */
    if (!res && c_p)
        group_leader_reply(c_p, c_p->common.id, ref, 0);
}
2187
int
erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref)
{
    /*
     * Send an is-alive request signal to 'to'. The signal carries a
     * fully prepared reply message '{Ref, false}' in the used part of
     * its combined heap fragment, and an ErtsIsAliveRequest struct in
     * the unused tail. The receiver only needs to patch the result to
     * 'true' if it is alive (see is_alive_response()).
     *
     * Returns a non-zero value if the signal was enqueued; otherwise
     * zero, in which case the 'false' reply has already been queued
     * on the requesting process 'c_p'.
     */
    ErlHeapFragment *hfrag;
    Uint hsz;
    Eterm *hp, *start_hp, ref_cpy, msg;
    ErlOffHeap *ohp;
    ErtsMessage *mp;
    ErtsIsAliveRequest *alive_req;

    ASSERT(is_internal_ordinary_ref(ref));

    /* Room for the ref copy, a 2-tuple, and the trailing request
     * struct... */
    hsz = ERTS_REF_THING_SIZE + 3 + sizeof(ErtsIsAliveRequest)/sizeof(Eterm);

    mp = erts_alloc_message(hsz, &hp);
    hfrag = &mp->hfrag;
    mp->next = NULL;
    ohp = &hfrag->off_heap;
    start_hp = hp;

    ref_cpy = STORE_NC(&hp, ohp, ref);
    msg = TUPLE2(hp, ref_cpy, am_false); /* default res 'false' */
    hp += 3;

    /* Only the reply message counts as used; the request struct lives
     * beyond 'used_size'... */
    hfrag->used_size = hp - start_hp;

    alive_req = (ErtsIsAliveRequest *) (char *) hp;
    alive_req->message = msg;
    alive_req->requester = c_p->common.id;

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_IS_ALIVE,
                                                  ERTS_SIG_Q_TYPE_UNDEFINED,
                                                  0);

    if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_IS_ALIVE)) {
        (void) maybe_elevate_sig_handling_prio(c_p, to);
        return !0;
    }
    else {
        /* It wasn't alive; reply to ourselves... */
        mp->next = NULL;
        mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
        erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, mp, msg, am_system);
        return 0;
    }
}
2234
/*
 * Send a process-info request signal to 'to' on behalf of 'c_p',
 * asking for the info items listed in 'item_ix' (length 'len').
 * When 'need_msgq_len' is set, a message-queue-length marker is
 * embedded so the receiver can report an accurate queue length.
 *
 * Returns non-zero if the signal was enqueued; zero if the target
 * was not alive (in which case the request struct is freed here).
 */
int
erts_proc_sig_send_process_info_request(Process *c_p,
                                        Eterm to,
                                        int *item_ix,
                                        int len,
                                        int need_msgq_len,
                                        int flags,
                                        Uint reserve_size,
                                        Eterm ref)
{
    /* The struct ends in a one-element item_ix array; extend it to
     * hold all 'len' indices... */
    Uint size = sizeof(ErtsProcessInfoSig) + (len - 1) * sizeof(int);
    ErtsProcessInfoSig *pis = erts_alloc(ERTS_ALC_T_SIG_DATA, size);
    int res;

    ASSERT(c_p);
    ASSERT(item_ix);
    ASSERT(len > 0);
    ASSERT(is_internal_ordinary_ref(ref));

    pis->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_PROCESS_INFO,
                                             0, 0);

    if (!need_msgq_len)
        pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE;
    else {
        /* Prepare the embedded queue-length marker signal... */
        pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC;
        pis->marker.common.next = NULL;
        pis->marker.common.specific.next = NULL;
        pis->marker.common.tag = ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK;
        pis->marker.refc = 0;
        pis->marker.delayed_len = 0;
        pis->marker.len_offset = 0;
    }
    pis->requester = c_p->common.id;
    /* Copy the ref heap data into the signal itself and make the
     * signal's 'ref' term point at that internal copy... */
    sys_memcpy((void *) &pis->oref_thing,
               (void *) internal_ref_val(ref),
               sizeof(ErtsORefThing));
    pis->ref = make_internal_ref((char *) &pis->oref_thing);
    pis->reserve_size = reserve_size;
    pis->len = len;
    pis->flags = flags;
    sys_memcpy((void *) &pis->item_ix[0],
               (void *) item_ix,
               sizeof(int)*len);
    res = proc_queue_signal(c_p, to, (ErtsSignal *) pis,
                            ERTS_SIG_Q_OP_PROCESS_INFO);
    if (res)
        (void) maybe_elevate_sig_handling_prio(c_p, to);
    else
        erts_free(ERTS_ALC_T_SIG_DATA, pis);
    return res;
}
2287
void
erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to, Eterm tag, Eterm reply)
{
    /*
     * Send a synchronous suspend request signal to 'to'. The signal
     * carries a prepared reply message '{Tag, Reply}' in the used
     * part of its combined heap fragment and an
     * ErtsSyncSuspendRequest struct in the unused tail.
     *
     * A non-value 'reply' marks the request asynchronous, with
     * 'suspended' as the prepared default reply.
     */
    ErlHeapFragment *hfrag;
    Uint hsz, tag_sz;
    Eterm *hp, *start_hp, tag_cpy, msg, default_reply;
    ErlOffHeap *ohp;
    ErtsMessage *mp;
    ErtsSyncSuspendRequest *ssusp;
    int async_suspend;

    tag_sz = size_object(tag);

    /* Room for the tag copy, a 2-tuple, and the trailing request
     * struct... */
    hsz = 3 + tag_sz + sizeof(ErtsSyncSuspendRequest)/sizeof(Eterm);

    mp = erts_alloc_message(hsz, &hp);
    hfrag = &mp->hfrag;
    mp->next = NULL;
    ohp = &hfrag->off_heap;
    start_hp = hp;

    tag_cpy = copy_struct(tag, tag_sz, &hp, ohp);

    async_suspend = is_non_value(reply);
    default_reply = async_suspend ? am_suspended : reply;

    msg = TUPLE2(hp, tag_cpy, default_reply);
    hp += 3;

    /* Only the reply message counts as used; the request struct lives
     * beyond 'used_size'... */
    hfrag->used_size = hp - start_hp;

    ssusp = (ErtsSyncSuspendRequest *) (char *) hp;
    ssusp->message = msg;
    ssusp->requester = c_p->common.id;
    ssusp->async = async_suspend;

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_SYNC_SUSPEND,
                                                  ERTS_SIG_Q_TYPE_UNDEFINED,
                                                  0);

    if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_SYNC_SUSPEND))
        (void) maybe_elevate_sig_handling_prio(c_p, to);
    else {
        Eterm *tp;
        /* It wasn't alive; reply to ourselves... */
        mp->next = NULL;
        mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
        tp = tuple_val(msg);
        /* Patch the prepared reply in place before delivering it... */
        tp[2] = async_suspend ? am_badarg : am_exited;
        erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
                           mp, msg, am_system);
    }
}
2341
/*
 * Send a distributed spawn-reply signal to the local process 'to'
 * (the originator of a spawn_request() towards 'node'). 'result' is
 * either the spawned (external) pid, or an error atom. 'lnk' is the
 * link half set up for the spawned process, if any, and 'token' is
 * the seq-trace token to attach.
 *
 * Returns non-zero if the signal was enqueued; zero if 'to' was not
 * alive (in which case everything is cleaned up here).
 */
int
erts_proc_sig_send_dist_spawn_reply(Eterm node,
                                    Eterm ref,
                                    Eterm to,
                                    ErtsLink *lnk,
                                    Eterm result,
                                    Eterm token)
{
    Uint hsz, ref_sz, result_sz, token_sz;
    ErtsDistSpawnReplySigData *datap;
    Eterm msg, ref_copy, result_copy, res_type,
        token_copy, *hp, *hp_start, *patch_point;
    ErlHeapFragment *hfrag;
    ErlOffHeap *ohp;
    ErtsMessage *mp;

    ASSERT(is_atom(node));

    /*
     * A response message to a spawn_request() operation
     * looks like this:
     *     {Tag, Ref, ok|error, Pid|ErrorAtom}
     *
     * Tag is stored in its own heap fragment in the
     * (pending) monitor struct and can be attached
     * when creating the resulting message on
     * reception of this signal.
     */

    hsz = ref_sz = size_object(ref);
    hsz += 5 /* 4-tuple */;
    if (is_atom(result)) {
        res_type = am_error;
        result_sz = 0;
    }
    else {
        ASSERT(is_external_pid(result));
        res_type = am_ok;
        result_sz = size_object(result);
        hsz += result_sz;
    }

    token_sz = is_immed(token) ? 0 : size_object(token);
    hsz += token_sz;

    /* Trailing signal-data struct in the same allocation... */
    hsz += sizeof(ErtsDistSpawnReplySigData)/sizeof(Eterm);

    mp = erts_alloc_message(hsz, &hp);
    hp_start = hp;
    hfrag = &mp->hfrag;
    mp->next = NULL;
    ohp = &hfrag->off_heap;

    ref_copy = copy_struct(ref, ref_sz, &hp, ohp);
    result_copy = (is_atom(result)
                   ? result
                   : copy_struct(result, result_sz, &hp, ohp));
    /* Tag slot is a placeholder; the receiver patches it via
     * 'patch_point' when building the final message... */
    msg = TUPLE4(hp,
                 am_undefined,
                 ref_copy,
                 res_type,
                 result_copy);

    patch_point = &hp[1];
    ASSERT(*patch_point == am_undefined);

    hp += 5;

    token_copy = (!token_sz
                  ? token
                  : copy_struct(token, token_sz, &hp, ohp));

    hfrag->used_size = hp - hp_start;

    datap = (ErtsDistSpawnReplySigData *) (char *) hp;
    datap->message = msg;
    datap->ref = ref_copy;
    datap->result = result_copy;
    datap->link = lnk;
    datap->patch_point = patch_point;

    ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_DIST_SPAWN_REPLY,
                                                  ERTS_SIG_Q_TYPE_UNDEFINED,
                                                  0);
    ERL_MESSAGE_FROM(mp) = node;
    ERL_MESSAGE_TOKEN(mp) = token_copy;
    if (!proc_queue_signal(NULL, to, (ErtsSignal *) mp,
                           ERTS_SIG_Q_OP_DIST_SPAWN_REPLY)) {
        /* Receiver gone; restore message shape so the generic message
         * cleanup can reclaim the heap fragment... */
        mp->next = NULL;
        mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
        ERL_MESSAGE_TERM(mp) = msg;
        erts_cleanup_messages(mp);
        return 0;
    }

    return !0;
}
2439
/*
 * Send an RPC signal to 'to', requesting it to execute 'func(arg)'
 * in its own context. If 'reply' is set, a reference is created and
 * returned; the result is later delivered to 'c_p' as a
 * '{Ref, Result}' message (see handle_rpc()).
 *
 * Returns the reference ('am_ok' when no reply is requested), or
 * THE_NON_VALUE if 'to' was not alive.
 */
Eterm
erts_proc_sig_send_rpc_request(Process *c_p,
                               Eterm to,
                               int reply,
                               Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
                               void *arg)
{
    Eterm res;
    ErtsProcSigRPC *sig = erts_alloc(ERTS_ALC_T_SIG_DATA,
                                     sizeof(ErtsProcSigRPC));
    sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_RPC,
                                             ERTS_SIG_Q_TYPE_UNDEFINED,
                                             0);
    sig->requester = reply ? c_p->common.id : NIL;
    sig->func = func;
    sig->arg = arg;

    if (!reply) {
        res = am_ok;
        sig->ref = am_ok;
    }
    else {
        res = erts_make_ref(c_p);

        /* Store the ref heap data inside the signal itself and make
         * the signal's 'ref' term point at that copy... */
        sys_memcpy((void *) &sig->oref_thing,
                   (void *) internal_ref_val(res),
                   sizeof(ErtsORefThing));

        sig->ref = make_internal_ref(&sig->oref_thing);

        /* Position the receive pointer at queue end so the caller can
         * efficiently wait for the reply... */
        erts_msgq_set_save_end(c_p);
    }

    if (proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_RPC))
        (void) maybe_elevate_sig_handling_prio(c_p, to);
    else {
        erts_free(ERTS_ALC_T_SIG_DATA, sig);
        res = THE_NON_VALUE;
        /* Undo the save-pointer adjustment made above... */
        if (reply)
            erts_msgq_set_save_first(c_p);
    }

    return res;
}
2484
2485
void
erts_proc_sig_send_cla_request(Process *c_p, Eterm to, Eterm req_id)
{
    /*
     * Send a copy-literal-area adjust-message-queue signal to 'to' on
     * behalf of 'c_p'. When the receiver is done it replies with a
     * message built by reusing this very signal (hence the minimum
     * size requirement below). If the receiver is not alive, the 'ok'
     * reply is produced immediately here.
     */
    ErtsMessage *sig;
    ErlHeapFragment *hfrag;
    ErlOffHeap *ohp;
    Eterm req_id_cpy, *hp, *start_hp;
    Uint hsz, req_id_sz;
    ErtsCLAData *cla;

    hsz = sizeof(ErtsCLAData)/sizeof(Uint);
    if (hsz < 4) {
        /*
         * Need room to overwrite the ErtsCLAData part with a
         * 3-tuple when reusing the signal for the reply...
         */
        hsz = 4;
    }

    req_id_sz = size_object(req_id);
    hsz += req_id_sz;

    sig = erts_alloc_message(hsz, &hp);
    hfrag = &sig->hfrag;
    sig->next = NULL;
    ohp = &hfrag->off_heap;
    start_hp = hp;

    req_id_cpy = copy_struct(req_id, req_id_sz, &hp, ohp);

    /* The CLA struct lives beyond 'used_size' in the fragment... */
    cla = (ErtsCLAData *) (char *) hp;
    hfrag->used_size = hp - start_hp;

    cla->requester = c_p->common.id;
    cla->request_id = req_id_cpy;

    ERL_MESSAGE_TERM(sig) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_ADJ_MSGQ,
                                                   ERTS_SIG_Q_TYPE_CLA,
                                                   0);
    ERL_MESSAGE_FROM(sig) = c_p->common.id;
    ERL_MESSAGE_TOKEN(sig) = am_undefined;
#ifdef USE_VM_PROBES
    ERL_MESSAGE_DT_UTAG(sig) = NIL;
#endif

    if (!proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_ADJ_MSGQ))
        send_cla_reply(c_p, sig, c_p->common.id, req_id_cpy, am_ok);
}
2534
2535 void
erts_proc_sig_send_move_msgq_off_heap(Process * c_p,Eterm to)2536 erts_proc_sig_send_move_msgq_off_heap(Process *c_p, Eterm to)
2537 {
2538 ErtsMessage *sig = erts_alloc_message(0, NULL);
2539 ASSERT(is_internal_pid(to));
2540 ERL_MESSAGE_TERM(sig) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_ADJ_MSGQ,
2541 ERTS_SIG_Q_TYPE_OFF_HEAP,
2542 0);
2543 ERL_MESSAGE_FROM(sig) = to;
2544 ERL_MESSAGE_TOKEN(sig) = am_undefined;
2545 #ifdef USE_VM_PROBES
2546 ERL_MESSAGE_DT_UTAG(sig) = NIL;
2547 #endif
2548 if (!proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_ADJ_MSGQ)) {
2549 sig->next = NULL;
2550 erts_cleanup_messages(sig);
2551 }
2552 }
2553
/*
 * Execute an RPC signal (see erts_proc_sig_send_rpc_request()) in the
 * context of 'c_p' and, if a requester exists, send it the result as
 * a '{Ref, Result}' message. 'cnt' is the signal-count consumed so
 * far and 'limit' the budget for this handling pass; '*yieldp' is set
 * when the callback demands a yield.
 *
 * Returns the signal-count consumed by the callback.
 */
static int
handle_rpc(Process *c_p, ErtsProcSigRPC *rpc, int cnt, int limit, int *yieldp)
{
    Process *rp;
    ErlHeapFragment *bp = NULL;
    Eterm res;
    Uint hsz;
    int reds, out_cnt;

    /*
     * reds in:
     *     Reductions left.
     *
     * reds out:
     *     Absolute value of reds out equals consumed
     *     amount of reds. If a negative value, force
     *     a yield.
     */

    /* Translate remaining signal-count budget into reductions for the
     * callback; always grant at least one... */
    reds = (limit - cnt) / ERTS_SIG_REDS_CNT_FACTOR;
    if (reds <= 0)
        reds = 1;

    res = (*rpc->func)(c_p, rpc->arg, &reds, &bp);

    if (reds < 0) {
        /* Force yield... */
        *yieldp = !0;
        reds *= -1;
    }

    out_cnt = reds*ERTS_SIG_REDS_CNT_FACTOR;

    /* Room for the ref thing plus a 2-tuple... */
    hsz = 3 + sizeof(ErtsORefThing)/sizeof(Eterm);

    rp = erts_proc_lookup(rpc->requester);
    if (!rp) {
        /* No (live) requester; drop any result heap fragment... */
        if (bp)
            free_message_buffer(bp);
    }
    else {
        Eterm *hp, msg, ref;
        ErtsMessage *mp = erts_alloc_message(hsz, &hp);

        /* Rebuild the reference from the copy stored in the signal... */
        sys_memcpy((void *) hp, (void *) &rpc->oref_thing,
                   sizeof(rpc->oref_thing));

        ref = make_internal_ref(hp);
        hp += sizeof(rpc->oref_thing)/sizeof(Eterm);
        msg = TUPLE2(hp, ref, res);

        /* Chain the callback's heap fragment (holding 'res') onto the
         * reply message... */
        mp->hfrag.next = bp;
        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        erts_queue_proc_message(c_p, rp, 0, mp, msg);
    }

    erts_free(ERTS_ALC_T_SIG_DATA, rpc);

    return out_cnt;
}
2614
static void
is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive)
{
    /*
     * Sender prepared the message for us. Just patch
     * the result if necessary. The default prepared
     * result is 'false'.
     */
    Process *rp;
    ErtsIsAliveRequest *alive_req;

    /* The request struct lives just past the used part of the
     * signal's combined heap fragment (see
     * erts_proc_sig_send_is_alive_request())... */
    alive_req = (ErtsIsAliveRequest *) (char *) (&mp->hfrag.mem[0]
                                                 + mp->hfrag.used_size);


    ASSERT(ERTS_SIG_IS_NON_MSG(mp));
    ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
           == ERTS_SIG_Q_OP_IS_ALIVE);
    ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
    ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
           >= sizeof(ErtsIsAliveRequest));
    ASSERT(is_internal_pid(alive_req->requester));
    ASSERT(alive_req->requester != c_p->common.id);
    ASSERT(is_tuple_arity(alive_req->message, 2));
    ASSERT(is_internal_ordinary_ref(tuple_val(alive_req->message)[1]));
    ASSERT(tuple_val(alive_req->message)[2] == am_false);

    /* Convert the signal back into an ordinary message... */
    ERL_MESSAGE_TERM(mp) = alive_req->message;
    mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
    mp->next = NULL;

    rp = erts_proc_lookup(alive_req->requester);
    if (!rp)
        erts_cleanup_messages(mp);
    else {
        if (is_alive) { /* patch result... */
            Eterm *tp = tuple_val(alive_req->message);
            tp[2] = am_true;
        }
        erts_queue_message(rp, 0, mp, alive_req->message, am_system);
    }
}
2657
2658
2659 static ERTS_INLINE void
adjust_tracing_state(Process * c_p,ErtsSigRecvTracing * tracing,int setup)2660 adjust_tracing_state(Process *c_p, ErtsSigRecvTracing *tracing, int setup)
2661 {
2662 if (!IS_TRACED(c_p) || (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE)) {
2663 tracing->messages.active = 0;
2664 tracing->messages.receive_trace = 0;
2665 tracing->messages.event = NULL;
2666 tracing->messages.next = NULL;
2667 tracing->procs = 0;
2668 tracing->active = 0;
2669 }
2670 else {
2671 Uint flgs = ERTS_TRACE_FLAGS(c_p);
2672 int procs_trace = !!(flgs & F_TRACE_PROCS);
2673 int recv_trace = !!(flgs & F_TRACE_RECEIVE);
2674 /* procs tracing enabled? */
2675
2676 tracing->procs = procs_trace;
2677
2678 /* message receive tracing enabled? */
2679 tracing->messages.receive_trace = recv_trace;
2680 if (!recv_trace)
2681 tracing->messages.event = NULL;
2682 else {
2683 if (tracing->messages.bp_ix < 0)
2684 tracing->messages.bp_ix = erts_active_bp_ix();
2685 tracing->messages.event = &erts_receive_tracing[tracing->messages.bp_ix];
2686 }
2687 if (setup) {
2688 if (recv_trace)
2689 tracing->messages.next = &c_p->sig_qs.cont;
2690 else
2691 tracing->messages.next = NULL;
2692 }
2693 tracing->messages.active = recv_trace;
2694 tracing->active = recv_trace | procs_trace;
2695 }
2696
2697 #if defined(USE_VM_PROBES)
2698 /* vm probe message_queued enabled? */
2699
2700 tracing->messages.vm_probes = DTRACE_ENABLED(message_queued);
2701 if (tracing->messages.vm_probes) {
2702 dtrace_proc_str(c_p, tracing->messages.receiver_name);
2703 tracing->messages.active = !0;
2704 tracing->active = !0;
2705 if (setup && !tracing->messages.next)
2706 tracing->messages.next = &c_p->sig_qs.cont;
2707 }
2708
2709 #endif
2710 }
2711
2712 static ERTS_INLINE void
setup_tracing_state(Process * c_p,ErtsSigRecvTracing * tracing)2713 setup_tracing_state(Process *c_p, ErtsSigRecvTracing *tracing)
2714 {
2715 tracing->messages.bp_ix = -1;
2716 adjust_tracing_state(c_p, tracing, !0);
2717 }
2718
static ERTS_INLINE void
remove_iq_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig)
{
    /*
     * Remove signal from message queue (inner queue).
     *
     * 'next_sig' is the predecessor's next-pointer, i.e. the pointer
     * currently pointing at 'sig'. Queue-tracking pointers that
     * referenced 'sig' are redirected to its successor.
     */
    ASSERT(c_p->sig_qs.cont_last != &sig->next);
    ASSERT(c_p->sig_qs.nmsigs.next != &sig->next);
    ASSERT(c_p->sig_qs.nmsigs.last != &sig->next);

    if (c_p->sig_qs.save == &sig->next)
        c_p->sig_qs.save = next_sig;
    if (c_p->sig_qs.last == &sig->next)
        c_p->sig_qs.last = next_sig;
    /* A following receive marker caches the address of its
     * predecessor's next-pointer; keep it in sync... */
    if (sig->next && ERTS_SIG_IS_RECV_MARKER(sig->next))
        ((ErtsRecvMarker *) sig->next)->prev_next = next_sig;
    *next_sig = sig->next;
}
2737
static ERTS_INLINE void
remove_mq_sig(Process *c_p, ErtsMessage *sig,
              ErtsMessage **next_sig, ErtsMessage ***next_nm_sig)
{
    /*
     * Remove signal from (middle) signal queue.
     *
     * 'next_sig' is the pointer currently pointing at 'sig';
     * 'next_nm_sig' is the in-progress non-message-signal cursor.
     * Any queue-tracking pointer referencing 'sig' is redirected to
     * its successor.
     */
    ASSERT(c_p->sig_qs.save != &sig->next);
    ASSERT(c_p->sig_qs.last != &sig->next);

    if (c_p->sig_qs.cont_last == &sig->next)
        c_p->sig_qs.cont_last = next_sig;
    if (*next_nm_sig == &sig->next)
        *next_nm_sig = next_sig;
    if (c_p->sig_qs.nmsigs.last == &sig->next)
        c_p->sig_qs.nmsigs.last = next_sig;

    *next_sig = sig->next;
}
2757
static ERTS_INLINE void
remove_nm_sig(Process *c_p, ErtsMessage *sig, ErtsMessage ***next_nm_sig)
{
    /*
     * Remove a non-message signal from the middle queue: advance the
     * non-message cursor past 'sig', then unlink 'sig' itself.
     */
    ErtsMessage **next_sig = *next_nm_sig;
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));
    ASSERT(*next_sig == sig);
    *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
    remove_mq_sig(c_p, sig, next_sig, next_nm_sig);
}
2767
static ERTS_INLINE void
convert_to_msg(Process *c_p, ErtsMessage *sig, ErtsMessage *msg,
               ErtsMessage ***next_nm_sig)
{
    /*
     * Replace the non-message signal 'sig' in the middle queue with
     * the ordinary message 'msg' (taking its place), and advance the
     * non-message cursor past it. 'sig' itself is not freed here.
     */
    ErtsMessage **next_sig = *next_nm_sig;
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));
    *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
    c_p->sig_qs.len++;
    *next_sig = msg;
    remove_mq_sig(c_p, sig, &msg->next, next_nm_sig);
}
2779
static ERTS_INLINE void
convert_to_msgs(Process *c_p, ErtsMessage *sig, Uint no_msgs,
                ErtsMessage *first_msg, ErtsMessage *last_msg,
                ErtsMessage ***next_nm_sig)
{
    /*
     * Replace the non-message signal 'sig' in the middle queue with
     * the message list 'first_msg'..'last_msg' ('no_msgs' messages),
     * and advance the non-message cursor past it. 'sig' itself is
     * not freed here.
     */
    ErtsMessage **next_sig = *next_nm_sig;
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));
    *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
    c_p->sig_qs.len += no_msgs;
    *next_sig = first_msg;
    remove_mq_sig(c_p, sig, &last_msg->next, next_nm_sig);
}
2792
static ERTS_INLINE void
insert_messages(Process *c_p, ErtsMessage **next, ErtsMessage *first,
                ErtsMessage *last, Uint no_msgs, ErtsMessage ***next_nm_sig)
{
    /*
     * Insert the message list 'first'..'last' ('no_msgs' messages)
     * into the queue at position 'next'. Queue-tracking pointers that
     * referenced the insertion point are moved to point after the
     * inserted list.
     */
    last->next = *next;
    if (c_p->sig_qs.cont_last == next)
        c_p->sig_qs.cont_last = &last->next;
    if (*next_nm_sig == next)
        *next_nm_sig = &last->next;
    if (c_p->sig_qs.nmsigs.last == next)
        c_p->sig_qs.nmsigs.last = &last->next;
    c_p->sig_qs.len += no_msgs;
    *next = first;
}
2807
2808 static ERTS_INLINE void
remove_mq_m_sig(Process * c_p,ErtsMessage * sig,ErtsMessage ** next_sig,ErtsMessage *** next_nm_sig)2809 remove_mq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig, ErtsMessage ***next_nm_sig)
2810 {
2811 /* Removing message... */
2812 ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
2813 c_p->sig_qs.len--;
2814 remove_mq_sig(c_p, sig, next_sig, next_nm_sig);
2815 }
2816
2817 static ERTS_INLINE void
remove_iq_m_sig(Process * c_p,ErtsMessage * sig,ErtsMessage ** next_sig)2818 remove_iq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig)
2819 {
2820 /* Removing message... */
2821 ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
2822 c_p->sig_qs.len--;
2823 remove_iq_sig(c_p, sig, next_sig);
2824 }
2825
static ERTS_INLINE void
convert_prepared_sig_to_msg_attached(Process *c_p, ErtsMessage *sig, Eterm msg,
                                     void *data_attached,
                                     ErtsMessage ***next_nm_sig)
{
    /*
     * Everything is already there except for the reference to
     * the message and the combined hfrag marker that needs to be
     * restored...
     *
     * Turns the already-prepared signal 'sig' into an ordinary
     * message in place: the non-message cursor is advanced past it,
     * its data pointer and message term are set, and the queue
     * length is bumped.
     */
    *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
    sig->data.attached = data_attached;
    ERL_MESSAGE_TERM(sig) = msg;
    c_p->sig_qs.len++;
}
2841
2842 static ERTS_INLINE void
convert_prepared_sig_to_msg(Process * c_p,ErtsMessage * sig,Eterm msg,ErtsMessage *** next_nm_sig)2843 convert_prepared_sig_to_msg(Process *c_p, ErtsMessage *sig, Eterm msg,
2844 ErtsMessage ***next_nm_sig)
2845 {
2846 convert_prepared_sig_to_msg_attached(c_p, sig, msg,
2847 ERTS_MSG_COMBINED_HFRAG,
2848 next_nm_sig);
2849 }
2850
static ERTS_INLINE void
convert_prepared_sig_to_external_msg(Process *c_p, ErtsMessage *sig,
                                     ErtsMessage ***next_nm_sig)
{
    /*
     * Everything is already there except for the reference to
     * the message and the combined hfrag marker that needs to be
     * restored...
     *
     * As convert_prepared_sig_to_msg(), but for a message still in
     * external format: the message term is left as the non-value and
     * 'data.attached' points at the heap fragment holding the data.
     * NOTE(review): presumably decoded lazily on first inspection —
     * confirm against the message handling code.
     */
    *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
    sig->data.attached = &sig->hfrag;
    ERL_MESSAGE_TERM(sig) = THE_NON_VALUE;
    c_p->sig_qs.len++;
}
2865
2866 static ERTS_INLINE Eterm
get_heap_frag_eterm(ErlHeapFragment ** hfpp,Eterm * valp)2867 get_heap_frag_eterm(ErlHeapFragment **hfpp, Eterm *valp)
2868 {
2869 Eterm term;
2870 ErlHeapFragment *hfp;
2871 ASSERT(hfpp);
2872 if (is_immed(*valp)) {
2873 *hfpp = NULL;
2874 term = *valp;
2875 }
2876 else {
2877 ASSERT(is_CP(*valp));
2878 *hfpp = hfp = (ErlHeapFragment *) cp_val(*valp);
2879 ASSERT(hfp->alloc_size == hfp->used_size + 1);
2880 term = hfp->mem[hfp->used_size];
2881 ASSERT(size_object(term) == hfp->used_size);
2882 }
2883 *valp = NIL;
2884 return term;
2885 }
2886
static ERTS_INLINE Eterm
save_heap_frag_eterm(Process *c_p, ErtsMessage *mp, Eterm *value)
{
    /*
     * Take ownership of the term in '*value' (see
     * get_heap_frag_eterm()) and attach its heap fragment, if any, to
     * the message 'mp' so it is kept alive with the message. If 'mp'
     * carries no fragment of its own, the fragment is instead linked
     * to the process 'c_p'.
     */
    ErlHeapFragment *hfrag;
    Eterm term = get_heap_frag_eterm(&hfrag, value);
    if (hfrag) {
        if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
            /* Chain onto the combined fragment... */
            hfrag->next = mp->hfrag.next;
            mp->hfrag.next = hfrag;
        }
        else if (!mp->data.heap_frag) {
            /* Message has no fragment; let the process own it... */
            erts_link_mbuf_to_proc(c_p, hfrag);
        }
        else {
            hfrag->next = mp->data.heap_frag;
            mp->data.heap_frag = hfrag;
        }
    }
    return term;
}
2907
static ERTS_INLINE Eterm
copy_heap_frag_eterm(Process *c_p, ErtsMessage *mp, Eterm value)
{
    /*
     * As save_heap_frag_eterm(), but 'value' is passed by value and
     * therefore a *copy* of the term is made; the original fragment
     * in 'value' is left untouched. The copy is placed on the process
     * heap when 'mp' has no attached data, otherwise in a fresh
     * fragment chained onto 'mp'.
     */
    ErlHeapFragment *hfrag;
    Eterm *hp, term_sz, term, term_cpy, val;
    val = value;
    term = get_heap_frag_eterm(&hfrag, &val);
    if (!hfrag)
        return term; /* immediate; nothing to copy */
    term_sz = hfrag->used_size;
    if (!mp->data.attached) {
        /* Message data is on the process heap; copy there... */
        hp = HAlloc(c_p, term_sz);
        term_cpy = copy_struct(term, term_sz, &hp, &c_p->off_heap);
    }
    else {
        ErlHeapFragment *hfrag_cpy = new_message_buffer(term_sz);
        hp = &hfrag_cpy->mem[0];
        term_cpy = copy_struct(term, term_sz, &hp, &hfrag_cpy->off_heap);
        if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
            hfrag_cpy->next = mp->hfrag.next;
            mp->hfrag.next = hfrag_cpy;
        }
        else {
            ASSERT(mp->data.heap_frag);
            hfrag_cpy->next = mp->data.heap_frag;
            mp->data.heap_frag = hfrag_cpy;
        }
    }
    return term_cpy;
}
2938
2939 /*
2940 * Receive markers
2941 */
2942
2943 #if defined(DEBUG) || defined(ERTS_PROC_SIG_HARD_DEBUG)
2944
2945 #define ERTS_SIG_RECV_MARK_HANDLED ((void *) 4711)
2946
2947 #define ERTS_SIG_DBG_IS_HANDLED_RECV_MARKER(S) \
2948 (ERTS_SIG_IS_RECV_MARKER((S)) \
2949 && (((ErtsSignal *) (S))->common.specific.attachment \
2950 == ERTS_SIG_RECV_MARK_HANDLED))
2951 #define ERTS_SIG_DBG_RECV_MARK_SET_HANDLED(S) \
2952 (((ErtsSignal *) (S))->common.specific.attachment \
2953 = ERTS_SIG_RECV_MARK_HANDLED)
2954
2955 #else
2956
2957 #define ERTS_SIG_DBG_IS_HANDLED_RECV_MARKER(S) 0
2958 #define ERTS_SIG_DBG_RECV_MARK_SET_HANDLED(S)
2959
2960 #endif
2961
static ERTS_INLINE void
recv_marker_deallocate(Process *c_p, ErtsRecvMarker *markp)
{
    /*
     * Release the receive marker 'markp' back to the process' marker
     * block: unlink it from the circular in-use list and push it onto
     * the free list. When it is the last in-use marker, the whole
     * block is deallocated instead.
     */
    ErtsRecvMarkerBlock *blkp = c_p->sig_qs.recv_mrk_blk;
    int ix, nix;

    ASSERT(blkp);
    ERTS_HDBG_CHK_RECV_MRKS(c_p);

    nix = markp->next_ix;
    ASSERT(nix >= 0);

    ix = ERTS_RECV_MARKER_IX__(blkp, markp);

    if (nix == ix) {
        /* Marker links to itself => last one in use; free the whole
         * block... */
        ASSERT(markp->prev_ix == ix);
        erts_free(ERTS_ALC_T_RECV_MARK_BLK, blkp);
        c_p->sig_qs.recv_mrk_blk = NULL;
    }
    else {
        int pix = markp->prev_ix;
        ASSERT(pix >= 0);

        /* 'am_undefined' marks an in-use-but-unreferenced marker... */
        if (blkp->ref[ix] == am_undefined) {
            ASSERT(blkp->unused > 0);
            blkp->unused--;
        }
#ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
        else if (blkp->ref[ix] == erts_old_recv_marker_id) {
            ASSERT(blkp->old_recv_marker_ix == ix);
            blkp->old_recv_marker_ix = -1;
        }
#endif

        /* Unlink from the doubly-linked in-use list... */
        blkp->marker[pix].next_ix = nix;
        blkp->marker[nix].prev_ix = pix;

        if (blkp->used_ix == ix)
            blkp->used_ix = nix;

        /* Push onto the (singly-linked) free list; 'am_free' in the
         * ref slot marks a free entry... */
        blkp->marker[ix].next_ix = blkp->free_ix;
        blkp->free_ix = ix;
        blkp->ref[ix] = am_free;
#ifdef DEBUG
        markp->used = 0;
#endif

        ERTS_HDBG_CHK_RECV_MRKS(c_p);
    }
}
3012
static ERTS_INLINE void
recv_marker_dequeue(Process *c_p, ErtsRecvMarker *markp)
{
    /*
     * Remove the receive marker 'markp' from the signal queue and
     * release it. A marker still in the middle signal queue cannot be
     * unlinked here; it is only flagged for removal and reclaimed
     * when later encountered during signal handling.
     */
    ErtsMessage *sigp;

    ASSERT(markp->proc == c_p);

    if (markp->in_sigq <= 0) {
        /* Not in signal queue or marked for removal... */
        return;
    }

    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);

    sigp = (ErtsMessage *) markp;

    ASSERT(ERTS_SIG_IS_RECV_MARKER(sigp));
    ASSERT(!markp->in_msgq || markp->prev_next);

    if (!markp->in_msgq) {
        markp->in_sigq = -1; /* Mark for removal... */
        markp->set_save = 0;
    }
    else {
        /* In the inner queue; unlink via the cached predecessor
         * pointer and free immediately... */
        remove_iq_sig(c_p, sigp, markp->prev_next);
        markp->in_sigq = markp->in_msgq = 0;
        ASSERT(!markp->set_save);
#ifdef DEBUG
        markp->prev_next = NULL;
#endif
        recv_marker_deallocate(c_p, markp);
    }

    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
}
3048
3049
3050 static ERTS_INLINE Eterm
recv_marker_uniq(Process * c_p,Eterm * uniqp)3051 recv_marker_uniq(Process *c_p, Eterm *uniqp)
3052 {
3053 Eterm res = *uniqp;
3054 if (res == am_new_uniq) {
3055 Sint64 val = MIN_SMALL + c_p->uniq++;
3056 Uint hsz = ERTS_SINT64_HEAP_SIZE(val);
3057 if (hsz == 0)
3058 res = make_small((Sint) val);
3059 else {
3060 Eterm *hp = HAlloc(c_p, hsz);
3061 res = erts_sint64_to_big(val, &hp);
3062 }
3063 *uniqp = res;
3064 }
3065 return res;
3066 }
3067
static ERTS_INLINE ErtsRecvMarker *
recv_marker_alloc_block(Process *c_p, ErtsRecvMarkerBlock **blkpp,
                        int *ixp, Eterm *uniqp)
{
    /*
     * Allocate a fresh receive-marker block for the process, hand out
     * its first marker (index 0) tagged with '*uniqp' (resolved via
     * recv_marker_uniq()), and put the remaining entries on the
     * block's free list. Returns the allocated marker; '*blkpp' and
     * '*ixp' are updated accordingly.
     */
    ErtsRecvMarkerBlock *blkp;
    ErtsRecvMarker *markp;
    int ix;

    blkp = (ErtsRecvMarkerBlock *) erts_alloc(ERTS_ALC_T_RECV_MARK_BLK,
                                              sizeof(ErtsRecvMarkerBlock));
    *blkpp = blkp;

    /* Allocate marker for 'uniqp' in index zero... */
    *ixp = 0;
    blkp->ref[0] = recv_marker_uniq(c_p, uniqp);
    markp = &blkp->marker[0];
    /* Sole in-use marker links to itself... */
    markp->next_ix = markp->prev_ix = 0;
    blkp->used_ix = 0;

#ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
    if (*uniqp == erts_old_recv_marker_id)
        blkp->old_recv_marker_ix = 0;
    else
        blkp->old_recv_marker_ix = -1;
#endif

    /* Put the rest in a free list in the ref words... */
    blkp->free_ix = 1;
    for (ix = 1; ix < ERTS_RECV_MARKER_BLOCK_SIZE; ix++) {
        blkp->ref[ix] = am_free;
        if (ix == ERTS_RECV_MARKER_BLOCK_SIZE - 1)
            blkp->marker[ix].next_ix = -1; /* End of list */
        else
            blkp->marker[ix].next_ix = ix + 1;
    }

    blkp->unused = 0;
    blkp->pending_set_save_ix = -1;

#ifdef DEBUG
    for (ix = 0; ix < ERTS_RECV_MARKER_BLOCK_SIZE; ix++) {
        blkp->marker[ix].used = ix == 0 ? !0 : 0;
        blkp->marker[ix].proc = c_p;
    }
#endif

    ERTS_HDBG_CHK_RECV_MRKS(c_p);

    return markp;
}
3118
static ERTS_INLINE ErtsRecvMarker *
recv_marker_reuse(Process *c_p, int *ixp)
{
    /*
     * All markers used; reuse the least recently
     * allocated one...
     *
     * Returns the reused marker, unlinked from the inner message
     * queue and ready for re-insertion, with its index in '*ixp'.
     * Returns NULL when no marker can be reused (the least recently
     * allocated one is not yet in the inner queue).
     */
    ErtsRecvMarkerBlock *blkp = c_p->sig_qs.recv_mrk_blk;
    ErtsRecvMarker *markp;
    ErtsMessage *sigp;
    int ix, used_ix;

    /*
     * 'used_ix' points to the least recently
     * allocated marker. We reuse least recently
     * and preferably unused marker.
     *
     * In order to reuse a marker it needs to
     * be in the message queue. We search from the
     * least recently allocated towards the most
     * recently allocated. Once we find a marker
     * not in the message queue, i.e, in the middle
     * signal queue, we know that the rest cannot
     * be in the middle queue either.
     */

    used_ix = blkp->used_ix;
    markp = &blkp->marker[used_ix];
    if (!markp->in_msgq)
        return NULL;
    if (!blkp->unused || blkp->ref[used_ix] == am_undefined) {
    use_least_recently_allocated:
        if (blkp->ref[used_ix] == am_undefined)
            blkp->unused--;
        ix = used_ix;
        /* Rotate the list head; the reused slot becomes the most
         * recently allocated position... */
        blkp->used_ix = used_ix = markp->next_ix;
    }
    else {
        int pix, nix;

        /* Scan for an unused (ref == am_undefined) marker that is in
         * the message queue... */
        ix = markp->next_ix;
        ASSERT(ix != used_ix);
        while (!0) {
            markp = &blkp->marker[ix];
            if (!markp->in_msgq)
                goto use_least_recently_allocated;
            if (blkp->ref[ix] == am_undefined) {
                /* use this one... */
                ASSERT(blkp->unused > 0);
                blkp->unused--;
                break;
            }
            ix = markp->next_ix;
            ASSERT(ix != used_ix);
        }
        /*
         * Move this marker to be most recently
         * allocated marker (prev_ix of used_ix),
         * so that the search property still holds...
         */
        pix = markp->prev_ix;
        nix = markp->next_ix;
        blkp->marker[pix].next_ix = nix;
        blkp->marker[nix].prev_ix = pix;

        pix = blkp->marker[used_ix].prev_ix;
        blkp->marker[used_ix].prev_ix = ix;
        blkp->marker[pix].next_ix = ix;
        markp->next_ix = used_ix;
        markp->prev_ix = pix;
    }

    *ixp = ix;

    ASSERT(markp->in_sigq);
    ASSERT(markp->in_msgq);
    ASSERT(!markp->set_save);

    sigp = (ErtsMessage *) markp;

    ASSERT(ERTS_SIG_IS_RECV_MARKER(sigp));

    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);

    /* Unlink the reused marker from the inner queue... */
    remove_iq_sig(c_p, sigp, markp->prev_next);
    markp->in_sigq = markp->in_msgq = 0;
#ifdef DEBUG
    markp->prev_next = NULL;
#endif

    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);

    return markp;
}
3213
/*
 * Allocate a receive marker for process 'c_p' from its receive-marker
 * block.
 *
 * If no block exists yet, one is created via recv_marker_alloc_block().
 * If the block's free list is empty, an already allocated marker is
 * reclaimed via recv_marker_reuse(); that may fail, in which case NULL
 * is returned.
 *
 * On success, the chosen block index is stored in *ixp and the
 * reference created from *uniqp (see recv_marker_uniq()) is recorded
 * in the block's ref[] array.
 */
static ERTS_INLINE ErtsRecvMarker *
recv_marker_alloc(Process *c_p, ErtsRecvMarkerBlock **blkpp,
                  int *ixp, Eterm *uniqp)
{
    ErtsRecvMarkerBlock *blkp = *blkpp;
    ErtsRecvMarker *markp;
    int ix;

    ASSERT(is_small(*uniqp) || is_big(*uniqp) || *uniqp == am_new_uniq
           || *uniqp == NIL || is_internal_ref(*uniqp));

    if (!blkp)
        return recv_marker_alloc_block(c_p, blkpp, ixp, uniqp);

    ERTS_HDBG_CHK_RECV_MRKS(c_p);

    ix = blkp->free_ix;
    if (ix < 0) {
        /* Free list empty; try to reclaim a marker already in use... */
        markp = recv_marker_reuse(c_p, &ix);
        if (!markp)
            return NULL;
    }
    else {
        int used_ix = blkp->used_ix;
        ASSERT(blkp->ref[ix] == am_free);
        markp = &blkp->marker[ix];
        /* Unlink first entry from the free list... */
        blkp->free_ix = markp->next_ix;
        ASSERT(-1 <= blkp->free_ix
               && blkp->free_ix < ERTS_RECV_MARKER_BLOCK_SIZE);
        /*
         * ...and insert it as most recently allocated in the
         * (circular) used list, i.e., as prev_ix of used_ix...
         */
        markp->prev_ix = blkp->marker[used_ix].prev_ix;
        markp->next_ix = used_ix;
#ifdef DEBUG
        markp->used = !0;
#endif
        blkp->marker[markp->prev_ix].next_ix = ix;
        blkp->marker[used_ix].prev_ix = ix;
    }

    *ixp = ix;

    blkp->ref[ix] = recv_marker_uniq(c_p, uniqp);

#ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
    if (*uniqp == erts_old_recv_marker_id) {
        /* Remember the slot used by the old recv-mark instructions... */
        ASSERT(blkp->old_recv_marker_ix == -1);
        blkp->old_recv_marker_ix = ix;
    }
#endif

    ERTS_HDBG_CHK_RECV_MRKS(c_p);

    return markp;
}
3267
/*
 * Link the receive marker 'markp' into the signal queues of 'c_p':
 * at the end of the inner message queue when the middle signal queue
 * is empty, otherwise at the end of the middle signal queue.
 *
 * 'setting' is non-zero when the caller intends to set the save
 * pointer at this marker (see
 * erts_msgq_recv_marker_create_insert_set_save()).
 */
static ERTS_INLINE void
recv_marker_insert(Process *c_p, ErtsRecvMarker *markp, int setting)
{
    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
    markp->sig.common.next = NULL;
    markp->sig.common.specific.next = NULL;
    markp->sig.common.tag = ERTS_RECV_MARKER_TAG;

    markp->pass = 0;
    markp->set_save = 0;
    markp->in_sigq = 1;
    if (!c_p->sig_qs.cont) {
        /* Insert in message queue... */
        markp->in_msgq = !0;
        ASSERT(c_p->sig_qs.first);
        markp->prev_next = c_p->sig_qs.last;
        *c_p->sig_qs.last = (ErtsMessage *) &markp->sig;
        c_p->sig_qs.last = &markp->sig.common.next;

        if (!setting && *c_p->sig_qs.save == (ErtsMessage *) &markp->sig) {
            /*
             * This can happen when a recv marker recently entered the message
             * queue via erts_proc_sig_handle_incoming() through the middle
             * signal queue...
             */
            markp->pass++;
            c_p->sig_qs.save = c_p->sig_qs.last;
        }

        ERTS_SIG_DBG_RECV_MARK_SET_HANDLED(&markp->sig);
    }
    else {
        /* Insert in (middle) signal queue... */
        markp->in_msgq = 0;
#ifdef DEBUG
        markp->prev_next = NULL;
#endif
        if (!c_p->sig_qs.nmsigs.last) {
            /* First non-message signal in the middle queue... */
            ASSERT(!c_p->sig_qs.nmsigs.next);
            c_p->sig_qs.nmsigs.next = c_p->sig_qs.cont_last;
        }
        else {
            /* Chain previous non-message signal to this one... */
            ErtsSignal *lsig = (ErtsSignal *) *c_p->sig_qs.nmsigs.last;
            ASSERT(c_p->sig_qs.nmsigs.next);
            ASSERT(lsig && !lsig->common.specific.next);
            lsig->common.specific.next = c_p->sig_qs.cont_last;
        }

        c_p->sig_qs.nmsigs.last = c_p->sig_qs.cont_last;

        *c_p->sig_qs.cont_last = (ErtsMessage *) &markp->sig;
        c_p->sig_qs.cont_last = &markp->sig.common.next;
    }
    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
}
3323
3324 Eterm
erts_msgq_recv_marker_create_insert(Process * c_p,Eterm uniq)3325 erts_msgq_recv_marker_create_insert(Process *c_p, Eterm uniq)
3326 {
3327 int ix;
3328 Eterm new_uniq = uniq;
3329 ErtsRecvMarkerBlock **blkpp = &c_p->sig_qs.recv_mrk_blk;
3330 ErtsRecvMarker *markp = recv_marker_alloc(c_p, blkpp, &ix, &new_uniq);
3331 if (!markp)
3332 return am_undefined;
3333 recv_marker_insert(c_p, markp, 0);
3334 ASSERT(is_small(new_uniq) || is_big(new_uniq) || new_uniq == NIL
3335 || is_internal_ref(new_uniq));
3336 return new_uniq;
3337 }
3338
3339 void
erts_msgq_recv_marker_create_insert_set_save(Process * c_p,Eterm id)3340 erts_msgq_recv_marker_create_insert_set_save(Process *c_p, Eterm id)
3341 {
3342 int ix = -1; /* Shut up faulty warning... */
3343 ErtsRecvMarkerBlock **blkpp = &c_p->sig_qs.recv_mrk_blk;
3344 ErtsRecvMarker *markp = recv_marker_alloc(c_p, blkpp, &ix, &id);
3345
3346 if (markp) {
3347 recv_marker_insert(c_p, markp, !0);
3348 erts_msgq_recv_marker_set_save__(c_p, *blkpp, markp, ix);
3349 ASSERT(markp->in_sigq > 0);
3350 ASSERT(!markp->in_msgq);
3351 ASSERT(markp->set_save);
3352 ASSERT(ix >= 0);
3353 ASSERT((*blkpp)->pending_set_save_ix == ix);
3354
3355 /*
3356 * The save pointer will be set when the marker
3357 * enters the message queue, and then the marker
3358 * will immediately be removed...
3359 */
3360 markp->in_sigq = -1;
3361 }
3362 }
3363
3364 void
erts_msgq_remove_leading_recv_markers(Process * c_p)3365 erts_msgq_remove_leading_recv_markers(Process *c_p)
3366 {
3367 /*
3368 * Receive markers in the front of the queue does not
3369 * add any value, so we just remove them...
3370 */
3371 ASSERT(c_p->sig_qs.first
3372 && ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.first));
3373 ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
3374 do {
3375 ErtsRecvMarker *markp = (ErtsRecvMarker *) c_p->sig_qs.first;
3376 recv_marker_dequeue(c_p, markp);
3377 } while (c_p->sig_qs.first
3378 && ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.first));
3379 ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
3380 }
3381
3382 ErtsMessage **
erts_msgq_pass_recv_markers(Process * c_p,ErtsMessage ** markpp)3383 erts_msgq_pass_recv_markers(Process *c_p, ErtsMessage **markpp)
3384 {
3385 ErtsMessage **sigpp = markpp;
3386 ErtsMessage *sigp = *sigpp;
3387 ASSERT(ERTS_SIG_IS_RECV_MARKER(sigp));
3388 do {
3389 ErtsRecvMarker *markp = (ErtsRecvMarker *) sigp;
3390 if (++markp->pass > ERTS_RECV_MARKER_PASS_MAX) {
3391 recv_marker_dequeue(c_p, markp);
3392 sigp = *sigpp;
3393 }
3394 else {
3395 sigpp = &markp->sig.common.next;
3396 sigp = markp->sig.common.next;
3397 }
3398 } while (sigp && ERTS_SIG_IS_RECV_MARKER(sigp));
3399
3400 return sigpp;
3401 }
3402
3403
3404 /*
3405 * Handle signals...
3406 */
3407
/*
 * Handle an incoming exit signal of type GEN_EXIT (both plain exit
 * signals and exit signals due to broken links).
 *
 * Depending on link state, the F_TRAP_EXIT flag, and the exit reason,
 * the signal is either silently dropped, converted in place into a
 * message, or causes the receiving process to begin exiting.
 *
 * '*exited' is set to a non-zero value if the receiver starts exiting.
 * Returns the number of reduction-cost units consumed.
 */
static ERTS_INLINE int
handle_exit_signal(Process *c_p, ErtsSigRecvTracing *tracing,
                   ErtsMessage *sig, ErtsMessage ***next_nm_sig,
                   int *exited)
{
    ErtsMessage *conv_msg = NULL;
    ErtsExitSignalData *xsigd = NULL;
    Eterm tag = ((ErtsSignal *) sig)->common.tag;
    int op = ERTS_PROC_SIG_OP(tag);
    int destroy = 0;
    int ignore = 0;
    int save = 0;
    int exit = 0;
    int linked = 0;
    int cnt = 1;
    Eterm reason;
    Eterm from;

    ASSERT(ERTS_PROC_SIG_TYPE(tag) == ERTS_SIG_Q_TYPE_GEN_EXIT);

    xsigd = get_exit_signal_data(sig);
    from = xsigd->from;

    if (op == ERTS_SIG_Q_OP_EXIT_LINKED) {
        ErtsLink *lnk, *dlnk = NULL;
        ErtsELink *elnk = NULL;
        lnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p), from);
        if (!lnk)
            ignore = destroy = !0; /* No longer active */
        else if (lnk->type != ERTS_LNK_TYPE_DIST_PROC) {
            /* Link to a local entity... */
            if (((ErtsILink *) lnk)->unlinking)
                ignore = destroy = !0; /* No longer active */
            else
                linked = !0;
        }
        else {
            /* Link to a process on another node... */
            dlnk = erts_link_to_other(lnk, &elnk);
            if (elnk->unlinking)
                ignore = destroy = !0; /* No longer active */
            else
                linked = !0;
            if ((xsigd->u.link.flags & ERTS_SIG_LNK_X_FLAG_CONNECTION_LOST)
                && xsigd->u.link.connection_id != elnk->dist->connection_id) {
                /*
                 * The exit signal is due to loss of connection. The link
                 * that triggered this was setup before that connection
                 * was lost, but was later unlinked. After that, the
                 * current link was setup using a new connection. That is,
                 * current link should be left unaffected, and the signal
                 * should be silently dropped.
                 */
                linked = 0;
                lnk = NULL;
                ignore = destroy = !0;
            }
        }
        if (lnk) {
            /* Remove link... */
            erts_link_tree_delete(&ERTS_P_LINKS(c_p), lnk);
            if (!elnk)
                erts_link_internal_release(lnk);
            else if (erts_link_dist_delete(dlnk))
                erts_link_release_both(&elnk->ld);
            else
                erts_link_release(lnk);
        }
    }

    if (!ignore) {
        /* This GEN_EXIT was received from another node, decode the exit reason */
        if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
            erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, sig, 1);

        reason = xsigd->reason;

        if (is_non_value(reason)) {
            /* Bad distribution message; remove it from queue... */
            ignore = !0;
            destroy = !0;
        }
    }

    if (!ignore) {

        if ((op != ERTS_SIG_Q_OP_EXIT || reason != am_kill)
            && (c_p->flags & F_TRAP_EXIT)) {
            /* Receiver traps exits; convert signal into a message... */
            convert_prepared_sig_to_msg(c_p, sig,
                                        xsigd->message, next_nm_sig);
            conv_msg = sig;
        }
        else if (reason == am_normal
                 && !(xsigd->u.link.flags & ERTS_SIG_LNK_X_FLAG_NORMAL_KILLS)) {
            /* Ignore it... */
            destroy = !0;
            ignore = !0;
        }
        else {
            /* Terminate... */
            save = !0;
            exit = !0;
            /* An untrappable kill is reported as reason 'killed'... */
            if (op == ERTS_SIG_Q_OP_EXIT && reason == am_kill)
                reason = am_killed;
        }
    }

    if (ignore|exit) {
        remove_nm_sig(c_p, sig, next_nm_sig);
        if (exit) {
            if (save) {
                /* Keep the exit message around for post-mortem inspection */
                sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
                ERL_MESSAGE_TERM(sig) = xsigd->message;
                erts_save_message_in_proc(c_p, sig);
            }
            /* Exit process... */
            erts_set_self_exiting(c_p, reason);

            cnt++;
        }
    }

    if (!exit) {
        if (conv_msg)
            erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
        if (linked && tracing->procs) {
            ASSERT(op == ERTS_SIG_Q_OP_EXIT_LINKED);
            getting_unlinked(c_p, from);
        }
    }

    if (destroy) {
        cnt++;
        sig->next = NULL;
        erts_cleanup_messages(sig);
    }

    *exited = exit;

    return cnt;
}
3547
3548 static ERTS_INLINE int
convert_prepared_down_message(Process * c_p,ErtsMessage * sig,Eterm msg,ErtsMessage *** next_nm_sig)3549 convert_prepared_down_message(Process *c_p, ErtsMessage *sig,
3550 Eterm msg, ErtsMessage ***next_nm_sig)
3551 {
3552 convert_prepared_sig_to_msg(c_p, sig, msg, next_nm_sig);
3553 erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
3554 return 1;
3555 }
3556
/*
 * Convert a monitor-down signal into a 'DOWN' message — or, for a
 * monitor representing a pending distributed spawn_request(), into a
 * spawn-reply error message — and replace the signal with the message
 * in the queue.
 *
 * 'mdp' is the monitor data of the triggering monitor; '*omon' is its
 * origin monitor (set to NULL if the monitor is released here without
 * producing a message). Returns the number of reduction-cost units
 * consumed.
 */
static int
convert_to_down_message(Process *c_p,
                        ErtsMessage *sig,
                        ErtsMonitorData *mdp,
                        ErtsMonitor **omon,
                        Uint16 mon_type,
                        ErtsMessage ***next_nm_sig)
{
    int cnt = 0;
    Eterm node = am_undefined;
    ErtsMessage *mp;
    ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
    Uint hsz;
    Eterm *hp, ref, from, type, reason, tag;
    ErlOffHeap *ohp;

    ASSERT(mdp);
    ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
           == (mdp->u.target.flags & ERTS_ML_FLGS_SAME));

    /* reason is mdp->u.target.other.item */
    reason = mdp->u.target.other.item;
    ASSERT(is_immed(reason));
    ASSERT(&mdp->origin == *omon);

    if (mdp->origin.flags & ERTS_ML_FLG_SPAWN_PENDING) {
        /*
         * Create a spawn_request() error message and replace
         * the signal with it...
         */
        ErtsMonitorDataExtended *mdep;

        /* Should only happen when connection breaks... */
        ASSERT(reason == am_noconnection);

        if (mdp->origin.flags & (ERTS_ML_FLG_SPAWN_ABANDONED
                                 | ERTS_ML_FLG_SPAWN_NO_EMSG)) {
            /*
             * Operation has been been abandoned or
             * error message has been disabled...
             */
            erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), *omon);
            erts_monitor_release(*omon);
            *omon = NULL;
            return 1;
        }

        cnt += 4;

        mdep = (ErtsMonitorDataExtended *) mdp;
        hsz = 5; /* 4-tuple */

        ASSERT(is_ref(mdp->ref));
        hsz += NC_HEAP_SIZE(mdp->ref);

        mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
        if (locks != ERTS_PROC_LOCK_MAIN)
            erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
        /*
         * The tag to patch into the resulting message
         * is stored in mdep->u.name via a little trick
         * (see pending_flag in erts_monitor_create()).
         */

        tag = save_heap_frag_eterm(c_p, mp, &mdep->u.name);

        /* Restore to normal monitor */
        ASSERT(mdep->u.name == NIL);
        mdp->origin.flags &= ~ERTS_ML_FLGS_SPAWN;

        ref = STORE_NC(&hp, ohp, mdp->ref);

        ERL_MESSAGE_FROM(mp) = am_undefined;
        ERL_MESSAGE_TERM(mp) = TUPLE4(hp, tag, ref, am_error, reason);

    }
    else {
        /*
         * Create a 'DOWN' message and replace the signal
         * with it...
         */

        hsz = 6; /* 5-tuple */

        if (mdp->origin.flags & ERTS_ML_FLG_NAME)
            hsz += 3; /* reg name 2-tuple */
        else {
            ASSERT(is_pid(mdp->origin.other.item)
                   || is_internal_port(mdp->origin.other.item));
            hsz += NC_HEAP_SIZE(mdp->origin.other.item);
        }

        ASSERT(is_ref(mdp->ref));
        hsz += NC_HEAP_SIZE(mdp->ref);

        mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);

        if (locks != ERTS_PROC_LOCK_MAIN)
            erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);

        cnt += 4;

        ref = STORE_NC(&hp, ohp, mdp->ref);

        if (!(mdp->origin.flags & ERTS_ML_FLG_NAME)) {
            from = STORE_NC(&hp, ohp, mdp->origin.other.item);
        }
        else {
            /* Monitor by name; 'from' is a {RegName, Node} 2-tuple... */
            ErtsMonitorDataExtended *mdep;
            ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
            mdep = (ErtsMonitorDataExtended *) mdp;
            ASSERT(is_atom(mdep->u.name));
            if (mdep->dist)
                node = mdep->dist->nodename;
            else
                node = erts_this_dist_entry->sysname;
            from = TUPLE2(hp, mdep->u.name, node);
            hp += 3;
        }

        ASSERT(mdp->origin.type == mon_type);
        switch (mon_type) {
        case ERTS_MON_TYPE_PORT:
            type = am_port;
            if (mdp->origin.other.item == am_undefined) {
                /* failed by name... */
                ERL_MESSAGE_FROM(mp) = am_system;
            }
            else {
                ASSERT(is_internal_port(mdp->origin.other.item));
                ERL_MESSAGE_FROM(mp) = mdp->origin.other.item;
            }
            break;
        case ERTS_MON_TYPE_PROC:
            type = am_process;
            if (mdp->origin.other.item == am_undefined) {
                /* failed by name... */
                ERL_MESSAGE_FROM(mp) = am_system;
            }
            else {
                ASSERT(is_internal_pid(mdp->origin.other.item));
                ERL_MESSAGE_FROM(mp) = mdp->origin.other.item;
            }
            break;
        case ERTS_MON_TYPE_DIST_PROC:
            type = am_process;
            if (node == am_undefined) {
                /* 'node' not set by the by-name branch above; fetch it... */
                ErtsMonitorDataExtended *mdep;
                ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
                mdep = (ErtsMonitorDataExtended *) mdp;
                ASSERT(mdep->dist);
                node = mdep->dist->nodename;
            }
            ASSERT(is_atom(node) && node != am_undefined);
            ERL_MESSAGE_FROM(mp) = node;
            break;
        default:
            ERTS_INTERNAL_ERROR("Unexpected monitor type");
            type = am_undefined;
            ERL_MESSAGE_FROM(mp) = am_undefined;
            break;
        }

        if (!(mdp->origin.flags & ERTS_ML_FLG_TAG))
            tag = am_DOWN;
        else {
            /* Custom message tag set via monitor/3 'tag' option... */
            Eterm *tag_storage;
            if (mdp->origin.flags & ERTS_ML_FLG_EXTENDED)
                tag_storage = &((ErtsMonitorDataExtended *) mdp)->heap[0];
            else
                tag_storage = &((ErtsMonitorDataTagHeap *) mdp)->heap[0];
            tag = save_heap_frag_eterm(c_p, mp, tag_storage);
        }

        ERL_MESSAGE_TERM(mp) = TUPLE5(hp, tag, ref,
                                      type, from, reason);
        hp += 6;

    }

    ERL_MESSAGE_TOKEN(mp) = am_undefined;
    /* Replace original signal with the exit message... */
    convert_to_msg(c_p, sig, mp, next_nm_sig);

    cnt += 4;

    erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);

    return cnt;
}
3747
/*
 * Convert a nodedown signal for a 'nodes' monitor into one
 * {nodedown, Node} message per outstanding monitor_nodes subscription
 * (mdep->u.refc), replacing the signal in the queue. If there are no
 * subscriptions the signal is simply removed.
 *
 * Returns the number of reduction-cost units consumed.
 */
static ERTS_INLINE int
convert_to_nodedown_messages(Process *c_p,
                             ErtsMessage *sig,
                             ErtsMonitorData *mdp,
                             ErtsMessage ***next_nm_sig)
{
    int cnt = 1;
    Uint n;
    ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;

    ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
           == (mdp->u.target.flags & ERTS_ML_FLGS_SAME));
    ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);

    n = mdep->u.refc;

    if (n == 0)
        remove_nm_sig(c_p, sig, next_nm_sig);
    else {
        Uint i;
        ErtsMessage *nd_first = NULL;
        ErtsMessage *nd_last = NULL;
        ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
        Eterm node = mdep->dist->nodename;

        ASSERT(is_atom(node));
        ASSERT(n > 0);

        /* Build a chain of n identical {nodedown, Node} messages... */
        for (i = 0; i < n; i++) {
            ErtsMessage *mp;
            ErlOffHeap *ohp;
            Eterm *hp;

            mp = erts_alloc_message_heap(c_p, &locks, 3, &hp, &ohp);

            ERL_MESSAGE_TERM(mp) = TUPLE2(hp, am_nodedown, node);
            ERL_MESSAGE_FROM(mp) = am_system;
            ERL_MESSAGE_TOKEN(mp) = am_undefined;
            mp->next = nd_first;
            nd_first = mp;
            if (!nd_last)
                nd_last = mp;
            cnt++;
        }

        if (locks != ERTS_PROC_LOCK_MAIN)
            erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);

        /* Replace signal with 'nodedown' messages */
        convert_to_msgs(c_p, sig, n, nd_first, nd_last, next_nm_sig);

        erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
    }
    return cnt;
}
3803
/*
 * Handle a nodedown signal for a 'nodes' monitor. Unlinks the monitor
 * from the process' monitor structures — the monitor may be in the
 * monitor tree (possibly with a sub-list of further 'nodes' monitors
 * hanging off it) or in such a sub-list itself — and then converts the
 * signal into 'nodedown' messages.
 *
 * Returns the number of reduction-cost units consumed.
 */
static int
handle_nodedown(Process *c_p,
                ErtsMessage *sig,
                ErtsMonitorData *mdp,
                ErtsMessage ***next_nm_sig)
{
    ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
    ErtsMonitor *omon = &mdp->origin;
    int not_in_subtab = !(omon->flags & ERTS_ML_FLG_IN_SUBTABLE);
    int cnt = 1;

    ASSERT(erts_monitor_is_in_table(omon));

    /* NOTE: bitwise '&' is intentional; both operands are 0/1 flags */
    if (not_in_subtab & !mdep->uptr.node_monitors)
        /* In tree, no sub-list; just remove it from the tree... */
        erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), omon);
    else if (not_in_subtab) {
        /*
         * In tree with a sub-list of node monitors; promote the last
         * sub-list entry to take this monitor's place in the tree...
         */
        ErtsMonitor *sub_mon;
        ErtsMonitorDataExtended *sub_mdep;
        sub_mon = erts_monitor_list_last(mdep->uptr.node_monitors);
        ASSERT(sub_mon);
        erts_monitor_list_delete(&mdep->uptr.node_monitors, sub_mon);
        sub_mon->flags &= ~ERTS_ML_FLG_IN_SUBTABLE;
        sub_mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(sub_mon);
        ASSERT(!sub_mdep->uptr.node_monitors);
        sub_mdep->uptr.node_monitors = mdep->uptr.node_monitors;
        mdep->uptr.node_monitors = NULL;
        erts_monitor_tree_replace(&ERTS_P_MONITORS(c_p), omon, sub_mon);
        cnt += 2;
    }
    else {
        /* In a sub-list; remove it from its owner's sub-list... */
        ErtsMonitorDataExtended *top_mdep;
        ErtsMonitor *top_mon;
        ASSERT(is_atom(omon->other.item));
        ASSERT(!mdep->uptr.node_monitors);
        top_mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
                                           omon->other.item);
        ASSERT(top_mon);
        top_mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(top_mon);
        ASSERT(top_mdep->uptr.node_monitors);
        erts_monitor_list_delete(&top_mdep->uptr.node_monitors, omon);
        omon->flags &= ~ERTS_ML_FLG_IN_SUBTABLE;
        cnt += 3;
    }

    return cnt + convert_to_nodedown_messages(c_p, sig, mdp, next_nm_sig);
}
3850
/*
 * Handle a message arriving via a persistent monitor (time-offset or
 * 'nodes' monitor). The signal is converted in place into the message
 * 'msg'. For a tagged time-offset monitor the message's first tuple
 * element is patched to the custom tag. For a 'nodes' monitor with
 * multiple subscriptions, additional copies of the message are
 * inserted so each subscription gets one.
 */
static void
handle_persistent_mon_msg(Process *c_p, Uint16 type,
                          ErtsMonitor *mon, ErtsMessage *sig,
                          Eterm msg, ErtsMessage ***next_nm_sig)
{
    convert_prepared_sig_to_msg(c_p, sig, msg, next_nm_sig);

    switch (type) {

    case ERTS_MON_TYPE_TIME_OFFSET:
        ASSERT(mon->type == ERTS_MON_TYPE_TIME_OFFSET);
        if (mon->flags & ERTS_ML_FLG_TAG) {
            /* Patch the custom tag into the 'CHANGE' message... */
            ErtsMonitorData *mdp = erts_monitor_to_data(mon);
            Eterm *tpl, tag_storage;
            ASSERT(is_tuple_arity(msg, 5));
            tpl = tuple_val(msg);
            ASSERT(tpl[1] == am_CHANGE);
            if (mon->flags & ERTS_ML_FLG_EXTENDED)
                tag_storage = ((ErtsMonitorDataExtended *) mdp)->heap[0];
            else
                tag_storage = ((ErtsMonitorDataTagHeap *) mdp)->heap[0];
            tpl[1] = copy_heap_frag_eterm(c_p, sig, tag_storage);
        }

        break;

    case ERTS_MON_TYPE_NODES: {
        ErtsMonitorDataExtended *mdep;
        Uint n;
        ASSERT(mon->type == ERTS_MON_TYPE_NODES);
        mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
        ERTS_ML_ASSERT(mdep->u.refc > 0);
        n = mdep->u.refc;
        n--; /* one message already delivered via the converted signal */
        if (n > 0) {
            ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
            ErtsMessage *first = NULL, *prev, *last;
            Uint hsz = size_object(msg);
            Uint i;

            /*
             * Create n extra copies of the message. 'prev' is assigned
             * on the first iteration (first == NULL) before it is read.
             */
            for (i = 0; i < n; i++) {
                Eterm *hp;
                ErlOffHeap *ohp;

                last = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);

                if (!first)
                    first = last;
                else
                    prev->next = last;
                prev = last;

                ERL_MESSAGE_TERM(last) = copy_struct(msg, hsz, &hp, ohp);

#ifdef USE_VM_PROBES
                ASSERT(is_immed(ERL_MESSAGE_DT_UTAG(sig)));
                ERL_MESSAGE_DT_UTAG(last) = ERL_MESSAGE_DT_UTAG(sig);
#endif
                ASSERT(is_immed(ERL_MESSAGE_TOKEN(sig)));
                ERL_MESSAGE_TOKEN(last) = ERL_MESSAGE_TOKEN(sig);
                ASSERT(is_immed(ERL_MESSAGE_FROM(sig)));
                ERL_MESSAGE_FROM(last) = ERL_MESSAGE_FROM(sig);

            }
            if (locks != ERTS_PROC_LOCK_MAIN)
                erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
            insert_messages(c_p, &sig->next, first, last, n, next_nm_sig);
        }
        break;
    }

    default:
        ERTS_INTERNAL_ERROR("Invalid type");
        break;
    }

    erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
}
3929
/*
 * Send the result of a group_leader/2 request as a {Ref, true|badarg}
 * message from 'system' to the requesting process 'to'. If the
 * requester no longer exists, nothing is sent.
 */
static void
group_leader_reply(Process *c_p, Eterm to, Eterm ref, int success)
{
    Process *rp = erts_proc_lookup(to);

    if (rp) {
        ErtsProcLocks locks;
        Uint sz;
        Eterm *hp, msg, ref_cpy, result;
        ErlOffHeap *ohp;
        ErtsMessage *mp;

        ASSERT(is_internal_ref(ref));

        /* If replying to ourselves we already hold the main lock... */
        locks = c_p == rp ? ERTS_PROC_LOCK_MAIN : 0;
        sz = size_object(ref);

        mp = erts_alloc_message_heap(rp, &locks, sz+3,
                                     &hp, &ohp);

        ref_cpy = copy_struct(ref, sz, &hp, ohp);
        result = success ? am_true : am_badarg;
        msg = TUPLE2(hp, ref_cpy, result);

        erts_queue_message(rp, locks, mp, msg, am_system);

        /* Never release our own main lock here... */
        if (c_p == rp)
            locks &= ~ERTS_PROC_LOCK_MAIN;

        if (locks)
            erts_proc_unlock(rp, locks);
    }
}
3963
/*
 * Handle a group-leader signal. The change is performed only if the
 * ACTIVE flag was still set when we cleared it; a reply is then sent
 * to the requester if one was given. The signal structure is destroyed
 * once the RECEIVER flag was the last flag set.
 */
static void
handle_group_leader(Process *c_p, ErtsSigGroupLeader *sgl)
{
    erts_aint_t flags;

    flags = erts_atomic_read_band_nob(&sgl->flags, ~ERTS_SIG_GL_FLG_ACTIVE);
    if (flags & ERTS_SIG_GL_FLG_ACTIVE) {
        int res = erts_set_group_leader(c_p, sgl->group_leader);
        if (is_internal_pid(sgl->reply_to))
            group_leader_reply(c_p, sgl->reply_to, sgl->ref, res);
    }

    flags = erts_atomic_read_band_nob(&sgl->flags, ~ERTS_SIG_GL_FLG_RECEIVER);
    if ((flags & ~ERTS_SIG_GL_FLG_RECEIVER) == 0)
        destroy_sig_group_leader(sgl);
}
3980
/*
 * For a process-info signal that needs a synchronized message-queue
 * length (msgq_len_offset == ..._LEN_SYNC): ensure a message-queue
 * length-offset marker sits first in the receiver's outer signal
 * queue. An already present marker is reused (its refc is bumped);
 * otherwise the marker embedded in the signal itself is pushed.
 *
 * The caller is expected to hold the receiver's MSGQ lock.
 */
static void
check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig)
{
    ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;

    ASSERT(ERTS_PROC_SIG_OP(sig->common.tag) == ERTS_SIG_Q_OP_PROCESS_INFO);

    if (pisig->msgq_len_offset == ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC) {
        ErtsProcSigMsgQLenOffsetMarker *mrkr;
        Sint len, msgq_len_offset;
        ErtsMessage *first = rp->sig_inq.first;
        ASSERT(first);
        if (((ErtsSignal *) first)->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK)
            /* Reuse marker pushed by an earlier process-info signal... */
            mrkr = (ErtsProcSigMsgQLenOffsetMarker *) first;
        else {
            /* Push our own embedded marker first in the queue... */
            mrkr = &pisig->marker;

            ASSERT(mrkr->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);

            mrkr->common.next = first;
            ASSERT(rp->sig_inq.last != &rp->sig_inq.first);
            /* Repoint non-message-signal pointers that referenced the head */
            if (rp->sig_inq.nmsigs.next == &rp->sig_inq.first)
                rp->sig_inq.nmsigs.next = &mrkr->common.next;
            if (rp->sig_inq.nmsigs.last == &rp->sig_inq.first)
                rp->sig_inq.nmsigs.last = &mrkr->common.next;
            rp->sig_inq.first = (ErtsMessage *) mrkr;
        }

        /* Record queue length seen at send time relative to the marker... */
        len = rp->sig_inq.len;
        msgq_len_offset = len - mrkr->len_offset;

        mrkr->len_offset = len;
        mrkr->refc++;

        pisig->msgq_len_offset = msgq_len_offset;

#ifdef DEBUG
        /* save pointer to used marker... */
        pisig->marker.common.specific.attachment = (void *) mrkr;
#endif

    }
}
4024
/*
 * Destroy a handled process-info request signal. If the request
 * participated in message-queue length synchronization, its
 * contribution is removed from the shared length-offset marker first
 * in the outer signal queue; the marker is unlinked and (if owned by
 * another signal) freed when its reference count drops to zero. The
 * signal itself is kept alive if its embedded marker is still in use
 * by other requests.
 */
static void
destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig)
{
    int dealloc_pisig = !0;

    if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
        Sint refc;
        int dealloc_marker = 0;
        ErtsProcSigMsgQLenOffsetMarker *marker;
#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
        Sint delayed_len;
#endif

        ASSERT(pisig->msgq_len_offset >= 0);

        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
        /* The marker always sits first in the outer signal queue... */
        marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
        ASSERT(marker);
        ASSERT(marker->refc > 0);
        ASSERT(pisig->marker.common.specific.attachment == (void *) marker);

        /* Remove this request's contribution... */
        marker->delayed_len -= pisig->msgq_len_offset;
#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
        delayed_len = marker->delayed_len;
#endif

        refc = --marker->refc;
        if (refc) {
            if (marker == &pisig->marker) {
                /* Another signal using our marker... */
                dealloc_pisig = 0;
            }
        }
        else {
            /* Marker unused; remove it... */
            ASSERT(marker->delayed_len + marker->len_offset == 0);
#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
            delayed_len += marker->len_offset;
#endif
            if (marker != &pisig->marker)
                dealloc_marker = !0; /* used another signals marker... */
            c_p->sig_inq.first = marker->common.next;
            if (c_p->sig_inq.last == &marker->common.next)
                c_p->sig_inq.last = &c_p->sig_inq.first;
            if (c_p->sig_inq.nmsigs.next == &marker->common.next)
                c_p->sig_inq.nmsigs.next = &c_p->sig_inq.first;
            if (c_p->sig_inq.nmsigs.last == &marker->common.next)
                c_p->sig_inq.nmsigs.last = &c_p->sig_inq.first;
        }
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);

        if (!refc) {
            c_p->sig_qs.flags &= ~FS_DELAYED_PSIGQS_LEN;
            /* Adjust msg len of inner+middle queue */
            ASSERT(marker->len_offset <= 0);
            c_p->sig_qs.len -= marker->len_offset;

            ASSERT(c_p->sig_qs.len >= 0);
        }

#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
        {
            Sint len = 0;
            ERTS_FOREACH_SIG_PRIVQS(
                c_p, mp,
                {
                    if (ERTS_SIG_IS_MSG(mp))
                        len++;
                });
            ERTS_ASSERT(c_p->sig_qs.len + delayed_len == len);
        }
#endif


        if (dealloc_marker) {
            /* Recover the signal that embeds the marker and free it... */
            ErtsProcessInfoSig *pisig2
                = (ErtsProcessInfoSig *) (((char *) marker)
                                          - offsetof(ErtsProcessInfoSig,
                                                     marker));
            erts_free(ERTS_ALC_T_SIG_DATA, pisig2);
        }
    }

    if (dealloc_pisig)
        erts_free(ERTS_ALC_T_SIG_DATA, pisig);
}
4111
/*
 * Handle a process-info request signal: optionally synchronize the
 * message-queue length (flushing the message part of the middle queue
 * into the inner queue), gather the requested items via
 * erts_process_info(), and send a {Ref, Result} reply to the
 * requester. 'is_alive' is zero when the receiver is already exiting,
 * in which case the reply is 'undefined'.
 *
 * Returns the number of reduction-cost units consumed.
 */
static int
handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing,
                    ErtsMessage *sig, ErtsMessage ***next_nm_sig,
                    int is_alive)
{
    ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
    Uint reds = 0;
    Process *rp;

    ASSERT(!!is_alive == !(erts_atomic32_read_nob(&c_p->state)
                           & ERTS_PSFLG_EXITING));

    if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
        /*
         * Request requires message queue data to be updated
         * before inspection...
         */

        ASSERT(pisig->msgq_len_offset >= 0);
        /*
         * Update sig_qs.len to reflect the length
         * of the message queue...
         */
        c_p->sig_qs.len += pisig->msgq_len_offset;

        if (is_alive) {
            /*
             * Move messages part of message queue into inner
             * signal queue...
             */
            ASSERT(tracing);

            if (*next_nm_sig != &c_p->sig_qs.cont) {
                /* Keep save pointer and any leading recv marker valid... */
                if (c_p->sig_qs.save == &c_p->sig_qs.cont)
                    c_p->sig_qs.save = c_p->sig_qs.last;
                if (ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.cont)) {
                    ErtsRecvMarker *markp = (ErtsRecvMarker *) c_p->sig_qs.cont;
                    markp->prev_next = c_p->sig_qs.last;
                }
                if (*next_nm_sig == tracing->messages.next)
                    tracing->messages.next = &c_p->sig_qs.cont;
                /* Append middle-queue messages to the inner queue... */
                *c_p->sig_qs.last = c_p->sig_qs.cont;
                c_p->sig_qs.last = *next_nm_sig;

                c_p->sig_qs.cont = **next_nm_sig;
                if (c_p->sig_qs.nmsigs.last == *next_nm_sig)
                    c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
                *next_nm_sig = &c_p->sig_qs.cont;
                *c_p->sig_qs.last = NULL;
            }

#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
            {
                Sint len;
                ErtsMessage *mp;
                for (mp = c_p->sig_qs.first, len = 0; mp; mp = mp->next) {
                    ERTS_ASSERT(ERTS_SIG_IS_MSG(mp));
                    len++;
                }
                ERTS_ASSERT(c_p->sig_qs.len == len);
            }
#endif
        }
    }
    if (is_alive) {
        if (!pisig->common.specific.next) {
            /*
             * No more signals in middle queue...
             *
             * Process-info 'status' needs sig-q
             * process flag to be updated in order
             * to show accurate result...
             */
            erts_atomic32_read_band_nob(&c_p->state,
                                        ~ERTS_PSFLG_SIG_Q);
        }
        remove_nm_sig(c_p, sig, next_nm_sig);
    }

    rp = erts_proc_lookup(pisig->requester);
    ASSERT(c_p != rp);
    if (rp) {
        Eterm msg, res, ref, *hp;
        ErtsProcLocks locks = 0;
        ErtsHeapFactory hfact;
        ErtsMessage *mp;
        /* Room for a 2-tuple and a copy of the request reference... */
        Uint reserve_size = 3 + sizeof(pisig->oref_thing)/sizeof(Eterm);

        if (!is_alive) {
            ErlOffHeap *ohp;
            mp = erts_alloc_message_heap(rp, &locks, reserve_size, &hp, &ohp);
            res = am_undefined;
        }
        else {
            ErlHeapFragment *hfrag;

            reserve_size += pisig->reserve_size;

            mp = erts_alloc_message(0, NULL);
            hfrag = new_message_buffer(reserve_size);
            mp->data.heap_frag = hfrag;
            erts_factory_selfcontained_message_init(&hfact, mp, &hfrag->mem[0]);

            res = erts_process_info(c_p, &hfact, c_p, ERTS_PROC_LOCK_MAIN,
                                    pisig->item_ix, pisig->len,
                                    pisig->flags, reserve_size, &reds);

            hp = erts_produce_heap(&hfact,
                                   3 + sizeof(pisig->oref_thing)/sizeof(Eterm),
                                   0);
        }

        /* Copy the request reference into the reply heap... */
        sys_memcpy((void *) hp, (void *) &pisig->oref_thing,
                   sizeof(pisig->oref_thing));
        ref = make_internal_ref(hp);
        hp += sizeof(pisig->oref_thing)/sizeof(Eterm);

        msg = TUPLE2(hp, ref, res);

        if (is_alive)
            erts_factory_trim_and_close(&hfact, &msg, 1);

        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        erts_queue_proc_message(c_p, rp, locks, mp, msg);

        if (!is_alive && locks)
            erts_proc_unlock(rp, locks);
    }

    destroy_process_info_request(c_p, pisig);

    /* Clamp and scale the reduction cost reported by erts_process_info() */
    if (reds > INT_MAX/8)
        reds = INT_MAX/8;

    return ((int) reds)*4 + 8;
}
4248
/*
 * Handle a suspend-monitor signal. If the process is not running on a
 * dirty scheduler, it is suspended immediately and '*yieldp' is set.
 * If it is running dirty, the suspend is recorded in a pending-suspend
 * structure and applied later when the dirty work is done.
 */
static void
handle_suspend(Process *c_p, ErtsMonitor *mon, int *yieldp)
{
    erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);

    ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);

    if (!(state & ERTS_PSFLG_DIRTY_RUNNING)) {
        /* Suspend directly... */
        ErtsMonitorSuspend *msp;
        erts_aint_t mstate;

        msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
        mstate = erts_atomic_read_bor_acqb(&msp->state,
                                           ERTS_MSUSPEND_STATE_FLG_ACTIVE);
        ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        *yieldp = !0;
    }
    else {
        /* Executing dirty; delay suspend... */
        ErtsProcSigPendingSuspend *psusp;
        ErtsMonitorSuspend *msp;

        psusp = ERTS_PROC_GET_PENDING_SUSPEND(c_p);
        if (!psusp) {
            /* First pending suspend; create the bookkeeping structure... */
            psusp = erts_alloc(ERTS_ALC_T_SIG_DATA,
                               sizeof(ErtsProcSigPendingSuspend));
            psusp->mon = NULL;
            psusp->sync = NULL;
            ERTS_PROC_SET_PENDING_SUSPEND(c_p, (void *) psusp);
        }

        msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);

        /* Push this suspend monitor onto the pending list... */
        msp->next = psusp->mon;
        psusp->mon = msp;

        erts_atomic32_inc_nob(&msp->md.refc);
    }
}
4289
static void
sync_suspend_reply(Process *c_p, ErtsMessage *mp, erts_aint32_t state)
{
    /*
     * Reply to a synchronous suspend request. 'state' is the
     * state of this (the suspendee) process as observed by the
     * caller.
     *
     * Sender prepared the message for us. Just patch
     * the result if necessary. The default prepared
     * result is 'false'.
     */
    Process *rp;
    ErtsSyncSuspendRequest *ssusp;

    /* The request data is stored directly after the used part of
       the signal's combined heap fragment... */
    ssusp = (ErtsSyncSuspendRequest *) (char *) (&mp->hfrag.mem[0]
                                                 + mp->hfrag.used_size);

    ASSERT(ERTS_SIG_IS_NON_MSG(mp));
    ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
           == ERTS_SIG_Q_OP_SYNC_SUSPEND);
    ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
    ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
           >= sizeof(ErtsSyncSuspendRequest));
    ASSERT(is_internal_pid(ssusp->requester));
    ASSERT(ssusp->requester != c_p->common.id);
    ASSERT(is_tuple_arity(ssusp->message, 2));
    ASSERT(is_immed(tuple_val(ssusp->message)[2]));

    /* Convert the signal into an ordinary message... */
    ERL_MESSAGE_TERM(mp) = ssusp->message;
    mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
    mp->next = NULL;

    rp = erts_proc_lookup(ssusp->requester);
    if (!rp)
        erts_cleanup_messages(mp); /* requester gone; drop reply... */
    else {
        if ((state & (ERTS_PSFLG_EXITING
                      | ERTS_PSFLG_SUSPENDED)) != ERTS_PSFLG_SUSPENDED) {
            /* Not suspended -> patch result... */
            if (state & ERTS_PSFLG_EXITING) {
                Eterm *tp = tuple_val(ssusp->message);
                tp[2] = ssusp->async ? am_exited : am_badarg;
            }
            else {
                Eterm *tp = tuple_val(ssusp->message);
                ASSERT(!(state & ERTS_PSFLG_SUSPENDED));
                tp[2] = ssusp->async ? am_not_suspended : am_internal_error;
            }
        }
        ERL_MESSAGE_TOKEN(mp) = am_undefined;
        erts_queue_proc_message(c_p, rp, 0, mp, ssusp->message);
    }
}
4340
4341 static void
handle_sync_suspend(Process * c_p,ErtsMessage * mp)4342 handle_sync_suspend(Process *c_p, ErtsMessage *mp)
4343 {
4344 ErtsProcSigPendingSuspend *psusp;
4345
4346 psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
4347 if (!psusp)
4348 sync_suspend_reply(c_p, mp, erts_atomic32_read_nob(&c_p->state));
4349 else {
4350 mp->next = psusp->sync;
4351 psusp->sync = mp;
4352 }
4353 }
4354
/*
 * Decode the external (distributed) payload of a message or
 * exit/down signal in place.
 *
 * For ordinary messages the decoded term replaces the message
 * term. For exit/down signals the decoded term becomes the exit
 * reason, and the user-visible {'EXIT',From,Reason} or
 * {'DOWN',Ref,process,From,Reason} tuple is built as well.
 *
 * Returns non-zero on success; zero if decoding failed, in which
 * case the caller should remove/drop the signal.
 */
int
erts_proc_sig_decode_dist(Process *proc, ErtsProcLocks proc_locks,
                          ErtsMessage *msgp, int force_off_heap)
{
    ErtsHeapFactory factory;
    ErlHeapFragment *hfrag;
    Eterm msg;
    Sint need;
    ErtsDistExternal *edep;
    ErtsExitSignalData *xsigd = NULL;

    edep = erts_proc_sig_get_external(msgp);
    if (!ERTS_SIG_IS_EXTERNAL_MSG(msgp))
        xsigd = get_exit_signal_data(msgp);

    /* Determine required heap size; cache it in the external data
       so a retry does not have to re-measure... */
    if (edep->heap_size >= 0)
        need = edep->heap_size;
    else {
        need = erts_decode_dist_ext_size(edep, 1, 1);
        if (need < 0) {
            /* bad signal; remove it... */
            return 0;
        }

        edep->heap_size = need;
    }

    /* Exit/down signals also need room for the wrapper tuple... */
    if (ERTS_SIG_IS_NON_MSG(msgp)) {
        switch (ERTS_PROC_SIG_OP(ERL_MESSAGE_TERM(msgp))) {
        case ERTS_SIG_Q_OP_EXIT:
        case ERTS_SIG_Q_OP_EXIT_LINKED:
            /* {'EXIT', From, Reason} */
            need += 4;
            break;
        case ERTS_SIG_Q_OP_MONITOR_DOWN:
            /* {'DOWN', Ref, process, From, Reason} */
            need += 6; /* 5-tuple */
            break;
        default:
            ERTS_INTERNAL_ERROR("Invalid exit signal op");
            break;
        }
    }

    hfrag = new_message_buffer(need);
    erts_factory_heap_frag_init(&factory, hfrag);

    ASSERT(edep->heap_size >= 0);

    msg = erts_decode_dist_ext(&factory, edep, 1);

    if (is_non_value(msg)) {
        /* Decode failed; undo releases what the factory allocated... */
        erts_factory_undo(&factory);
        return 0;
    }

    if (ERTS_SIG_IS_MSG(msgp)) {
        ERL_MESSAGE_TERM(msgp) = msg;
        if (msgp->data.heap_frag == &msgp->hfrag)
            msgp->data.heap_frag = ERTS_MSG_COMBINED_HFRAG;
    } else {
        /* Build the user-visible signal tuple around the decoded
           reason... */
        switch (ERTS_PROC_SIG_OP(ERL_MESSAGE_TERM(msgp))) {
        case ERTS_SIG_Q_OP_EXIT:
        case ERTS_SIG_Q_OP_EXIT_LINKED:
            /* {'EXIT', From, Reason} */
            erts_reserve_heap(&factory, 4);
            xsigd->message = TUPLE3(factory.hp, am_EXIT, xsigd->from, msg);
            factory.hp += 4;
            break;
        case ERTS_SIG_Q_OP_MONITOR_DOWN:
            /* {'DOWN', Ref, process, From, Reason} */
            erts_reserve_heap(&factory, 6);
            xsigd->message = TUPLE5(factory.hp, am_DOWN, xsigd->u.ref, am_process, xsigd->from, msg);
            factory.hp += 6;
            break;
        }
        xsigd->reason = msg;
    }

    erts_free_dist_ext_copy(edep);

    erts_factory_close(&factory);

    /* Link the factory's heap fragments into the signal's
       fragment chain... */
    hfrag = factory.heap_frags;
    while (hfrag->next)
        hfrag = hfrag->next;

    if (ERTS_SIG_IS_MSG(msgp) && msgp->data.heap_frag != ERTS_MSG_COMBINED_HFRAG) {
        hfrag->next = msgp->data.heap_frag;
        msgp->data.heap_frag = factory.heap_frags;
    } else {
        hfrag->next = msgp->hfrag.next;
        msgp->hfrag.next = factory.heap_frags;
    }

    return 1;
}
4452
4453 void
erts_proc_sig_handle_pending_suspend(Process * c_p)4454 erts_proc_sig_handle_pending_suspend(Process *c_p)
4455 {
4456 ErtsMonitorSuspend *msp;
4457 ErtsMessage *sync;
4458 ErtsProcSigPendingSuspend *psusp;
4459 erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
4460
4461 psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
4462
4463 msp = psusp->mon;
4464
4465 while (msp) {
4466 ErtsMonitorSuspend *next_msp = msp->next;
4467 msp->next = NULL;
4468 if (!(state & ERTS_PSFLG_EXITING)
4469 && erts_monitor_is_in_table(&msp->md.u.target)) {
4470 erts_aint_t mstate;
4471
4472 mstate = erts_atomic_read_bor_acqb(&msp->state,
4473 ERTS_MSUSPEND_STATE_FLG_ACTIVE);
4474 ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
4475 erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
4476 }
4477
4478 erts_monitor_release(&msp->md.u.target);
4479
4480 msp = next_msp;
4481 }
4482
4483 sync = psusp->sync;
4484
4485 while (sync) {
4486 ErtsMessage *next_sync = sync->next;
4487 sync->next = NULL;
4488 sync_suspend_reply(c_p, sync, state);
4489 sync = next_sync;
4490 }
4491
4492 erts_free(ERTS_ALC_T_SIG_DATA, psusp);
4493
4494 ERTS_PROC_SET_PENDING_SUSPEND(c_p, NULL);
4495 }
4496
/*
 * Handle a spawn_reply signal from a distributed spawn_request.
 *
 * Looks up the pending spawn monitor identified by the request
 * reference, patches the prepared {spawn_reply,Ref,ok|error,Result}
 * message with the stored tag, and depending on outcome and flags:
 * installs the link and/or converts the monitor into a regular
 * monitor on the spawned pid (success), tears everything down
 * (failure), or sends an 'abandoned' exit to the spawned process
 * (operation abandoned with link). The signal is either converted
 * to a message in place or dropped.
 *
 * Returns the number of reduction-cost units consumed.
 */
static int
handle_dist_spawn_reply(Process *c_p, ErtsSigRecvTracing *tracing,
                        ErtsMessage *sig, ErtsMessage ***next_nm_sig)
{
    ErtsDistSpawnReplySigData *datap = get_dist_spawn_reply_data(sig);
    ErtsMonitorDataExtended *mdep;
    Eterm msg = datap->message;
    Eterm result = datap->result;
    ErtsMonitor *omon;
    int adjust_monitor;
    ErlHeapFragment *tag_hfrag;
    int convert_to_message = !0;
    int cnt = 1;

    /* result is either an error atom or the spawned (external) pid */
    ASSERT(is_atom(result) || is_external_pid(result));
    ASSERT(is_atom(result) || size_object(result) == EXTERNAL_PID_HEAP_SIZE);

    omon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), datap->ref);

    if (!omon || !(omon->flags & ERTS_ML_FLG_SPAWN_PENDING)) {
        /* Stale reply; remove link that was setup... */
        ErtsLink *lnk = datap->link;
        if (lnk) {
            ErtsELink *elnk;
            ErtsLink *dlnk = erts_link_to_other(lnk, &elnk);
            if (erts_link_dist_delete(dlnk))
                erts_link_release_both(&elnk->ld);
            else
                erts_link_release(lnk);
        }
        /* ...and drop the signal... */
        remove_nm_sig(c_p, sig, next_nm_sig);
        sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
        ERL_MESSAGE_TERM(sig) = msg;
        sig->next = NULL;;
        erts_cleanup_messages(sig);
        return ++cnt;
    }

    mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(omon);

#ifdef DEBUG
    {
        Eterm *tp;
        int i, start, stop;
        ASSERT(erts_monitor_is_in_table(omon));
        ASSERT(omon->flags & ERTS_ML_FLG_SPAWN_PENDING);
        if (is_atom(result)) {
            ASSERT(!datap->link);
        }
        else {
            ASSERT(!datap->link || (omon->flags & ERTS_ML_FLG_SPAWN_LINK));
            ASSERT(!(omon->flags & ERTS_ML_FLG_SPAWN_LINK) || datap->link);
        }
        ASSERT(omon->other.item == am_pending);
        ASSERT(is_tuple_arity(datap->message, 4));
        tp = tuple_val(datap->message);
        ASSERT(tp[1] == am_undefined); /* patch point */
        ASSERT(is_internal_ref(tp[2]));
        ASSERT((tp[3] == am_ok && is_external_pid(tp[4]))
               || (tp[3] == am_error && is_atom(tp[4])));
        start = 0;
        stop = EXTERNAL_PID_HEAP_SIZE;
        if (omon->flags & ERTS_ML_FLG_TAG) {
            start++;
            stop++;
        }
        for (i = start; i < stop; i++) {
            ASSERT(is_non_value(mdep->heap[i]));
        }
    }
#endif

    /*
     * The tag to patch into the resulting message
     * is stored in mdep->u.name via a little trick
     * (see pending_flag in erts_monitor_create()).
     */
    *datap->patch_point = get_heap_frag_eterm(&tag_hfrag, &mdep->u.name);
    /*
     * get_heap_frag_eterm() above will also write
     * NIL to mdep->u.name, restoring it to a normal
     * monitor...
     */

    if (is_atom(result)) { /* Spawn error; cleanup... */
        /* Dist code should not have created a link on failure... */

        ASSERT(is_not_atom(result) || !datap->link);
        /* delete monitor structure... */
        adjust_monitor = 0;
        /* Suppress the error message if abandoned or requested... */
        if (omon->flags & (ERTS_ML_FLG_SPAWN_ABANDONED
                           | ERTS_ML_FLG_SPAWN_NO_EMSG))
            convert_to_message = 0;
    }
    else if (omon->flags & ERTS_ML_FLG_SPAWN_ABANDONED) {
        /*
         * Spawn operation has been abandoned and
         * link option was passed. Send exit signal
         * with exit reason 'abandoned'...
         */
        DistEntry *dep;
        ErtsMonLnkDist *dist;
        ErtsMonitorDataExtended *mdep;
        ErtsLink *lnk;

        mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(omon);
        dist = mdep->dist;

        ASSERT(omon->flags & ERTS_ML_FLG_SPAWN_LINK);

        /* Remove the link set up by the dist code... */
        lnk = datap->link;
        if (lnk) {
            ErtsELink *elnk;
            ErtsLink *dlnk;
            dlnk = erts_link_to_other(lnk, &elnk);
            if (erts_link_dist_delete(dlnk))
                erts_link_release_both(&elnk->ld);
            else
                erts_link_release(lnk);
        }

        ASSERT(is_external_pid(result));
        dep = external_pid_dist_entry(result);

        /* Only send the exit if still talking to the same node
           instance that spawned the process... */
        if (dep != erts_this_dist_entry && dist->nodename == dep->sysname) {
            ErtsDSigSendContext ctx;
            int code = erts_dsig_prepare(&ctx, dep, NULL, 0,
                                         ERTS_DSP_NO_LOCK, 1, 1, 0);
            switch (code) {
            case ERTS_DSIG_PREP_CONNECTED:
            case ERTS_DSIG_PREP_PENDING:
                if (dist->connection_id == ctx.connection_id) {
                    code = erts_dsig_send_exit_tt(&ctx,
                                                  c_p,
                                                  result,
                                                  am_abandoned,
                                                  SEQ_TRACE_TOKEN(c_p));
                    ASSERT(code == ERTS_DSIG_SEND_OK);
                }
                break;
            default:
                break;
            }
        }
        /* delete monitor structure... */
        adjust_monitor = 0;
        /* drop message... */
        convert_to_message = 0;
    }
    else {
        /* Success... */
        ASSERT(is_external_pid(result));

        if (omon->flags & ERTS_ML_FLG_SPAWN_NO_SMSG)
            convert_to_message = 0;

        if (datap->link) {
            cnt++;
            erts_link_tree_insert(&ERTS_P_LINKS(c_p), datap->link);
            if (tracing->procs)
                linking(c_p, result);
        }

        adjust_monitor = !!(omon->flags & ERTS_ML_FLG_SPAWN_MONITOR);
        if (adjust_monitor) {
            /*
             * Insert the actual pid of spawned process
             * in origin part of monitor...
             */
            ErlOffHeap oh;
            ErtsMonitorDataExtended *mdep;
            Eterm *hp;
            mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(omon);
            hp = &(mdep)->heap[(omon->flags & ERTS_ML_FLG_TAG) ? 1 : 0];
            omon->flags &= ~ERTS_ML_FLGS_SPAWN;
            ERTS_INIT_OFF_HEAP(&oh);
            oh.first = mdep->uptr.ohhp;
            omon->other.item = copy_struct(result,
                                           EXTERNAL_PID_HEAP_SIZE,
                                           &hp, &oh);
            mdep->uptr.ohhp = oh.first;
            cnt += 2;
        }
    }

    if (!adjust_monitor) {
        /*
         * Delete monitor; either spawn error
         * or no monitor requested...
         */
        ErtsMonitorData *mdp = erts_monitor_to_data(omon);

        omon->flags &= ~ERTS_ML_FLGS_SPAWN;

        erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), omon);

        if (erts_monitor_dist_delete(&mdp->u.target))
            erts_monitor_release_both(mdp);
        else
            erts_monitor_release(omon);
        cnt += 2;
    }

    if (convert_to_message) {
        convert_prepared_sig_to_msg(c_p, sig, msg, next_nm_sig);
        if (tag_hfrag) {
            /* Save heap fragment of tag in message... */
            ASSERT(sig->data.attached == ERTS_MSG_COMBINED_HFRAG);
            tag_hfrag->next = sig->hfrag.next;
            sig->hfrag.next = tag_hfrag;
        }
        erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
    }
    else {
        /* Drop the signal... */
        remove_nm_sig(c_p, sig, next_nm_sig);
        sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
        ERL_MESSAGE_TERM(sig) = msg;
        sig->next = NULL;;
        erts_cleanup_messages(sig);
        if (tag_hfrag) {
            /* Tag no longer needed... */
            tag_hfrag->next = NULL;
            free_message_buffer(tag_hfrag);
        }
    }
    return cnt;
}
4723
/*
 * Handle a spawn_reply signal received while this process is
 * exiting. No message is ever delivered; instead the outstanding
 * spawn state is torn down: the monitor is removed from the
 * pending-spawn tree, a demonitor and/or link-exit is propagated
 * to the spawned process when appropriate, and the signal itself
 * is cleaned up.
 *
 * Returns the number of reduction-cost units consumed.
 */
static int
handle_dist_spawn_reply_exiting(Process *c_p,
                                ErtsMessage *sig,
                                ErtsMonitor **pend_spawn_mon_pp,
                                Eterm reason)
{
    ErtsDistSpawnReplySigData *datap = get_dist_spawn_reply_data(sig);
    Eterm result = datap->result;
    Eterm msg = datap->message;
    ErtsMonitorData *mdp;
    ErtsMonitor *omon;
    int cnt = 1;

    /* result is either an error atom or the spawned (external) pid */
    ASSERT(is_atom(result) || is_external_pid(result));
    ASSERT(is_atom(result) || size_object(result) == EXTERNAL_PID_HEAP_SIZE);

    omon = erts_monitor_tree_lookup(*pend_spawn_mon_pp, datap->ref);
    if (!omon) {
        /* May happen when connection concurrently close... */
        ErtsLink *lnk = datap->link;
        if (lnk) {
            ErtsELink *elnk;
            ErtsLink *dlnk = erts_link_to_other(lnk, &elnk);
            if (erts_link_dist_delete(dlnk))
                erts_link_release_both(&elnk->ld);
            else
                erts_link_release(lnk);
        }
        cnt++;
    }
    else {
        ASSERT(omon->flags & ERTS_ML_FLG_SPAWN_PENDING);
        ASSERT(!datap->link || is_external_pid(result));

        erts_monitor_tree_delete(pend_spawn_mon_pp, omon);
        mdp = erts_monitor_to_data(omon);

        if (!erts_dist_pend_spawn_exit_delete(&mdp->u.target))
            mdp = NULL; /* Connection closed/closing... */
        cnt++;

        if (is_external_pid(result)) {
            /* Spawn succeeded; undo monitor/link towards the
               spawned process... */
            if ((omon->flags & ERTS_ML_FLG_SPAWN_MONITOR) && mdp) {
                ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
                erts_proc_exit_dist_demonitor(c_p,
                                              external_pid_dist_entry(result),
                                              mdep->dist->connection_id,
                                              datap->ref,
                                              result);
                cnt++;
            }
            ASSERT(!datap->link || (omon->flags & ERTS_ML_FLG_SPAWN_LINK));
            ASSERT(!(omon->flags & ERTS_ML_FLG_SPAWN_LINK) || datap->link);

            if (datap->link) {
                /* This link exit *should* have actual reason... */
                ErtsProcExitContext pectxt = {c_p, reason};
                /* unless operation has been abandoned... */
                if (omon->flags & ERTS_ML_FLG_SPAWN_ABANDONED)
                    pectxt.reason = am_abandoned;
                erts_proc_exit_handle_link(datap->link, (void *) &pectxt, -1);
                cnt++;
            }
        }
        if (mdp)
            erts_monitor_release_both(mdp);
        else
            erts_monitor_release(omon);
        cnt++;
    }
    /* Drop the signal... */
    sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
    ERL_MESSAGE_TERM(sig) = msg;
    sig->next = NULL;
    erts_cleanup_messages(sig);
    cnt++;
    return cnt;
}
4801
/*
 * Handle a message signal sent via an alias (pid reference).
 *
 * If the alias is not (or no longer) active the message is
 * dropped. If the alias was created with 'reply_demonitor'
 * semantics ({alias, once}/ERTS_ML_STATE_ALIAS_ONCE) the alias
 * and any associated monitor are deactivated on first use.
 * Otherwise the signal is converted into an ordinary message.
 *
 * Returns the number of reduction-cost units consumed.
 */
static int
handle_alias_message(Process *c_p, ErtsMessage *sig, ErtsMessage ***next_nm_sig)
{
    void *data_attached;
    Eterm from, alias, msg;
    ErtsMonitor *mon;
    Uint16 flags;
    int type, cnt = 0;

    type = get_alias_msg_data(sig, &from, &alias, &msg, &data_attached);

    ASSERT(is_internal_pid(from) || is_atom(from));
    ASSERT(is_internal_pid_ref(alias));

    ERL_MESSAGE_FROM(sig) = from;

    /* Aliases are represented as monitors keyed on the reference... */
    mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), alias);
    flags = mon ? mon->flags : (Uint16) 0;
    /* NOTE: bitwise '|' of two 0/1 conditions; both operands are
       always evaluated (no short circuit)... */
    if (!(flags & ERTS_ML_STATE_ALIAS_MASK)
        | !!(flags & ERTS_ML_FLG_SPAWN_PENDING)) {
        /*
         * Not an alias (never has been, not anymore, or not yet);
         * drop message...
         */
        remove_nm_sig(c_p, sig, next_nm_sig);
        /* restored as message... */
        ERL_MESSAGE_TERM(sig) = msg;
        if (type == ERTS_SIG_Q_TYPE_DIST)
            sig->data.heap_frag = &sig->hfrag;
        else
            sig->data.attached = data_attached;
        sig->next = NULL;;
        erts_cleanup_messages(sig);
        return 2;
    }

    if ((flags & ERTS_ML_STATE_ALIAS_MASK) == ERTS_ML_STATE_ALIAS_ONCE) {
        /* Single-use alias; deactivate it and take down any
           monitor it was attached to... */
        mon->flags &= ~ERTS_ML_STATE_ALIAS_MASK;

        erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), mon);

        erts_pid_ref_delete(alias);

        switch (mon->type) {
        case ERTS_MON_TYPE_ALIAS:
            /* Pure alias; no monitor to demonitor... */
            erts_monitor_release(mon);
            break;
        case ERTS_MON_TYPE_PROC:
            erts_proc_sig_send_demonitor(mon);
            break;
        case ERTS_MON_TYPE_DIST_PROC: {
            /* Send demonitor to the remote node... */
            ErtsMonitorData *mdp;
            ErtsMonLnkDist *dist;
            DistEntry *dep;
            Eterm watched;
            mdp = erts_monitor_to_data(mon);
            dist = ((ErtsMonitorDataExtended *) mdp)->dist;
            ASSERT(dist);
            if (flags & ERTS_ML_FLG_NAME) {
                watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
                ASSERT(is_atom(watched));
                dep = erts_sysname_to_connected_dist_entry(dist->nodename);
            }
            else {
                watched = mon->other.item;
                ASSERT(is_external_pid(watched));
                dep = external_pid_dist_entry(watched);
            }
            erts_proc_exit_dist_demonitor(c_p, dep, dist->connection_id,
                                          mdp->ref, watched);
            if (!erts_monitor_dist_delete(&mdp->u.target))
                erts_monitor_release(mon);
            else
                erts_monitor_release_both(mdp);
            break;
        }
        case ERTS_MON_TYPE_TIME_OFFSET:
            erts_demonitor_time_offset(mon);
            break;
        case ERTS_MON_TYPE_PORT: {
            Port *prt;
            ASSERT(is_internal_port(mon->other.item));
            prt = erts_port_lookup(mon->other.item, ERTS_PORT_SFLGS_DEAD);
            if (!prt || erts_port_demonitor(c_p, prt, mon) == ERTS_PORT_OP_DROPPED)
                erts_monitor_release(mon);
            break;
        }
        default:
            break;
        }
    }

    if (type != ERTS_SIG_Q_TYPE_DIST) {
        convert_prepared_sig_to_msg_attached(c_p, sig, msg,
                                             data_attached, next_nm_sig);
        cnt++;
    }
    else {
        /*
         * Convert to external message...
         *
         * See erts_proc_sig_send_dist_to_alias() for info on
         * how the signal was constructed...
         */
        if (sig->hfrag.alloc_size > 1) {
            convert_prepared_sig_to_external_msg(c_p, sig, next_nm_sig);
            cnt++;
        }
        else {
            /*
             * Fragmented message. Need to replace message
             * reference...
             */
            ErtsMessage *mp = erts_alloc_message(0, NULL);
            sys_memcpy((void *) &mp->m[0],
                       (void *) &sig->m[0],
                       ERL_MESSAGE_REF_ARRAY_SZ*sizeof(Eterm));
            ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
            ASSERT(sig->hfrag.next);
            mp->data.heap_frag = sig->hfrag.next;

            /* Replace original signal with the external message... */
            convert_to_msg(c_p, sig, mp, next_nm_sig);

            ERL_MESSAGE_TERM(sig) = NIL;
            sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
            sig->hfrag.next = NULL;
            sig->next = NULL;;
            erts_cleanup_messages(sig);
            cnt += 8;
        }
    }
    erts_proc_notify_new_message(c_p, ERTS_PROC_LOCK_MAIN);
    return cnt;
}
4937
4938
4939 /*
4940 * Called in order to handle incoming signals.
4941 */
4942
4943 int
erts_proc_sig_handle_incoming(Process * c_p,erts_aint32_t * statep,int * redsp,int max_reds,int local_only)4944 erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
4945 int *redsp, int max_reds, int local_only)
4946 {
4947 Eterm tag;
4948 erts_aint32_t state;
4949 int yield, cnt, limit, abs_lim, msg_tracing, save_in_msgq;
4950 ErtsMessage *sig, ***next_nm_sig;
4951 ErtsSigRecvTracing tracing;
4952
4953 ASSERT(!(c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS));
4954 if (c_p->sig_qs.flags & FS_HANDLING_SIGS)
4955 wait_handle_signals(c_p);
4956 else
4957 c_p->sig_qs.flags |= FS_HANDLING_SIGS;
4958
4959 ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
4960 ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
4961
4962 state = erts_atomic32_read_nob(&c_p->state);
4963 if (!local_only) {
4964 if (ERTS_PSFLG_SIG_IN_Q & state) {
4965 erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
4966 erts_proc_sig_fetch(c_p);
4967 erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
4968 }
4969 }
4970
4971 limit = *redsp;
4972 *redsp = 0;
4973 yield = 0;
4974 save_in_msgq = !0;
4975
4976 if (!c_p->sig_qs.cont) {
4977 *statep = state;
4978 ASSERT(!(c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS));
4979 c_p->sig_qs.flags &= ~FS_HANDLING_SIGS;
4980 return !0;
4981 }
4982
4983 if (state & ERTS_PSFLG_EXITING) {
4984 *statep = state;
4985 ASSERT(!(c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS));
4986 c_p->sig_qs.flags &= ~FS_HANDLING_SIGS;
4987 return 0;
4988 }
4989
4990 next_nm_sig = &c_p->sig_qs.nmsigs.next;
4991
4992 setup_tracing_state(c_p, &tracing);
4993 msg_tracing = tracing.messages.active;
4994
4995 limit *= ERTS_SIG_REDS_CNT_FACTOR;
4996 abs_lim = ERTS_SIG_REDS_CNT_FACTOR*max_reds;
4997 if (limit > abs_lim)
4998 limit = abs_lim;
4999
5000 cnt = 0;
5001
5002 do {
5003
5004 if (msg_tracing) {
5005 int tres;
5006 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5007 tres = handle_msg_tracing(c_p, &tracing, next_nm_sig);
5008 if (tres != 0) {
5009 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5010 if (tres < 0)
5011 yield = !0;
5012 break; /* tracing limit or end... */
5013 }
5014 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5015 }
5016
5017 if (!*next_nm_sig)
5018 break;
5019
5020 sig = **next_nm_sig;
5021
5022 ASSERT(sig);
5023 ASSERT(ERTS_SIG_IS_NON_MSG(sig));
5024
5025 tag = ((ErtsSignal *) sig)->common.tag;
5026
5027 switch (ERTS_PROC_SIG_OP(tag)) {
5028
5029 case ERTS_SIG_Q_OP_EXIT:
5030 case ERTS_SIG_Q_OP_EXIT_LINKED: {
5031 int exited;
5032
5033 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5034
5035 cnt += handle_exit_signal(c_p, &tracing, sig,
5036 next_nm_sig, &exited);
5037
5038 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5039
5040 if (exited)
5041 goto stop; /* terminated by signal */
5042 /* ignored or converted to exit message... */
5043 break;
5044 }
5045
5046 case ERTS_SIG_Q_OP_MONITOR_DOWN: {
5047 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5048 ErtsExitSignalData *xsigd = NULL;
5049 ErtsMonitorData *mdp = NULL;
5050 ErtsMonitor *omon = NULL, *tmon = NULL;
5051
5052 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5053
5054 switch (type) {
5055 case ERTS_MON_TYPE_DIST_PROC:
5056 case ERTS_MON_TYPE_PROC:
5057 case ERTS_MON_TYPE_PORT:
5058 tmon = (ErtsMonitor *) sig;
5059 ASSERT(erts_monitor_is_target(tmon));
5060 ASSERT(!erts_monitor_is_in_table(tmon));
5061 mdp = erts_monitor_to_data(tmon);
5062 if (erts_monitor_is_in_table(&mdp->origin)) {
5063 omon = &mdp->origin;
5064 cnt += convert_to_down_message(c_p, sig, mdp, &omon,
5065 type, next_nm_sig);
5066 }
5067 break;
5068 case ERTS_SIG_Q_TYPE_GEN_EXIT:
5069 xsigd = get_exit_signal_data(sig);
5070
5071 /* This GEN_EXIT was received from another node, decode the exit reason */
5072 if (ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
5073 if (!erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN, sig, 1))
5074 break; /* Decode failed, just remove signal */
5075
5076 omon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p),
5077 xsigd->u.ref);
5078 if (omon) {
5079 ASSERT(erts_monitor_is_origin(omon));
5080 if (omon->type == ERTS_MON_TYPE_ALIAS) {
5081 omon = NULL;
5082 break;
5083 }
5084 if (omon->flags & ERTS_ML_FLG_SPAWN_PENDING) {
5085 handle_missing_spawn_reply(c_p, omon);
5086 /*
5087 * We leave the pending spawn monitor as is,
5088 * so that the nodedown will trigger an error
5089 * spawn_reply...
5090 */
5091 omon = NULL;
5092 cnt += 4;
5093 break;
5094 }
5095 mdp = erts_monitor_to_data(omon);
5096 if (omon->type == ERTS_MON_TYPE_DIST_PROC) {
5097 if (erts_monitor_dist_delete(&mdp->u.target))
5098 tmon = &mdp->u.target;
5099 }
5100 ASSERT(!(omon->flags & ERTS_ML_FLGS_SPAWN));
5101 cnt += convert_prepared_down_message(c_p, sig,
5102 xsigd->message,
5103 next_nm_sig);
5104 if (omon->flags & ERTS_ML_FLG_TAG) {
5105 Eterm *tpl, *tag_storage;
5106 ASSERT(is_tuple_arity(xsigd->message, 5));
5107 tpl = tuple_val(xsigd->message);
5108 ASSERT(tpl[1] == am_DOWN);
5109 if (mdp->origin.flags & ERTS_ML_FLG_EXTENDED)
5110 tag_storage = &((ErtsMonitorDataExtended *) mdp)->heap[0];
5111 else
5112 tag_storage = &((ErtsMonitorDataTagHeap *) mdp)->heap[0];
5113 tpl[1] = save_heap_frag_eterm(c_p, sig, tag_storage);
5114 }
5115 }
5116 break;
5117 case ERTS_MON_TYPE_NODE:
5118 tmon = (ErtsMonitor *) sig;
5119 ASSERT(erts_monitor_is_target(tmon));
5120 ASSERT(!erts_monitor_is_in_table(tmon));
5121 mdp = erts_monitor_to_data(tmon);
5122 if (erts_monitor_is_in_table(&mdp->origin)) {
5123 omon = &mdp->origin;
5124 cnt += handle_nodedown(c_p, sig, mdp, next_nm_sig);
5125 }
5126 break;
5127 case ERTS_MON_TYPE_SUSPEND:
5128 tmon = (ErtsMonitor *) sig;
5129 ASSERT(erts_monitor_is_target(tmon));
5130 ASSERT(!erts_monitor_is_in_table(tmon));
5131 mdp = erts_monitor_to_data(tmon);
5132 if (erts_monitor_is_in_table(&mdp->origin)) {
5133 omon = &mdp->origin;
5134 remove_nm_sig(c_p, sig, next_nm_sig);
5135 }
5136 break;
5137 default:
5138 ERTS_INTERNAL_ERROR("invalid monitor type");
5139 break;
5140 }
5141
5142 if (!omon) {
5143 remove_nm_sig(c_p, sig, next_nm_sig);
5144 if (xsigd) {
5145 sig->next = NULL;
5146 erts_cleanup_messages(sig);
5147 }
5148 if (tmon)
5149 erts_monitor_release(tmon);
5150 }
5151 else {
5152 switch (omon->flags & ERTS_ML_STATE_ALIAS_MASK) {
5153 case ERTS_ML_STATE_ALIAS_UNALIAS: {
5154 ErtsMonitorData *amdp;
5155 ASSERT(is_internal_pid_ref(mdp->ref));
5156 amdp = erts_monitor_create(ERTS_MON_TYPE_ALIAS,
5157 mdp->ref, c_p->common.id,
5158 NIL, NIL, THE_NON_VALUE);
5159 amdp->origin.flags = ERTS_ML_STATE_ALIAS_UNALIAS;
5160 erts_monitor_tree_replace(&ERTS_P_MONITORS(c_p),
5161 omon,
5162 &amdp->origin);
5163 break;
5164 }
5165 case ERTS_ML_STATE_ALIAS_ONCE:
5166 case ERTS_ML_STATE_ALIAS_DEMONITOR:
5167 ASSERT(is_internal_pid_ref(mdp->ref));
5168 erts_pid_ref_delete(mdp->ref);
5169 /* fall through... */
5170 default:
5171 if (type != ERTS_MON_TYPE_NODE)
5172 erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), omon);
5173 break;
5174 }
5175 if (tmon)
5176 erts_monitor_release_both(mdp);
5177 else
5178 erts_monitor_release(omon);
5179 }
5180
5181 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5182 break;
5183 }
5184
5185 case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG: {
5186 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5187 ErtsMonitor *mon;
5188 Eterm msg;
5189 Eterm key;
5190
5191 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5192
5193 key = get_persist_mon_msg(sig, &msg);
5194
5195 cnt++;
5196 mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), key);
5197 if (mon) {
5198 ASSERT(erts_monitor_is_origin(mon));
5199 handle_persistent_mon_msg(c_p, type, mon, sig,
5200 msg, next_nm_sig);
5201
5202 if ((mon->flags & ERTS_ML_STATE_ALIAS_MASK)
5203 == ERTS_ML_STATE_ALIAS_ONCE) {
5204 mon->flags &= ~ERTS_ML_STATE_ALIAS_MASK;
5205 erts_pid_ref_delete(key);
5206 }
5207 }
5208 else {
5209 cnt++;
5210 remove_nm_sig(c_p, sig, next_nm_sig);
5211 sig->next = NULL;
5212 erts_cleanup_messages(sig);
5213 }
5214
5215 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5216 break;
5217 }
5218
5219 case ERTS_SIG_Q_OP_MONITOR: {
5220 ErtsMonitor *mon = (ErtsMonitor *) sig;
5221
5222 ASSERT(erts_monitor_is_target(mon));
5223 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5224
5225 remove_nm_sig(c_p, sig, next_nm_sig);
5226
5227 if (mon->type == ERTS_MON_TYPE_DIST_PROC)
5228 erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), mon);
5229 else {
5230 erts_monitor_list_insert(&ERTS_P_LT_MONITORS(c_p), mon);
5231 if (mon->type == ERTS_MON_TYPE_SUSPEND)
5232 handle_suspend(c_p, mon, &yield);
5233 }
5234 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5235 cnt += 2;
5236 break;
5237 }
5238
5239 case ERTS_SIG_Q_OP_DEMONITOR: {
5240 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5241
5242 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5243
5244 remove_nm_sig(c_p, sig, next_nm_sig);
5245
5246 if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
5247 ErtsMonitor *tmon;
5248 ErtsSigDistProcDemonitor *dmon;
5249 dmon = (ErtsSigDistProcDemonitor *) sig;
5250 tmon = erts_monitor_tree_lookup(ERTS_P_MONITORS(c_p), dmon->ref);
5251 destroy_dist_proc_demonitor(dmon);
5252 cnt++;
5253 if (tmon) {
5254 ErtsMonitorData *mdp = erts_monitor_to_data(tmon);
5255 ASSERT(erts_monitor_is_target(tmon));
5256 erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
5257 if (!erts_monitor_dist_delete(&mdp->origin))
5258 erts_monitor_release(tmon);
5259 else
5260 erts_monitor_release_both(mdp);
5261 cnt += 2;
5262 }
5263 }
5264 else {
5265 ErtsMonitor *omon = (ErtsMonitor *) sig;
5266 ErtsMonitorData *mdp = erts_monitor_to_data(omon);
5267 ASSERT(omon->type == type);
5268 ASSERT(erts_monitor_is_origin(omon));
5269 ASSERT(!erts_monitor_is_in_table(omon));
5270 if (!erts_monitor_is_in_table(&mdp->u.target))
5271 erts_monitor_release(omon);
5272 else {
5273 ErtsMonitor *tmon = &mdp->u.target;
5274 ASSERT(tmon->type == type);
5275 if (type == ERTS_MON_TYPE_DIST_PROC)
5276 erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
5277 else {
5278 erts_monitor_list_delete(&ERTS_P_LT_MONITORS(c_p), tmon);
5279 switch (type) {
5280 case ERTS_MON_TYPE_RESOURCE:
5281 erts_nif_demonitored((ErtsResource *) tmon->other.ptr);
5282 cnt++;
5283 break;
5284 case ERTS_MON_TYPE_SUSPEND: {
5285 ErtsMonitorSuspend *msp;
5286 erts_aint_t mstate;
5287 msp = (ErtsMonitorSuspend *) erts_monitor_to_data(tmon);
5288 mstate = erts_atomic_read_acqb(&msp->state);
5289 if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)
5290 erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
5291 break;
5292 }
5293 default:
5294 break;
5295 }
5296 }
5297 erts_monitor_release_both(mdp);
5298 cnt++;
5299 }
5300 cnt++;
5301 }
5302 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5303 break;
5304 }
5305
5306 case ERTS_SIG_Q_OP_LINK: {
5307 ErtsLink *lnk, *nlnk = (ErtsLink *) sig;
5308
5309 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5310
5311 remove_nm_sig(c_p, sig, next_nm_sig);
5312 lnk = erts_link_tree_lookup_insert(&ERTS_P_LINKS(c_p), nlnk);
5313 if (!lnk) {
5314 if (tracing.procs)
5315 getting_linked(c_p, nlnk->other.item);
5316 }
5317 else {
5318 /* Already linked or unlinking... */
5319 if (nlnk->type != ERTS_LNK_TYPE_DIST_PROC)
5320 erts_link_internal_release(nlnk);
5321 else {
5322 ErtsELink *elnk;
5323 ErtsLink *dlnk = erts_link_to_other(nlnk, &elnk);
5324 if (erts_link_dist_delete(dlnk))
5325 erts_link_release_both(&elnk->ld);
5326 else
5327 erts_link_release(nlnk);
5328 }
5329 }
5330
5331 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5332 break;
5333 }
5334
5335 case ERTS_SIG_Q_OP_UNLINK: {
5336 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5337 ErtsLink *llnk;
5338
5339 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5340
5341 remove_nm_sig(c_p, sig, next_nm_sig);
5342 if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
5343 ErtsSigDistUnlinkOp *sdulnk = (ErtsSigDistUnlinkOp *) sig;
5344 ASSERT(is_external_pid(sdulnk->remote));
5345 llnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p), sdulnk->remote);
5346 if (llnk) {
5347 ErtsELink *elnk;
5348 ErtsLink *dlnk = erts_link_to_other(llnk, &elnk);
5349 if (!elnk->unlinking) {
5350 erts_link_tree_delete(&ERTS_P_LINKS(c_p), llnk);
5351 if (erts_link_dist_delete(dlnk))
5352 erts_link_release_both(&elnk->ld);
5353 else
5354 erts_link_release(llnk);
5355 cnt += 8;
5356 if (tracing.procs)
5357 getting_unlinked(c_p, sdulnk->remote);
5358 }
5359 }
5360 reply_dist_unlink_ack(c_p, sdulnk);
5361 cnt++;
5362 }
5363 else {
5364 ErtsSigUnlinkOp *sulnk = (ErtsSigUnlinkOp *) sig;
5365 llnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p),
5366 sulnk->from);
5367 if (llnk && !((ErtsILink *) llnk)->unlinking) {
5368 if (tracing.procs)
5369 getting_unlinked(c_p, sulnk->from);
5370 erts_link_tree_delete(&ERTS_P_LINKS(c_p), llnk);
5371 erts_link_release(llnk);
5372 cnt += 4;
5373 }
5374 if (is_internal_pid(sulnk->from))
5375 erts_proc_sig_send_unlink_ack(c_p, c_p->common.id, sulnk);
5376 else {
5377 Port *prt;
5378 ASSERT(is_internal_port(sulnk->from));
5379 prt = erts_port_lookup(sulnk->from,
5380 ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
5381 if (prt)
5382 erts_port_unlink_ack(c_p, prt, sulnk);
5383 else
5384 erts_proc_sig_destroy_unlink_op(sulnk);
5385 }
5386 }
5387
5388 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5389 break;
5390 }
5391
5392 case ERTS_SIG_Q_OP_UNLINK_ACK: {
5393 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5394
5395 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5396
5397 remove_nm_sig(c_p, sig, next_nm_sig);
5398 if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
5399 ErtsSigDistUnlinkOp *sdulnk;
5400 ErtsLink *lnk;
5401 sdulnk = (ErtsSigDistUnlinkOp *) sig;
5402 lnk = erts_link_tree_lookup(ERTS_P_LINKS(c_p),
5403 sdulnk->remote);
5404 if (lnk) {
5405 ErtsELink *elnk = erts_link_to_elink(lnk);
5406 if (elnk->unlinking == sdulnk->id) {
5407 erts_link_tree_delete(&ERTS_P_LINKS(c_p), lnk);
5408 if (erts_link_dist_delete(&elnk->ld.dist))
5409 erts_link_release_both(&elnk->ld);
5410 else
5411 erts_link_release(lnk);
5412 cnt += 8;
5413 }
5414 }
5415 destroy_sig_dist_unlink_op(sdulnk);
5416 }
5417 else {
5418 ErtsSigUnlinkOp *sulnk;
5419 ErtsILink *ilnk;
5420
5421 sulnk = (ErtsSigUnlinkOp *) sig;
5422 ilnk = (ErtsILink *) erts_link_tree_lookup(ERTS_P_LINKS(c_p),
5423 sulnk->from);
5424
5425 if (ilnk && ilnk->unlinking == sulnk->id) {
5426 erts_link_tree_delete(&ERTS_P_LINKS(c_p), &ilnk->link);
5427 erts_link_internal_release(&ilnk->link);
5428 cnt += 4;
5429 }
5430 erts_proc_sig_destroy_unlink_op(sulnk);
5431 }
5432
5433 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5434 break;
5435 }
5436
5437 case ERTS_SIG_Q_OP_GROUP_LEADER: {
5438 ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
5439 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5440 remove_nm_sig(c_p, sig, next_nm_sig);
5441 handle_group_leader(c_p, sgl);
5442 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5443 break;
5444 }
5445
5446 case ERTS_SIG_Q_OP_IS_ALIVE:
5447 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5448 remove_nm_sig(c_p, sig, next_nm_sig);
5449 is_alive_response(c_p, sig, !0);
5450 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5451 break;
5452
5453 case ERTS_SIG_Q_OP_PROCESS_INFO:
5454 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5455 handle_process_info(c_p, &tracing, sig, next_nm_sig, !0);
5456 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5457 break;
5458
5459 case ERTS_SIG_Q_OP_SYNC_SUSPEND:
5460 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5461 remove_nm_sig(c_p, sig, next_nm_sig);
5462 handle_sync_suspend(c_p, sig);
5463 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5464 break;
5465
5466 case ERTS_SIG_Q_OP_RPC:
5467 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5468 remove_nm_sig(c_p, sig, next_nm_sig);
5469 cnt += handle_rpc(c_p, (ErtsProcSigRPC *) sig, cnt,
5470 limit, &yield);
5471 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5472 break;
5473
5474 case ERTS_SIG_Q_OP_ADJ_MSGQ:
5475 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5476 switch (ERTS_PROC_SIG_TYPE(tag)) {
5477 case ERTS_SIG_Q_TYPE_CLA:
5478 cnt += handle_cla(c_p, sig, next_nm_sig, 0);
5479 break;
5480 case ERTS_SIG_Q_TYPE_OFF_HEAP:
5481 cnt += handle_move_msgq_off_heap(c_p, sig, next_nm_sig, 0);
5482 break;
5483 default:
5484 ERTS_INTERNAL_ERROR("Invalid 'adjust-message-queue' signal type");
5485 break;
5486 }
5487 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5488 break;
5489
5490 case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE: {
5491 Uint16 type = ERTS_PROC_SIG_TYPE(tag);
5492
5493 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5494 msg_tracing = handle_trace_change_state(c_p, &tracing,
5495 type, sig,
5496 next_nm_sig);
5497 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5498
5499 break;
5500 }
5501
5502 case ERTS_SIG_Q_OP_DIST_SPAWN_REPLY: {
5503 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5504 cnt += handle_dist_spawn_reply(c_p, &tracing, sig, next_nm_sig);
5505 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5506 break;
5507 }
5508
5509 case ERTS_SIG_Q_OP_ALIAS_MSG: {
5510 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5511 cnt += handle_alias_message(c_p, sig, next_nm_sig);
5512 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
5513 break;
5514 }
5515
5516 case ERTS_SIG_Q_OP_RECV_MARK: {
5517 ErtsRecvMarker *markp = (ErtsRecvMarker *) sig;
5518 ASSERT(markp->in_sigq);
5519
5520 if (markp->in_sigq < 0) {
5521 /* Marked for removal... */
5522 if (markp->set_save) {
5523 c_p->sig_qs.save = *next_nm_sig;
5524 ASSERT(c_p->sig_qs.recv_mrk_blk);
5525 ASSERT(c_p->sig_qs.recv_mrk_blk->pending_set_save_ix
5526 == ERTS_RECV_MARKER_IX__(c_p->sig_qs.recv_mrk_blk,
5527 markp));
5528 c_p->sig_qs.recv_mrk_blk->pending_set_save_ix = -1;
5529 save_in_msgq = 0;
5530 }
5531 markp->in_msgq = markp->in_sigq = markp->set_save = 0;
5532 remove_nm_sig(c_p, sig, next_nm_sig);
5533 recv_marker_deallocate(c_p, markp);
5534 }
5535 else {
5536 markp->prev_next = *next_nm_sig;
5537 ASSERT(*markp->prev_next == sig);
5538 *next_nm_sig = ((ErtsSignal *) sig)->common.specific.next;
5539
5540 ERTS_SIG_DBG_RECV_MARK_SET_HANDLED(sig);
5541
5542 markp->in_msgq = !0;
5543 if (markp->set_save) {
5544 c_p->sig_qs.save = &markp->sig.common.next;
5545 markp->set_save = 0;
5546 ASSERT(c_p->sig_qs.recv_mrk_blk);
5547 ASSERT(c_p->sig_qs.recv_mrk_blk->pending_set_save_ix
5548 == ERTS_RECV_MARKER_IX__(c_p->sig_qs.recv_mrk_blk,
5549 markp));
5550 c_p->sig_qs.recv_mrk_blk->pending_set_save_ix = -1;
5551 save_in_msgq = 0;
5552 }
5553 }
5554
5555 break;
5556 }
5557
5558 default:
5559 ERTS_INTERNAL_ERROR("Unknown signal");
5560 break;
5561 }
5562
5563 cnt++;
5564
5565 } while (cnt <= limit
5566 || stretch_limit(c_p, &tracing, abs_lim, &limit, save_in_msgq));
5567
5568 stop: {
5569 int res;
5570
5571 if (c_p->sig_qs.save == &c_p->sig_qs.cont)
5572 c_p->sig_qs.save = c_p->sig_qs.last;
5573
5574 if (ERTS_UNLIKELY(msg_tracing != 0)) {
5575 /*
5576 * All messages that has been traced should
5577 * be moved to inner queue. Next signal in
5578 * middle queue should either be next message
5579 * to trace or next non-message signal.
5580 */
5581 ASSERT(tracing.messages.next);
5582
5583 /*
5584 * If we yielded right after we handled a receive
5585 * marker, we might point to a receive marker that
5586 * should be included in the message queue. Adjust
5587 * 'tracing.messages.next' if that is the case...
5588 */
5589 if (*tracing.messages.next
5590 && ERTS_SIG_IS_RECV_MARKER(*tracing.messages.next)
5591 && ((ErtsRecvMarker *) *tracing.messages.next)->in_msgq) {
5592
5593 tracing.messages.next = &(*tracing.messages.next)->next;
5594
5595 /* There can only be one such receive marker... */
5596 ASSERT(!(*tracing.messages.next
5597 && ERTS_SIG_IS_RECV_MARKER(*tracing.messages.next)
5598 && ((ErtsRecvMarker *) *tracing.messages.next)->in_msgq));
5599 }
5600
5601 if (*next_nm_sig) {
5602 if (*next_nm_sig == tracing.messages.next)
5603 *next_nm_sig = &c_p->sig_qs.cont;
5604 if (c_p->sig_qs.nmsigs.last == tracing.messages.next)
5605 c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
5606 *statep = erts_atomic32_read_nob(&c_p->state);
5607 }
5608 else {
5609 ASSERT(!c_p->sig_qs.nmsigs.next);
5610 c_p->sig_qs.nmsigs.last = NULL;
5611 state = erts_atomic32_read_band_nob(&c_p->state,
5612 ~ERTS_PSFLG_SIG_Q);
5613 state &= ~ERTS_PSFLG_SIG_Q;
5614 *statep = state;
5615 }
5616
5617 if (tracing.messages.next != &c_p->sig_qs.cont) {
5618 if (ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.cont)) {
5619 ErtsRecvMarker *markp = (ErtsRecvMarker *) c_p->sig_qs.cont;
5620 markp->prev_next = c_p->sig_qs.last;
5621 }
5622
5623 *c_p->sig_qs.last = c_p->sig_qs.cont;
5624 c_p->sig_qs.last = tracing.messages.next;
5625
5626 c_p->sig_qs.cont = *tracing.messages.next;
5627 if (!c_p->sig_qs.cont)
5628 c_p->sig_qs.cont_last = &c_p->sig_qs.cont;
5629 *c_p->sig_qs.last = NULL;
5630 }
5631
5632 res = !c_p->sig_qs.cont;
5633 }
5634 else if (*next_nm_sig) {
5635 /*
5636 * All messages prior to next non-message
5637 * signal should be moved to inner queue.
5638 * Next non-message signal to handle should
5639 * be first in middle queue.
5640 */
5641 ASSERT(**next_nm_sig);
5642 if (*next_nm_sig != &c_p->sig_qs.cont) {
5643 if (ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.cont)) {
5644 ErtsRecvMarker *markp = (ErtsRecvMarker *) c_p->sig_qs.cont;
5645 markp->prev_next = c_p->sig_qs.last;
5646 }
5647
5648 *c_p->sig_qs.last = c_p->sig_qs.cont;
5649 c_p->sig_qs.last = *next_nm_sig;
5650
5651 c_p->sig_qs.cont = **next_nm_sig;
5652 if (c_p->sig_qs.nmsigs.last == *next_nm_sig)
5653 c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
5654 *next_nm_sig = &c_p->sig_qs.cont;
5655 *c_p->sig_qs.last = NULL;
5656 }
5657
5658 ASSERT(c_p->sig_qs.cont);
5659
5660 *statep = erts_atomic32_read_nob(&c_p->state);
5661
5662 res = 0;
5663 }
5664 else {
5665 /*
5666 * All non-message signals handled. All
5667 * messages should be moved to inner queue.
5668 * Middle queue should be empty.
5669 */
5670 ASSERT(!c_p->sig_qs.nmsigs.next);
5671 c_p->sig_qs.nmsigs.last = NULL;
5672
5673 if (c_p->sig_qs.cont_last != &c_p->sig_qs.cont) {
5674 if (ERTS_SIG_IS_RECV_MARKER(c_p->sig_qs.cont)) {
5675 ErtsRecvMarker *markp = (ErtsRecvMarker *) c_p->sig_qs.cont;
5676 markp->prev_next = c_p->sig_qs.last;
5677 }
5678
5679 ASSERT(!*c_p->sig_qs.last);
5680 *c_p->sig_qs.last = c_p->sig_qs.cont;
5681 c_p->sig_qs.last = c_p->sig_qs.cont_last;
5682 ASSERT(!*c_p->sig_qs.last);
5683
5684 c_p->sig_qs.cont_last = &c_p->sig_qs.cont;
5685 c_p->sig_qs.cont = NULL;
5686 }
5687
5688 ASSERT(!c_p->sig_qs.cont);
5689
5690 state = erts_atomic32_read_band_nob(&c_p->state,
5691 ~ERTS_PSFLG_SIG_Q);
5692 state &= ~ERTS_PSFLG_SIG_Q;
5693 *statep = state;
5694 res = !0;
5695 }
5696
5697 /* Ensure that 'save' doesn't point to a receive marker... */
5698 if (*c_p->sig_qs.save
5699 && ERTS_SIG_IS_RECV_MARKER(*c_p->sig_qs.save)) {
5700 c_p->sig_qs.save = erts_msgq_pass_recv_markers(c_p,
5701 c_p->sig_qs.save);
5702 }
5703
5704 ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
5705
5706 *redsp = cnt/4 + 1;
5707
5708 if (yield) {
5709 int vreds = max_reds - *redsp;
5710 if (vreds > 0) {
5711 ErtsSchedulerData *esdp = erts_get_scheduler_data();
5712 esdp->virtual_reds += vreds;
5713 }
5714 *redsp = max_reds;
5715 }
5716
5717 if (c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS)
5718 wake_handle_signals(c_p);
5719 else
5720 c_p->sig_qs.flags &= ~FS_HANDLING_SIGS;
5721
5722 return res;
5723 }
5724 }
5725
/*
 * Decide whether the reduction limit used while handling signals
 * should be stretched, and if so update '*limp' (bounded by
 * 'abs_lim').
 *
 * c_p          - process whose signal queues are inspected.
 * tp           - current receive-tracing state.
 * abs_lim      - absolute upper bound for the limit.
 * limp         - in/out: current limit; updated when stretched.
 * save_in_msgq - non-zero if the save pointer points into the
 *                message queue (as opposed to the middle queue).
 *
 * Returns !0 if the limit was stretched; 0 otherwise.
 */
static int
stretch_limit(Process *c_p, ErtsSigRecvTracing *tp,
              int abs_lim, int *limp, int save_in_msgq)
{
    ErtsMessage **sigpp;
    int lim, in_msgq;
    /*
     * Stretch limit up to a maximum of 'abs_lim' if
     * there currently are no messages available to
     * inspect by 'receive' and it might be possible
     * to get messages available by processing
     * signals (or trace messages).
     */

    lim = *limp;
    ASSERT(abs_lim >= lim);
    if (abs_lim == lim)
        return 0;

    /*
     * We cannot use erts_msgq_peek_msg() to inspect
     * save pointer here! At this point save pointer has
     * not been moved passed possible receive markers...
     *
     * Also note that the save pointer might point into
     * message queue as well as middle signal queue (if a
     * receive marker with 'set_save' set just arrived).
     */
    if (c_p->sig_qs.save == c_p->sig_qs.last) {
        /* Save pointer at end of inner queue; continue scanning
         * from the start of the middle queue... */
        sigpp = &c_p->sig_qs.cont;
        in_msgq = 0;
    }
    else {
        sigpp = c_p->sig_qs.save;
        in_msgq = save_in_msgq;
    }

    while (!0) {
        Eterm tag;
        if (!(*sigpp))
            return 0; /* No signals to process available... */

        if (!in_msgq)
            break;

        /* NOTE(review): reaching the tracing continuation point is
         * treated like reaching the middle queue -- confirm against
         * tracing setup in caller. */
        if (tp->messages.next == sigpp)
            break;

        tag = ((ErtsSignal *) *sigpp)->common.tag;

        if (ERTS_SIG_IS_MSG_TAG(tag))
            return 0; /* Have message to inspect... */

        ASSERT(tag == ERTS_RECV_MARKER_TAG);

        /*
         * Pass the recv marker without punishing it
         * by increasing the 'pass' field...
         */
        sigpp = &(*sigpp)->next;
        if (sigpp == c_p->sig_qs.last) {
            sigpp = &c_p->sig_qs.cont;
            in_msgq = 0;
        }
    }

    /*
     * Stretch the limit so we can process some more signals
     * in order to try to make messages available in message
     * queue...
     */
    lim += ERTS_SIG_REDS_CNT_FACTOR*100;
    if (lim > abs_lim)
        lim = abs_lim;
    *limp = lim;
    return !0;
}
5803
5804
5805 int
erts_proc_sig_handle_exit(Process * c_p,Sint * redsp,ErtsMonitor ** pend_spawn_mon_pp,Eterm reason)5806 erts_proc_sig_handle_exit(Process *c_p, Sint *redsp,
5807 ErtsMonitor **pend_spawn_mon_pp,
5808 Eterm reason)
5809 {
5810 int cnt;
5811 Sint limit;
5812 ErtsMessage *sig, ***next_nm_sig;
5813
5814 ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
5815 ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
5816
5817 ASSERT(!(ERTS_PSFLG_SIG_IN_Q & erts_atomic32_read_nob(&c_p->state)));
5818
5819 limit = *redsp;
5820 limit *= ERTS_SIG_REDS_CNT_FACTOR;
5821
5822 *redsp = 1;
5823
5824 next_nm_sig = &c_p->sig_qs.nmsigs.next;
5825
5826 if (!*next_nm_sig) {
5827 ASSERT(!c_p->sig_qs.nmsigs.last);
5828 return !0; /* done... */
5829 }
5830
5831 cnt = 0;
5832
5833 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, NULL, next_nm_sig);
5834
5835 do {
5836 Eterm tag;
5837 Uint16 type;
5838 int op;
5839
5840 sig = **next_nm_sig;
5841
5842 ASSERT(sig);
5843 ASSERT(ERTS_SIG_IS_NON_MSG(sig));
5844
5845 tag = ((ErtsSignal *) sig)->common.tag;
5846 type = ERTS_PROC_SIG_TYPE(tag);
5847 op = ERTS_PROC_SIG_OP(tag);
5848
5849 remove_nm_sig(c_p, sig, next_nm_sig);
5850
5851 ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, NULL, next_nm_sig);
5852
5853 cnt++;
5854
5855 switch (op) {
5856
5857 case ERTS_SIG_Q_OP_EXIT:
5858 case ERTS_SIG_Q_OP_EXIT_LINKED:
5859 case ERTS_SIG_Q_OP_MONITOR_DOWN:
5860 switch (type) {
5861 case ERTS_SIG_Q_TYPE_GEN_EXIT:
5862 sig->next = NULL;
5863 erts_cleanup_messages(sig);
5864 break;
5865 case ERTS_LNK_TYPE_PORT:
5866 case ERTS_LNK_TYPE_PROC:
5867 case ERTS_LNK_TYPE_DIST_PROC:
5868 erts_link_release((ErtsLink *) sig);
5869 break;
5870 case ERTS_MON_TYPE_PORT:
5871 case ERTS_MON_TYPE_PROC:
5872 case ERTS_MON_TYPE_DIST_PROC:
5873 case ERTS_MON_TYPE_NODE:
5874 case ERTS_MON_TYPE_NODES:
5875 case ERTS_MON_TYPE_SUSPEND:
5876 erts_monitor_release((ErtsMonitor *) sig);
5877 break;
5878 default:
5879 ERTS_INTERNAL_ERROR("Unexpected sig type");
5880 break;
5881 }
5882 break;
5883
5884 case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
5885 case ERTS_SIG_Q_OP_ALIAS_MSG:
5886 sig->next = NULL;
5887 erts_cleanup_messages(sig);
5888 break;
5889
5890 case ERTS_SIG_Q_OP_MONITOR: {
5891 ErtsProcExitContext pectxt = {c_p, am_noproc, NULL, NULL,
5892 NULL, NULL, NIL, 0};
5893 erts_proc_exit_handle_monitor((ErtsMonitor *) sig,
5894 (void *) &pectxt, -1);
5895 cnt += 4;
5896 break;
5897 }
5898
5899 case ERTS_SIG_Q_OP_DEMONITOR:
5900 if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR)
5901 destroy_dist_proc_demonitor((ErtsSigDistProcDemonitor *) sig);
5902 else
5903 erts_monitor_release((ErtsMonitor *) sig);
5904 break;
5905
5906 case ERTS_SIG_Q_OP_LINK: {
5907 ErtsProcExitContext pectxt = {c_p, am_noproc};
5908 erts_proc_exit_handle_link((ErtsLink *) sig, (void *) &pectxt, -1);
5909 break;
5910 }
5911
5912 case ERTS_SIG_Q_OP_UNLINK:
5913 if (type == ERTS_SIG_Q_TYPE_DIST_LINK)
5914 reply_dist_unlink_ack(c_p, (ErtsSigDistUnlinkOp *) sig);
5915 else if (is_internal_pid(((ErtsSigUnlinkOp *) sig)->from))
5916 erts_proc_sig_send_unlink_ack(c_p, c_p->common.id,
5917 (ErtsSigUnlinkOp *) sig);
5918 else {
5919 Port *prt;
5920 ASSERT(is_internal_port(((ErtsSigUnlinkOp *) sig)->from));
5921 prt = erts_port_lookup(((ErtsSigUnlinkOp *) sig)->from,
5922 ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
5923 if (prt)
5924 erts_port_unlink_ack(c_p, prt, (ErtsSigUnlinkOp *) sig);
5925 else
5926 erts_proc_sig_destroy_unlink_op((ErtsSigUnlinkOp *) sig);
5927 }
5928 break;
5929
5930 case ERTS_SIG_Q_OP_UNLINK_ACK:
5931 erts_proc_sig_destroy_unlink_op((ErtsSigUnlinkOp *) sig);
5932 break;
5933
5934 case ERTS_SIG_Q_OP_GROUP_LEADER: {
5935 ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
5936 handle_group_leader(c_p, sgl);
5937 break;
5938 }
5939
5940 case ERTS_SIG_Q_OP_IS_ALIVE:
5941 is_alive_response(c_p, sig, 0);
5942 break;
5943
5944 case ERTS_SIG_Q_OP_PROCESS_INFO:
5945 handle_process_info(c_p, NULL, sig, next_nm_sig, 0);
5946 break;
5947
5948 case ERTS_SIG_Q_OP_SYNC_SUSPEND:
5949 handle_sync_suspend(c_p, sig);
5950 break;
5951
5952 case ERTS_SIG_Q_OP_RPC: {
5953 int yield = 0;
5954 handle_rpc(c_p, (ErtsProcSigRPC *) sig,
5955 cnt, limit, &yield);
5956 break;
5957 }
5958
5959 case ERTS_SIG_Q_OP_DIST_SPAWN_REPLY: {
5960 cnt += handle_dist_spawn_reply_exiting(c_p, sig,
5961 pend_spawn_mon_pp,
5962 reason);
5963 break;
5964 }
5965
5966 case ERTS_SIG_Q_OP_ADJ_MSGQ:
5967 switch (ERTS_PROC_SIG_TYPE(tag)) {
5968 case ERTS_SIG_Q_TYPE_CLA:
5969 handle_cla(c_p, sig, next_nm_sig, !0);
5970 break;
5971 case ERTS_SIG_Q_TYPE_OFF_HEAP:
5972 handle_move_msgq_off_heap(c_p, sig, next_nm_sig, !0);
5973 break;
5974 default:
5975 ERTS_INTERNAL_ERROR("Invalid 'adjust-message-queue' signal type");
5976 break;
5977 }
5978 break;
5979
5980 case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
5981 destroy_trace_info((ErtsSigTraceInfo *) sig);
5982 break;
5983
5984 case ERTS_SIG_Q_OP_RECV_MARK: {
5985 ErtsRecvMarker *markp = (ErtsRecvMarker *) sig;
5986 markp->in_msgq = markp->in_sigq = markp->set_save = 0;
5987 recv_marker_deallocate(c_p, markp);
5988 break;
5989 }
5990
5991 default:
5992 ERTS_INTERNAL_ERROR("Unknown signal");
5993 break;
5994 }
5995
5996 } while (cnt >= limit && *next_nm_sig);
5997
5998 *redsp += cnt / ERTS_SIG_REDS_CNT_FACTOR;
5999
6000 if (*next_nm_sig)
6001 return 0;
6002
6003 ASSERT(!c_p->sig_qs.nmsigs.next);
6004 c_p->sig_qs.nmsigs.last = NULL;
6005 (void) erts_atomic32_read_band_nob(&c_p->state,
6006 ~ERTS_PSFLG_SIG_Q);
6007 return !0;
6008 }
6009
/*
 * Reset the seq-trace token of a message. When VM probes are in use
 * and the message carries a dtrace user tag (DT_UTAG != NIL), the
 * special 'am_have_dt_utag' token is used so that the tag is kept;
 * otherwise the token is simply cleared to NIL.
 */
#ifdef USE_VM_PROBES
#  define ERTS_CLEAR_SEQ_TOKEN(MP)                                      \
    ERL_MESSAGE_TOKEN((MP)) = ((ERL_MESSAGE_DT_UTAG((MP)) != NIL)       \
                               ? am_have_dt_utag : NIL)
#else
#  define ERTS_CLEAR_SEQ_TOKEN(MP)                                     \
    ERL_MESSAGE_TOKEN((MP)) = NIL
#endif
6018
/*
 * Clear the seq-trace token of one signal, if it is a kind of signal
 * that carries a token (ordinary messages, generated exit signals,
 * persistent-monitor messages, distributed spawn replies, and alias
 * messages). All other signal kinds are left untouched.
 */
static ERTS_INLINE void
clear_seq_trace_token(ErtsMessage *sig)
{
    if (ERTS_SIG_IS_MSG((ErtsSignal *) sig))
        ERTS_CLEAR_SEQ_TOKEN(sig);
    else {
        Uint tag;
        Uint16 op, type;

        tag = ((ErtsSignal *) sig)->common.tag;
        type = ERTS_PROC_SIG_TYPE(tag);
        op = ERTS_PROC_SIG_OP(tag);

        switch (op) {

        case ERTS_SIG_Q_OP_EXIT:
        case ERTS_SIG_Q_OP_EXIT_LINKED:
        case ERTS_SIG_Q_OP_MONITOR_DOWN:
            /* Only generated exit signals carry a token; link/monitor
             * structures do not... */
            switch (type) {
            case ERTS_SIG_Q_TYPE_GEN_EXIT:
                ERTS_CLEAR_SEQ_TOKEN(sig);
                break;
            case ERTS_LNK_TYPE_PORT:
            case ERTS_LNK_TYPE_PROC:
            case ERTS_LNK_TYPE_DIST_PROC:
            case ERTS_MON_TYPE_PORT:
            case ERTS_MON_TYPE_PROC:
            case ERTS_MON_TYPE_DIST_PROC:
            case ERTS_MON_TYPE_NODE:
            case ERTS_MON_TYPE_NODES:
            case ERTS_MON_TYPE_SUSPEND:
            case ERTS_MON_TYPE_TIME_OFFSET:
                break;
            default:
                ERTS_INTERNAL_ERROR("Unexpected sig type");
                break;
            }
            break;

        case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
        case ERTS_SIG_Q_OP_DIST_SPAWN_REPLY:
        case ERTS_SIG_Q_OP_ALIAS_MSG:
            ERTS_CLEAR_SEQ_TOKEN(sig);
            break;

        /* Pure control signals; no token to clear... */
        case ERTS_SIG_Q_OP_MONITOR:
        case ERTS_SIG_Q_OP_DEMONITOR:
        case ERTS_SIG_Q_OP_LINK:
        case ERTS_SIG_Q_OP_UNLINK:
        case ERTS_SIG_Q_OP_UNLINK_ACK:
        case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
        case ERTS_SIG_Q_OP_GROUP_LEADER:
        case ERTS_SIG_Q_OP_IS_ALIVE:
        case ERTS_SIG_Q_OP_PROCESS_INFO:
        case ERTS_SIG_Q_OP_SYNC_SUSPEND:
        case ERTS_SIG_Q_OP_RPC:
        case ERTS_SIG_Q_OP_RECV_MARK:
        case ERTS_SIG_Q_OP_ADJ_MSGQ:
            break;

        default:
            ERTS_INTERNAL_ERROR("Unknown signal");
            break;
        }
    }
}
6085
/*
 * Clear seq-trace tokens on all signals in the private queues of
 * 'c_p'. Signals still in the outer (public) queue are first fetched
 * into the private queues so they are covered as well.
 */
void
erts_proc_sig_clear_seq_trace_tokens(Process *c_p)
{
    erts_proc_sig_fetch(c_p);
    ERTS_FOREACH_SIG_PRIVQS(c_p, sig, clear_seq_trace_token(sig));
}
6092
/*
 * Return the size, in bytes, of the memory occupied by the given
 * non-message signal, including any attached heap fragments.
 */
Uint
erts_proc_sig_signal_size(ErtsSignal *sig)
{
    Eterm tag;
    Uint16 type;
    int op;
    Uint size = 0;

    ASSERT(sig);
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));

    tag = sig->common.tag;
    type = ERTS_PROC_SIG_TYPE(tag);
    op = ERTS_PROC_SIG_OP(tag);

    switch (op) {
    case ERTS_SIG_Q_OP_EXIT:
    case ERTS_SIG_Q_OP_EXIT_LINKED:
    case ERTS_SIG_Q_OP_MONITOR_DOWN:
        switch (type) {
        case ERTS_SIG_Q_TYPE_GEN_EXIT:
            /* Message struct with combined heap fragment... */
            size = ((ErtsMessage *) sig)->hfrag.alloc_size;
            size *= sizeof(Eterm);
            size += sizeof(ErtsMessage) - sizeof(Eterm);
            break;
        case ERTS_LNK_TYPE_PORT:
        case ERTS_LNK_TYPE_PROC:
        case ERTS_LNK_TYPE_DIST_PROC:
            size = erts_link_size((ErtsLink *) sig);
            break;
        case ERTS_MON_TYPE_PORT:
        case ERTS_MON_TYPE_PROC:
        case ERTS_MON_TYPE_DIST_PROC:
        case ERTS_MON_TYPE_NODE:
            size = erts_monitor_size((ErtsMonitor *) sig);
            break;
        default:
            ERTS_INTERNAL_ERROR("Unexpected sig type");
            break;
        }
        break;

    case ERTS_SIG_Q_OP_ADJ_MSGQ:
        if (type == ERTS_SIG_Q_TYPE_OFF_HEAP) {
            size = sizeof(ErtsMessageRef);
            break;
        }
        /* Fall through... */
    case ERTS_SIG_Q_OP_SYNC_SUSPEND:
    case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
    case ERTS_SIG_Q_OP_IS_ALIVE:
    case ERTS_SIG_Q_OP_DIST_SPAWN_REPLY: {
        /* Message struct plus chain of heap fragments... */
        ErlHeapFragment *hf;
        size = sizeof(ErtsMessageRef);
        size += ERTS_HEAP_FRAG_SIZE(((ErtsMessage *) sig)->hfrag.alloc_size);
        for (hf = ((ErtsMessage *) sig)->hfrag.next; hf; hf = hf->next)
            size += ERTS_HEAP_FRAG_SIZE(hf->alloc_size);
        break;
    }

    case ERTS_SIG_Q_OP_ALIAS_MSG: {
        ErlHeapFragment *hf;

        size = sizeof(ErtsMessageRef);

        switch (type) {
        case ERTS_SIG_Q_TYPE_OFF_HEAP:
            size += ERTS_HEAP_FRAG_SIZE(((ErtsMessage *) sig)->hfrag.alloc_size);
            hf = ((ErtsMessage *) sig)->hfrag.next;
            /* The 'if (0)' below lets ERTS_SIG_Q_TYPE_HEAP_FRAG jump
             * into this case with 'hf' initialized differently, then
             * share the fragment-chain loop... */
            if (0) {
        case ERTS_SIG_Q_TYPE_HEAP_FRAG:
                hf = ((ErtsMessage *) sig)->data.heap_frag;
            }
            for (; hf; hf = hf->next)
                size += ERTS_HEAP_FRAG_SIZE(hf->alloc_size);
            break;
        case ERTS_SIG_Q_TYPE_HEAP:
            /* Data on receiver's heap; nothing extra attached... */
            break;
        default:
            ERTS_INTERNAL_ERROR("Unexpected sig type");
        }
        break;
    }

    case ERTS_SIG_Q_OP_DEMONITOR:
        if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
            /* Struct already contains one Eterm of the ref... */
            size = NC_HEAP_SIZE(((ErtsSigDistProcDemonitor *) sig)->ref);
            size--;
            size *= sizeof(Eterm);
            size += sizeof(ErtsSigDistProcDemonitor);
            break;
        }
        /* Fall through... */

    case ERTS_SIG_Q_OP_MONITOR:
        size = erts_monitor_size((ErtsMonitor *) sig);
        break;

    case ERTS_SIG_Q_OP_UNLINK:
    case ERTS_SIG_Q_OP_UNLINK_ACK:
        if (type != ERTS_SIG_Q_TYPE_DIST_LINK)
            size = sizeof(ErtsSigUnlinkOp);
        else {
            /* Struct already contains one Eterm of the pid... */
            size = NC_HEAP_SIZE(((ErtsSigDistUnlinkOp *) sig)->remote);
            size--;
            size *= sizeof(Eterm);
            size += sizeof(ErtsSigDistUnlinkOp);
        }
        break;
    case ERTS_SIG_Q_OP_LINK:
        size = erts_link_size((ErtsLink *) sig);
        break;

    case ERTS_SIG_Q_OP_GROUP_LEADER: {
        ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
        size = size_object(sgl->group_leader);
        size += size_object(sgl->ref);
        size *= sizeof(Eterm);
        size += sizeof(ErtsSigGroupLeader) - sizeof(Eterm);
        break;
    }

    case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
        size = sizeof(ErtsSigTraceInfo);
        break;

    case ERTS_SIG_Q_OP_PROCESS_INFO: {
        ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
        size = sizeof(ErtsProcessInfoSig);
        size += (pisig->len - 1) * sizeof(int);
        break;
    }

    case ERTS_SIG_Q_OP_RPC:
        size = sizeof(ErtsProcSigRPC);
        break;

    case ERTS_SIG_Q_OP_RECV_MARK:
        size = sizeof(ErtsRecvMarker);
        break;

    default:
        ERTS_INTERNAL_ERROR("Unknown signal");
        break;
    }

    return size;
}
6241
/*
 * Helper for the loop-rec instruction; tries to make more messages
 * available in the inner message queue. See the detailed comment in
 * the function body for the *msgpp / *get_outp result protocol.
 *
 * c_p        - receiving process; main lock held.
 * fcalls     - current function-call (reduction) counter.
 * neg_o_reds - negative offset of reductions already accounted.
 * msgpp      - out: next message, or NULL (see body comment).
 * get_outp   - out: >0 exiting, <0 yield, 0 see body comment.
 *
 * Returns the number of reductions consumed.
 */
int
erts_proc_sig_receive_helper(Process *c_p,
                             int fcalls,
                             int neg_o_reds,
                             ErtsMessage **msgpp,
                             int *get_outp)
{
    ErtsMessage *msgp;
    int reds, consumed_reds, left_reds, max_reds;

    /*
     * Called from the loop-rec instruction when receive
     * has reached end of inner (private) queue. This function
     * tries to move more messages into the inner queue
     * for the receive to handle. This by, processing the
     * middle (private) queue and/or moving signals from
     * the outer (public) queue into the middle queue.
     *
     * If this function succeeds in making more messages
     * available in the inner queue, *msgpp points to next
     * message. If *msgpp is set to NULL when:
     * -- process became exiting. *get_outp is set to a
     *    value greater than zero.
     * -- process needs to yield. *get_outp is set to a
     *    value less than zero.
     * -- no more messages exist in any of the queues.
     *    *get_outp is set to zero and the message queue
     *    lock remains locked. This so the process can
     *    make its way to the appropriate wait instruction
     *    without racing with new incoming messages.
     */

    ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
    ASSERT(!ERTS_PROC_IS_EXITING(c_p));
    ASSERT(!*msgpp);

    left_reds = fcalls - neg_o_reds;
    consumed_reds = 0;

    while (!0) {
        erts_aint32_t state;

        if (!c_p->sig_qs.cont) {
            /* Middle queue empty; fetch from outer queue... */

            consumed_reds += 4;
            left_reds -= 4;
            erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
            erts_proc_sig_fetch(c_p);
            /*
             * Messages may have been moved directly to
             * inner queue...
             */
            msgp = erts_msgq_peek_msg(c_p);
            if (msgp) {
                erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
                *get_outp = 0;
                *msgpp = msgp;
                return consumed_reds;
            }

            if (!c_p->sig_qs.cont) {
                /*
                 * No messages! Return with message queue
                 * locked and let the process continue
                 * to wait instruction...
                 */
                *get_outp = 0;
                *msgpp = NULL;

                /* Note: MSGQ lock intentionally kept locked here! */
                return consumed_reds;
            }
            erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);

            if (left_reds <= 0)
                break; /* Yield */

            /* handle newly arrived signals... */
        }

        reds = ERTS_SIG_HANDLE_REDS_MAX_PREFERED;
#ifdef DEBUG
        /* test that it works also with very few reds */
        max_reds = left_reds;
        if (reds > left_reds)
            reds = left_reds;
#else
        /* At least work preferred amount of reds... */
        max_reds = left_reds;
        if (max_reds < reds)
            max_reds = reds;
#endif
        (void) erts_proc_sig_handle_incoming(c_p, &state, &reds,
                                             max_reds, !0);
        consumed_reds += reds;
        left_reds -= reds;

        /* we may have exited or suspended by an incoming signal... */

        if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_SUSPENDED)) {
            if (state & ERTS_PSFLG_SUSPENDED)
                break; /* Yield */

            /*
             * Process need to schedule out in order
             * to terminate. Prepare this a bit...
             */
            ASSERT(state & ERTS_PSFLG_EXITING);
            ASSERT(c_p->flags & F_DELAY_GC);

            c_p->flags &= ~F_DELAY_GC;
            c_p->arity = 0;
            c_p->current = NULL;

            *get_outp = 1;
            *msgpp = NULL;

            return consumed_reds;
        }

        msgp = erts_msgq_peek_msg(c_p);
        if (msgp) {
            *get_outp = 0;
            *msgpp = msgp;
            return consumed_reds;
        }

        if (left_reds <= 0)
            break; /* yield */

        ASSERT(!c_p->sig_qs.cont);
        /* Go fetch again... */
    }

    /* Yield... */

    *get_outp = -1;
    *msgpp = NULL;

    ASSERT(consumed_reds >= (fcalls - neg_o_reds));
    return consumed_reds;
}
6383
6384 static Uint
area_literal_size(Eterm * start,Eterm * end,char * lit_start,Uint lit_size)6385 area_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size)
6386 {
6387 Eterm* p;
6388 Eterm val;
6389 Uint sz = 0;
6390
6391 for (p = start; p < end; p++) {
6392 val = *p;
6393 switch (primary_tag(val)) {
6394 case TAG_PRIMARY_BOXED:
6395 case TAG_PRIMARY_LIST:
6396 if (ErtsInArea(val, lit_start, lit_size)) {
6397 sz += size_object(val);
6398 }
6399 break;
6400 case TAG_PRIMARY_HEADER:
6401 if (!header_is_transparent(val)) {
6402 Eterm* new_p;
6403 if (header_is_bin_matchstate(val)) {
6404 ErlBinMatchState *ms = (ErlBinMatchState*) p;
6405 ErlBinMatchBuffer *mb = &(ms->mb);
6406 if (ErtsInArea(mb->orig, lit_start, lit_size)) {
6407 sz += size_object(mb->orig);
6408 }
6409 }
6410 new_p = p + thing_arityval(val);
6411 ASSERT(start <= new_p && new_p < end);
6412 p = new_p;
6413 }
6414 }
6415 }
6416 return sz;
6417 }
6418
6419 static ERTS_INLINE void
area_literal_copy(Eterm ** hpp,ErlOffHeap * ohp,Eterm * start,Eterm * end,char * lit_start,Uint lit_size)6420 area_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
6421 Eterm *start, Eterm *end,
6422 char *lit_start, Uint lit_size) {
6423 Eterm* p;
6424 Eterm val;
6425 Uint sz;
6426
6427 for (p = start; p < end; p++) {
6428 val = *p;
6429 switch (primary_tag(val)) {
6430 case TAG_PRIMARY_BOXED:
6431 case TAG_PRIMARY_LIST:
6432 if (ErtsInArea(val, lit_start, lit_size)) {
6433 sz = size_object(val);
6434 val = copy_struct(val, sz, hpp, ohp);
6435 *p = val;
6436 }
6437 break;
6438 case TAG_PRIMARY_HEADER:
6439 if (!header_is_transparent(val)) {
6440 Eterm* new_p;
6441 /* matchstate in message, not possible. */
6442 if (header_is_bin_matchstate(val)) {
6443 ErlBinMatchState *ms = (ErlBinMatchState*) p;
6444 ErlBinMatchBuffer *mb = &(ms->mb);
6445 if (ErtsInArea(mb->orig, lit_start, lit_size)) {
6446 sz = size_object(mb->orig);
6447 mb->orig = copy_struct(mb->orig, sz, hpp, ohp);
6448 }
6449 }
6450 new_p = p + thing_arityval(val);
6451 ASSERT(start <= new_p && new_p < end);
6452 p = new_p;
6453 }
6454 }
6455 }
6456 }
6457
6458 static void
send_cla_reply(Process * c_p,ErtsMessage * sig,Eterm to,Eterm req_id,Eterm result)6459 send_cla_reply(Process *c_p, ErtsMessage *sig, Eterm to,
6460 Eterm req_id, Eterm result)
6461 {
6462 Process *rp;
6463
6464 /*
6465 * The incoming signal is reused as reply message to
6466 * the requester. It has already been partially prepared.
6467 * Request id is already in place in the combined message
6468 * heap fragment and do not need to be copied.
6469 */
6470
6471 ASSERT(is_value(result) && is_immed(result));
6472 ASSERT(is_internal_pid(to));
6473 ASSERT(((Sint) sig->hfrag.alloc_size)
6474 - ((Sint) sig->hfrag.used_size)
6475 >= 4); /* Room for 3-tuple... */
6476
6477 sig->next = NULL;
6478 sig->data.attached = ERTS_MSG_COMBINED_HFRAG;
6479
6480 rp = erts_proc_lookup(to);
6481 if (!rp)
6482 erts_cleanup_messages(sig);
6483 else {
6484 Eterm rp_locks;
6485 Eterm *hp, reply_msg;
6486
6487 hp = &sig->hfrag.mem[0] + sig->hfrag.used_size;
6488 reply_msg = TUPLE3(hp, am_copy_literals, req_id, result);
6489 sig->hfrag.used_size += 4;
6490
6491 if (c_p == rp)
6492 rp_locks = ERTS_PROC_LOCK_MAIN;
6493 else
6494 rp_locks = 0;
6495
6496 erts_queue_proc_message(c_p, rp, rp_locks,
6497 sig, reply_msg);
6498 }
6499 }
6500
static int
handle_cla(Process *c_p,
           ErtsMessage *sig,
           ErtsMessage ***next_nm_sig,
           int exiting)
{
    /*
     * Handle a copy-literal-area (CLA) request signal. All messages
     * queued before this signal are scanned for references into the
     * literal area being removed; any found literals are copied into
     * a heap fragment attached to the referring message so each
     * message stays self-contained. A reply is then sent back to the
     * requester via send_cla_reply(). Returns reductions consumed
     * (capped at CONTEXT_REDS).
     *
     * TODO: Implement yielding support!
     */
    ErtsCLAData *cla;
    ErtsMessage *msg;
    ErtsLiteralArea *la;
    char *literals;
    Uint lit_bsize;
    int nmsgs, reds;
    Eterm result = am_ok;
    Uint64 cnt = 0;

    cnt++;

    cla = get_cla_data(sig);
    if (exiting) {
        /* signal already removed... */
        goto done;
    }

    /*
     * If we need to perform a literal GC, all signals *must* already
     * have been handled before the GC. Note that only the message
     * queue (signals before this signal) needs to be scanned since the
     * request have been passed through the signal queue after we set up
     * the literal area to copy. No literals in the area of interest
     * can therefore occur behind this signal.
     */

    la = ERTS_COPY_LITERAL_AREA();
    if (!la) {
        /* No literal area set up; nothing to scan for. */
        ASSERT(0);
        remove_nm_sig(c_p, sig, next_nm_sig);
        goto done;
    }

    ASSERT(la);

    literals = (char *) &la->start[0];
    lit_bsize = (char *) la->end - literals;

    /* Start at the inner queue; fall back to the middle queue. */
    msg = c_p->sig_qs.first;
    if (!msg)
        msg = c_p->sig_qs.cont;

    nmsgs = 0;
    while (msg != sig) {
        ASSERT(!!msg);
        nmsgs++;
        /* Account one reduction per batch of inspected messages. */
        if (nmsgs >= ERTS_PROC_SIG_ADJ_MSGQ_MSGS_FACTOR) {
            cnt++;
            nmsgs = 0;
        }
        if (ERTS_SIG_IS_INTERNAL_MSG(msg)) {
            ErlHeapFragment *first_hfrag, *hf, **last_hfrag;
            int in_refs = 0, in_heap_frags = 0;
            Uint scanned = 0, lit_sz = 0;

            /*
             * If a literal to copy is found in the message, we make
             * an explicit copy of it in a heap fragment and attach
             * that heap fragment to the messag. Each message needs
             * to be self contained, we cannot save the literal at
             * any other place than in the message itself.
             */

            /*
             * Literals directly from message references should only
             * be able to appear in the first message reference, i.e.,
             * the message itself...
             */
            if (ErtsInArea(msg->m[0], literals, lit_bsize)) {
                in_refs++;
                lit_sz += size_object(msg->m[0]);
            }

#ifdef DEBUG
            {
                int i;
                for (i = 1; i < ERL_MESSAGE_REF_ARRAY_SZ; i++) {
                    ASSERT(!ErtsInArea(msg->m[i], literals, lit_bsize));
                }
            }
#endif

            /* Locate the first fragment and where to link a new one. */
            if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG) {
                first_hfrag = &msg->hfrag;
                last_hfrag  = &msg->hfrag.next;
            }
            else {
                first_hfrag = msg->data.heap_frag;
                last_hfrag  = &msg->data.heap_frag;
            }

            /* Sizing pass over all attached heap fragments. */
            for (hf = first_hfrag; hf; hf = hf->next) {
                Uint sz = hf->used_size;
                Uint lsz = area_literal_size(&hf->mem[0],
                                             &hf->mem[sz],
                                             literals, lit_bsize);
                if (lsz)
                    in_heap_frags++;
                lit_sz += lsz;
                scanned += sz;
                last_hfrag = &hf->next;
            }

            cnt += scanned/ERTS_PROC_SIG_ADJ_MSGQ_SCAN_FACTOR;

            if (lit_sz > 0) {
                /* Copy pass: move all referenced literals into a new
                 * fragment and redirect the references. */
                ErlHeapFragment *new_hfrag = new_message_buffer(lit_sz);
                ErlOffHeap *ohp = &new_hfrag->off_heap;
                Eterm *hp = new_hfrag->mem;

                if (in_refs) {
                    if (ErtsInArea(msg->m[0], literals, lit_bsize)) {
                        Uint sz = size_object(msg->m[0]);
                        msg->m[0] = copy_struct(msg->m[0], sz, &hp, ohp);
                    }
                }

                if (in_heap_frags) {

                    for (hf = first_hfrag; hf; hf = hf->next) {
                        area_literal_copy(&hp, ohp, &hf->mem[0],
                                          &hf->mem[hf->used_size],
                                          literals, lit_bsize);
                    }

                }

                /* link new hfrag last */
                ASSERT(new_hfrag->used_size == hp - &new_hfrag->mem[0]);
                new_hfrag->next = NULL;
                ASSERT(!*last_hfrag);
                *last_hfrag = new_hfrag;

                cnt += scanned/ERTS_PROC_SIG_ADJ_MSGQ_SCAN_FACTOR;
                cnt += lit_sz/ERTS_PROC_SIG_ADJ_MSGQ_COPY_FACTOR;
            }
        }

        msg = msg->next;
        if (!msg)
            msg = c_p->sig_qs.cont;
    }

    remove_nm_sig(c_p, sig, next_nm_sig);

    reds = 0;
    /* Literal references may also live on the process heap; if so a
     * literal GC is needed and the requester is told 'need_gc'. */
    if (erts_check_copy_literals_gc_need(c_p, &reds, literals, lit_bsize))
        result = am_need_gc;

    cnt += reds * ERTS_SIG_REDS_CNT_FACTOR;

done:

    send_cla_reply(c_p, sig, cla->requester, cla->request_id, result);

    if (cnt > CONTEXT_REDS)
        return CONTEXT_REDS;
    return cnt;
}
6669
static int
handle_move_msgq_off_heap(Process *c_p,
                          ErtsMessage *sig,
                          ErtsMessage ***next_nm_sig,
                          int exiting)
{
    /*
     * Handle the adjust-message-queue signal that completes a switch
     * to off-heap message queue data: every message queued before
     * this signal that still lives on the process heap is copied out
     * into its own heap fragment. Returns reductions consumed
     * (capped at CONTEXT_REDS).
     *
     * TODO: Implement yielding support!
     */
    ErtsMessage *msg;
    int nmsgs;
    Uint64 cnt = 0;

    /*
     * This job was first initiated when the process changed to off heap
     * message queue management. ERTS_PSFLG_OFF_HEAP_MSGQ was set at
     * initiation and thread progress was made before this signal was
     * sent. That is, all signals after this signal already are off heap
     * and do not have to be inspected.
     *
     * The management state might, however, have been changed again
     * (multiple times) since initiation. The ERTS_PSFLG_OFF_HEAP_MSGQ has
     * however been set since the operation was first initiated. Check
     * users last requested state (the flags F_OFF_HEAP_MSGQ, and
     * F_ON_HEAP_MSGQ), and make the state consistent with that.
     */

    cnt++;

    if (exiting) {
        /* signal already removed from queue... */
        goto cleanup;
    }

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
    ASSERT(c_p->sig_qs.flags & FS_OFF_HEAP_MSGQ_CHNG);
    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

    if (!(c_p->sig_qs.flags & FS_OFF_HEAP_MSGQ)) {
        /* Someone changed its mind... */
        erts_atomic32_read_band_nob(&c_p->state,
                                    ~ERTS_PSFLG_OFF_HEAP_MSGQ);
        goto done;
    }

    /* Start at the inner queue; fall back to the middle queue. */
    msg = c_p->sig_qs.first;
    if (!msg)
        msg = c_p->sig_qs.cont;

    nmsgs = 0;
    while (msg != sig) {
        ASSERT(!!msg);
        nmsgs++;
        /* Account one reduction per batch of inspected messages. */
        if (nmsgs >= ERTS_PROC_SIG_ADJ_MSGQ_MSGS_FACTOR) {
            cnt++;
            nmsgs = 0;
        }
        /* Only internal messages without attached data (i.e. data on
         * the process heap) need copying; literals are left as is. */
        if (ERTS_SIG_IS_INTERNAL_MSG(msg)
            && !msg->data.attached
            && ((is_not_immed(ERL_MESSAGE_TERM(msg))
                 && !erts_is_literal(ERL_MESSAGE_TERM(msg),
                                     ptr_val(ERL_MESSAGE_TERM(msg))))
#ifdef USE_VM_PROBES
                || is_not_immed(ERL_MESSAGE_DT_UTAG(msg))
#endif
                || is_not_immed(ERL_MESSAGE_TOKEN(msg)))) {
            ErlHeapFragment *hfrag;
            Eterm *hp;
            ErlOffHeap *ohp;
#ifdef SHCOPY_SEND
            erts_shcopy_t info;
#else
            erts_literal_area_t litarea;
#endif
#ifdef USE_VM_PROBES
            Uint ut_sz = size_object(ERL_MESSAGE_DT_UTAG(msg));
#endif
            Uint t_sz = size_object(ERL_MESSAGE_TOKEN(msg));
            Uint m_sz;
            Uint h_sz;

            ASSERT(is_immed(ERL_MESSAGE_FROM(msg)));
            /* Size the message term (plus token/utag) for the fragment. */
            if (is_immed(ERL_MESSAGE_TERM(msg)))
                m_sz = 0;
            else {
#ifdef SHCOPY_SEND
                INITIALIZE_SHCOPY(info);
                m_sz = copy_shared_calculate(ERL_MESSAGE_TERM(msg), &info);
#else
                INITIALIZE_LITERAL_PURGE_AREA(litarea);
                m_sz = size_object_litopt(ERL_MESSAGE_TERM(msg), &litarea);
#endif
            }

            h_sz = m_sz + t_sz;
#ifdef USE_VM_PROBES
            h_sz += ut_sz;
#endif
            ASSERT(h_sz);

            hfrag = new_message_buffer(h_sz);
            hp = hfrag->mem;
            ohp = &hfrag->off_heap;

            /* Copy message term, token and (optionally) utag into the
             * new fragment and redirect the message references. */
            if (is_not_immed(ERL_MESSAGE_TERM(msg))) {
                Eterm m = ERL_MESSAGE_TERM(msg);
                Eterm m_cpy;
#ifdef SHCOPY_SEND
                m_cpy = copy_shared_perform(m, m_sz, &info, &hp, ohp);
                DESTROY_SHCOPY(info);
#else
                m_cpy = copy_struct_litopt(m, m_sz, &hp, ohp, &litarea);
#endif
                ERL_MESSAGE_TERM(msg) = m_cpy;
            }
            if (is_not_immed(ERL_MESSAGE_TOKEN(msg)))
                ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
                                                     t_sz, &hp, ohp);
#ifdef USE_VM_PROBES
            if (is_not_immed(ERL_MESSAGE_DT_UTAG(msg)))
                ERL_MESSAGE_DT_UTAG(msg) = copy_struct(ERL_MESSAGE_DT_UTAG(msg),
                                                       ut_sz, &hp, ohp);
#endif
            msg->data.heap_frag = hfrag;
            cnt += h_sz/ERTS_PROC_SIG_ADJ_MSGQ_COPY_FACTOR;
        }

        msg = msg->next;
        if (!msg)
            msg = c_p->sig_qs.cont;
    }

done:

    remove_nm_sig(c_p, sig, next_nm_sig);

cleanup:

    sig->next = NULL;
    erts_cleanup_messages(sig);

    c_p->sig_qs.flags &= ~FS_OFF_HEAP_MSGQ_CHNG;

    if (cnt > CONTEXT_REDS)
        return CONTEXT_REDS;
    return cnt;
}
6817
6818
static int
handle_trace_change_state(Process *c_p,
                          ErtsSigRecvTracing *tracing,
                          Uint16 type,
                          ErtsMessage *sig,
                          ErtsMessage ***next_nm_sig)
{
    /*
     * Apply a trace-change-state signal: update the process' trace
     * flags (and tracer, when one is given), then re-derive the
     * receive-tracing state. Returns non-zero when message tracing
     * is active after the change.
     */
    ErtsSigTraceInfo *trace_info = (ErtsSigTraceInfo *) sig;
    ErtsMessage **next = *next_nm_sig;
    int msgs_active, old_msgs_active = !!tracing->messages.active;

    ASSERT(sig == *next);

    /* All minor locks are needed while mutating trace state. */
    erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);

    ERTS_TRACE_FLAGS(c_p) |= trace_info->flags_on;
    ERTS_TRACE_FLAGS(c_p) &= ~trace_info->flags_off;
    if (is_value(trace_info->tracer))
        erts_tracer_replace(&c_p->common, trace_info->tracer);

    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);

    remove_nm_sig(c_p, sig, next_nm_sig);
    destroy_trace_info(trace_info);
    /*
     * Adjust tracing state according to modifications made by
     * the trace info signal...
     */
    adjust_tracing_state(c_p, tracing, 0);
    msgs_active = !!tracing->messages.active;

    /* Maintain the message-tracing cursor across the state flip. */
    if (old_msgs_active ^ msgs_active) {
        if (msgs_active) {
            ASSERT(!tracing->messages.next);
            tracing->messages.next = next;
        }
        else {
            ASSERT(tracing->messages.next);
            tracing->messages.next = NULL;
        }
    }

    ASSERT(!msgs_active || tracing->messages.next);

    return msgs_active;
}
6865
6866 static void
getting_unlinked(Process * c_p,Eterm unlinker)6867 getting_unlinked(Process *c_p, Eterm unlinker)
6868 {
6869 trace_proc(c_p, ERTS_PROC_LOCK_MAIN, c_p,
6870 am_getting_unlinked, unlinker);
6871 }
6872
6873 static void
getting_linked(Process * c_p,Eterm linker)6874 getting_linked(Process *c_p, Eterm linker)
6875 {
6876 trace_proc(c_p, ERTS_PROC_LOCK_MAIN, c_p,
6877 am_getting_linked, linker);
6878 }
6879
6880 static void
linking(Process * c_p,Eterm to)6881 linking(Process *c_p, Eterm to)
6882 {
6883 trace_proc(c_p, ERTS_PROC_LOCK_MAIN, c_p,
6884 am_link, to);
6885 }
6886
static ERTS_INLINE void
handle_message_enqueued_tracing(Process *c_p,
                                ErtsSigRecvTracing *tracing,
                                ErtsMessage *msg)
{
    /*
     * Report a newly enqueued message to the enabled tracing
     * facilities: the 'message_queued' dtrace probe (when built with
     * VM probes) and the 'receive' trace event.
     */
    ASSERT(ERTS_SIG_IS_INTERNAL_MSG(msg));

#if defined(USE_VM_PROBES)
    if (tracing->messages.vm_probes && DTRACE_ENABLED(message_queued)) {
        Sint tok_label = 0;
        Sint tok_lastcnt = 0;
        Sint tok_serial = 0;
        Sint len = erts_proc_sig_privqs_len(c_p);
        Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);

        /* Extract seq-trace token components when a token is present. */
        if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
            tok_label = SEQ_TRACE_T_DTRACE_LABEL(seq_trace_token);
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
            tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
        }
        /* Message intentionally not passed... */
        DTRACE6(message_queued,
                tracing->messages.receiver_name,
                size_object(ERL_MESSAGE_TERM(msg)),
                len, /* This is NOT message queue len, but its something... */
                tok_label, tok_lastcnt, tok_serial);
    }
#endif

    if (tracing->messages.receive_trace && tracing->messages.event->on) {
        ASSERT(IS_TRACED(c_p));
        trace_receive(c_p,
                      ERL_MESSAGE_FROM(msg),
                      ERL_MESSAGE_TERM(msg),
                      tracing->messages.event);
    }
}
6924
static int
handle_msg_tracing(Process *c_p, ErtsSigRecvTracing *tracing,
                   ErtsMessage ***next_nm_sig)
{
    /*
     * Trace all message signals from the tracing cursor up to the
     * next non-message signal (or end of queue). Bad external
     * messages are dropped on decode failure.
     *
     * Returns:
     *   1  -- end of queue reached
     *  -1  -- yield (trace-count limit hit); cursor saved
     *   0  -- stopped at a non-message signal for the caller to handle
     */
    ErtsMessage **next_sig, *sig;
    int cnt = 0, limit = ERTS_PROC_SIG_TRACE_COUNT_LIMIT;

    ASSERT(tracing->messages.next);
    next_sig = tracing->messages.next;
    sig = *next_sig;

    if (!sig) {
        ASSERT(!*next_nm_sig);
        return 1; /* end... */
    }

    if (ERTS_SIG_IS_RECV_MARKER(sig) && ((ErtsRecvMarker *) sig)->in_msgq) {
        /*
         * Skip already handled receive marker that just entered
         * the message queue...
         */
        next_sig = &sig->next;
        sig = *next_sig;
        ASSERT(!sig || !ERTS_SIG_IS_RECV_MARKER(sig)
               || !((ErtsRecvMarker *) sig)->in_msgq);
    }

    /*
     * Receive tracing active. Handle all messages
     * until next non-message signal...
     */

    while (sig && ERTS_SIG_IS_MSG(sig)) {
        if (cnt > limit) {
            tracing->messages.next = next_sig;
            return -1; /* Yield... */
        }
        if (ERTS_SIG_IS_EXTERNAL_MSG(sig)) {
            cnt += 50; /* Decode is expensive... */
            if (!erts_proc_sig_decode_dist(c_p, ERTS_PROC_LOCK_MAIN,
                                           sig, 0)) {
                /* Bad dist message; remove it... */
                remove_mq_m_sig(c_p, sig, next_sig, next_nm_sig);
                sig->next = NULL;
                erts_cleanup_messages(sig);
                sig = *next_sig;
                continue;
            }
        }
        handle_message_enqueued_tracing(c_p, tracing, sig);
        cnt++;

        next_sig = &(*next_sig)->next;
        sig = *next_sig;
    }

    /* Save the cursor for the next invocation. */
    tracing->messages.next = next_sig;

    if (!sig) {
        ASSERT(!*next_nm_sig);
        return 1; /* end... */
    }

    ASSERT(*next_nm_sig);
    ASSERT(**next_nm_sig == sig);

    /* Next signal a non-message signal... */
    ASSERT(ERTS_SIG_IS_NON_MSG(sig));

    /*
     * Return and handle the non-message signal...
     */

    return 0;
}
7000
static void
handle_missing_spawn_reply(Process *c_p, ErtsMonitor *omon)
{
    /*
     * A distributed spawn request never got its 'spawn_reply' signal.
     * The remote node is misbehaving (see the OTP-17737 reference in
     * the logged message below); kill the connection to it and log an
     * error report.
     */
    ErtsMonitorData *mdp;
    ErtsMonitorDataExtended *mdep;
    erts_dsprintf_buf_t *dsbufp;
    Eterm nodename;
    DistEntry *dep;

    /* Terminate connection to the node and report it... */

    if (omon->type != ERTS_MON_TYPE_DIST_PROC)
        ERTS_INTERNAL_ERROR("non-distributed missing spawn_reply");

    mdp = erts_monitor_to_data(omon);
    ASSERT(mdp->origin.flags & ERTS_ML_FLG_EXTENDED);
    mdep = (ErtsMonitorDataExtended *) mdp;
    ASSERT(mdep->dist);
    nodename = mdep->dist->nodename;
    ASSERT(is_atom(nodename));

    /* Kill only the connection instance the monitor belongs to. */
    dep = erts_find_dist_entry(nodename);
    if (dep)
        erts_kill_dist_connection(dep, mdep->dist->connection_id);

    dsbufp = erts_create_logger_dsbuf();
    erts_dsprintf(dsbufp,
                  "Missing 'spawn_reply' signal from the node %T "
                  "detected by %T on the node %T. The node %T "
                  "probably suffers from the bug with ticket id "
                  "OTP-17737.",
                  nodename, c_p->common.id,
                  erts_this_dist_entry->sysname, nodename);
    erts_send_error_to_logger_nogl(dsbufp);
}
7036
Uint
erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
                                       Process *rp,
                                       ErtsProcLocks rp_locks,
                                       int info_on_self,
                                       ErtsMessageInfo *mip)
{
    Uint tot_heap_size;
    ErtsMessage *mp, **mpp;
    Sint i;
    int self_on_heap;

    /*
     * Prepare the message queue (inner signal queue)
     * for inspection by process_info().
     *
     * - Decode all messages on external format
     * - Remove all corrupt dist messages from queue
     * - Save pointer to, and heap size need of each
     *   message in the mip array.
     * - Return total heap size need for all messages
     *   that needs to be copied.
     *
     */

    ASSERT(!info_on_self || c_p == rp);

    /* When inspecting ourselves with on-heap message data, terms
     * already on our heap need no copying. */
    self_on_heap = info_on_self && !(c_p->sig_qs.flags & FS_OFF_HEAP_MSGQ);

    tot_heap_size = 0;
    i = 0;
    mpp = &rp->sig_qs.first;
    mp = rp->sig_qs.first;
    while (mp) {
        Eterm msg = ERL_MESSAGE_TERM(mp);

        /* Receive markers are bookkeeping entries, not messages. */
        if (msg != ERTS_RECV_MARKER_TAG) {

            mip[i].size = 0;

            if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
                /* decode it... */
                if (!erts_proc_sig_decode_dist(rp, rp_locks, mp, !0)) {
                    ErtsMessage *bad_mp = mp;
                    /*
                     * Bad distribution message; remove
                     * it from the queue...
                     */

                    ASSERT(*mpp == bad_mp);

                    remove_iq_m_sig(rp, mp, mpp);

                    mp = *mpp;

                    bad_mp->next = NULL;
                    erts_cleanup_messages(bad_mp);
                    continue;
                }

                msg = ERL_MESSAGE_TERM(mp);
            }

            ASSERT(is_value(msg));

            if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
                Uint sz = size_object(msg);
                mip[i].size = sz;
                tot_heap_size += sz;
            }

            mip[i].msgp = mp;
            i++;
        }

        mpp = &mp->next;
        mp = mp->next;
    }

    /* NOTE(review): this checks c_p's queue length although the loop
     * walks rp's queue; fine when c_p == rp — confirm intent for the
     * c_p != rp case. */
    ASSERT(c_p->sig_qs.len == i);

    return tot_heap_size;
}
7120
7121 static ERTS_INLINE void
move_msg_to_heap(Process * c_p,ErtsMessage * mp)7122 move_msg_to_heap(Process *c_p, ErtsMessage *mp)
7123 {
7124 /*
7125 * We leave not yet decoded distribution messages
7126 * as they are in the queue since it is not
7127 * possible to determine a maximum size until
7128 * actual decoding...
7129 *
7130 * We also leave combined messages as they are...
7131 */
7132 if (ERTS_SIG_IS_INTERNAL_MSG(mp)
7133 && mp->data.attached
7134 && mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
7135 ErlHeapFragment *bp;
7136
7137 bp = erts_message_to_heap_frag(mp);
7138
7139 if (bp->next)
7140 erts_move_multi_frags(&c_p->htop, &c_p->off_heap, bp,
7141 mp->m, ERL_MESSAGE_REF_ARRAY_SZ, 0);
7142 else
7143 erts_copy_one_frag(&c_p->htop, &c_p->off_heap, bp,
7144 mp->m, ERL_MESSAGE_REF_ARRAY_SZ);
7145
7146 mp->data.heap_frag = NULL;
7147 free_message_buffer(bp);
7148 }
7149 }
7150
void
erts_proc_sig_move_msgs_to_heap(Process *c_p)
{
    /*
     * Move the data of all messages in the private queues onto the
     * process heap (see move_msg_to_heap() for which messages are
     * left untouched), with debug queue-consistency checks before
     * and after.
     */
    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);

    ERTS_FOREACH_SIG_PRIVQS(c_p, sig, move_msg_to_heap(c_p, sig));

    ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
}
7160
7161
7162 BIF_RETTYPE
erts_internal_dirty_process_handle_signals_1(BIF_ALIST_1)7163 erts_internal_dirty_process_handle_signals_1(BIF_ALIST_1)
7164 {
7165 erts_aint32_t state, dirty, noproc;
7166 int busy;
7167 Process *rp;
7168
7169 if (BIF_P != erts_dirty_process_signal_handler
7170 && BIF_P != erts_dirty_process_signal_handler_high
7171 && BIF_P != erts_dirty_process_signal_handler_max)
7172 BIF_ERROR(BIF_P, EXC_NOTSUP);
7173
7174 if (is_not_internal_pid(BIF_ARG_1))
7175 BIF_RET(am_false);
7176
7177 rp = erts_proc_lookup_raw(BIF_ARG_1);
7178 if (!rp)
7179 BIF_RET(am_noproc);
7180
7181 state = erts_atomic32_read_nob(&rp->state);
7182 dirty = (state & ERTS_PSFLG_DIRTY_RUNNING);
7183 /*
7184 * Ignore ERTS_PSFLG_DIRTY_RUNNING_SYS (see
7185 * comment in erts_execute_dirty_system_task()
7186 * in erl_process.c).
7187 */
7188 if (!dirty)
7189 BIF_RET(am_normal);
7190
7191 busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
7192
7193 state = erts_atomic32_read_mb(&rp->state);
7194 noproc = (state & ERTS_PSFLG_FREE);
7195 dirty = (state & ERTS_PSFLG_DIRTY_RUNNING);
7196
7197 if (busy) {
7198 if (noproc)
7199 BIF_RET(am_noproc);
7200 if (dirty)
7201 BIF_RET(am_more); /* try again... */
7202 BIF_RET(am_normal); /* will handle signals itself... */
7203 }
7204 else {
7205 erts_aint32_t state;
7206 int done;
7207 Eterm res = am_false;
7208 int reds = 0;
7209
7210 if (noproc)
7211 res = am_noproc;
7212 else if (!dirty)
7213 res = am_normal; /* will handle signals itself... */
7214 else if (rp->sig_qs.flags & FS_HANDLING_SIGS)
7215 res = am_busy;
7216 else {
7217 reds = ERTS_BIF_REDS_LEFT(BIF_P);
7218 done = erts_proc_sig_handle_incoming(rp, &state, &reds,
7219 reds, 0);
7220 if (done || (state & ERTS_PSFLG_EXITING))
7221 res = am_ok;
7222 else
7223 res = am_more;
7224 }
7225
7226 erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
7227
7228 if (reds)
7229 BUMP_REDS(BIF_P, reds);
7230
7231 BIF_RET(res);
7232 }
7233 }
7234
7235 /* Cleanup */
7236
7237 void
erts_proc_sig_cleanup_queues(Process * c_p)7238 erts_proc_sig_cleanup_queues(Process *c_p)
7239 {
7240 ErtsMessage *queues[] = {
7241 c_p->sig_qs.first, /* Message queue (inner signal queue) */
7242 c_p->sig_qs.cont /* Private signal queue (middle signal queue) */
7243 };
7244 int i;
7245
7246 for (i = 0; i < sizeof(queues)/sizeof(queues[0]); i++) {
7247 ErtsMessage *sig = queues[i];
7248 while (sig) {
7249 ErtsMessage *free_sig = sig;
7250 sig = sig->next;
7251 if (ERTS_SIG_IS_RECV_MARKER(free_sig))
7252 recv_marker_deallocate(c_p, (ErtsRecvMarker *) free_sig);
7253 else {
7254 free_sig->next = NULL;
7255 erts_cleanup_messages(free_sig);
7256 }
7257 }
7258 }
7259
7260 #ifdef DEBUG
7261 /*
7262 * External signal queue (outer signal queue)
7263 * should already have been taken care of...
7264 */
7265 erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
7266 ASSERT(!c_p->sig_inq.first);
7267 erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
7268 #endif
7269 }
7270
7271 /* Debug */
7272
7273 static void
wait_handle_signals(Process * c_p)7274 wait_handle_signals(Process *c_p)
7275 {
7276 /*
7277 * Process needs to wait on a dirty process signal
7278 * handler before it can handle signals by itself...
7279 *
7280 * This should be a quite rare event. This only occurs
7281 * when all of the following occurs:
7282 * * The process is executing dirty and receives a
7283 * signal.
7284 * * A dirty process signal handler starts handling
7285 * signals for the process and unlocks the main
7286 * lock while doing so. This can currently only
7287 * occur if handling an 'unlink' signal from a port.
7288 * * While the dirty process signal handler is handling
7289 * signals for the process, the process stops executing
7290 * dirty, gets scheduled on a normal scheduler, and
7291 * then tries to handle signals itself.
7292 *
7293 * If the above happens, the normal sceduler executing
7294 * the process will wait here until the dirty process
7295 * signal handler is done with the process...
7296 */
7297 erts_tse_t *event;
7298
7299 ASSERT(c_p = erts_get_current_process());
7300 ASSERT(c_p->scheduler_data);
7301 ASSERT(c_p->scheduler_data->aux_work_data.ssi);
7302 ASSERT(c_p->scheduler_data->aux_work_data.ssi->event);
7303 ASSERT(c_p->sig_qs.flags & FS_HANDLING_SIGS);
7304 ASSERT(!(c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS));
7305
7306 event = c_p->scheduler_data->aux_work_data.ssi->event;
7307 c_p->sig_qs.flags |= FS_WAIT_HANDLE_SIGS;
7308
7309 erts_tse_use(event);
7310
7311 do {
7312 ASSERT(c_p->sig_qs.flags & FS_WAIT_HANDLE_SIGS);
7313 erts_tse_reset(event);
7314 erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
7315 erts_tse_wait(event);
7316 erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
7317 } while (c_p->sig_qs.flags & FS_HANDLING_SIGS);
7318
7319 erts_tse_return(event);
7320
7321 c_p->sig_qs.flags &= ~FS_WAIT_HANDLE_SIGS;
7322 c_p->sig_qs.flags |= FS_HANDLING_SIGS;
7323 }
7324
7325 static void
wake_handle_signals(Process * proc)7326 wake_handle_signals(Process *proc)
7327 {
7328 /*
7329 * Wake scheduler sleeping in wait_handle_signals()
7330 * (above)...
7331 *
7332 * This function should only be called by a dirty process
7333 * signal handler process...
7334 */
7335 #ifdef DEBUG
7336 Process *c_p = erts_get_current_process();
7337 ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc));
7338 ASSERT(proc->sig_qs.flags & FS_WAIT_HANDLE_SIGS);
7339 ERTS_ASSERT(c_p == erts_dirty_process_signal_handler_max
7340 || c_p == erts_dirty_process_signal_handler_high
7341 || erts_dirty_process_signal_handler);
7342 ASSERT(proc->scheduler_data);
7343 ASSERT(proc->scheduler_data->aux_work_data.ssi);
7344 ASSERT(proc->scheduler_data->aux_work_data.ssi->event);
7345 #endif
7346
7347 proc->sig_qs.flags &= ~FS_HANDLING_SIGS;
7348 erts_tse_set(proc->scheduler_data->aux_work_data.ssi->event);
7349 }
7350
7351 static void
debug_foreach_sig_heap_frags(ErlHeapFragment * hfrag,void (* oh_func)(ErlOffHeap *,void *),void * arg)7352 debug_foreach_sig_heap_frags(ErlHeapFragment *hfrag,
7353 void (*oh_func)(ErlOffHeap *, void *),
7354 void *arg)
7355 {
7356 ErlHeapFragment *hf = hfrag;
7357 while (hf) {
7358 oh_func(&(hf->off_heap), arg);
7359 hf = hf->next;
7360 }
7361 }
7362
7363 static void
debug_foreach_sig_fake_oh(Eterm term,void (* oh_func)(ErlOffHeap *,void *),void * arg)7364 debug_foreach_sig_fake_oh(Eterm term,
7365 void (*oh_func)(ErlOffHeap *, void *),
7366 void *arg)
7367 {
7368 if (is_external(term)) {
7369 ErlOffHeap oh;
7370 oh.overhead = 0;
7371 oh.first = ((struct erl_off_heap_header *)
7372 (char *) external_thing_ptr(term));
7373 ASSERT(!oh.first->next);
7374 oh_func(&oh, arg);
7375 }
7376
7377 }
7378
7379 static void
debug_foreach_sig_external(ErtsMessage * msgp,void (* ext_func)(ErtsDistExternal *,void *),void * arg)7380 debug_foreach_sig_external(ErtsMessage *msgp,
7381 void (*ext_func)(ErtsDistExternal *, void *),
7382 void *arg)
7383 {
7384 ext_func(erts_proc_sig_get_external(msgp), arg);
7385 }
7386
void
erts_proc_sig_debug_foreach_sig(Process *c_p,
                                void (*msg_func)(ErtsMessage *, void *),
                                void (*oh_func)(ErlOffHeap *, void *),
                                ErtsMonitorFunc mon_func,
                                ErtsLinkFunc lnk_func,
                                void (*ext_func)(ErtsDistExternal *, void *),
                                void *arg)
{
    /*
     * Debug iteration over all three signal queues (inner, middle,
     * outer), dispatching each entry to the appropriate callback:
     * messages to msg_func, off-heap lists to oh_func, monitors to
     * mon_func, links to lnk_func and external data to ext_func.
     */
    ErtsMessage *queue[] = {c_p->sig_qs.first, c_p->sig_qs.cont, c_p->sig_inq.first};
    int qix;

    for (qix = 0; qix < sizeof(queue)/sizeof(queue[0]); qix++) {
        ErtsMessage *sig;
        for (sig = queue[qix]; sig; sig = sig->next) {

            if (ERTS_SIG_IS_MSG(sig)) {
                msg_func(sig, arg);
            } else {
                Eterm tag;
                Uint16 type;
                int op;

                ASSERT(sig);
                ASSERT(ERTS_SIG_IS_NON_MSG(sig));

                /* Non-message signal: dispatch on operation and type. */
                tag = ((ErtsSignal *) sig)->common.tag;
                type = ERTS_PROC_SIG_TYPE(tag);
                op = ERTS_PROC_SIG_OP(tag);

                switch (op) {

                case ERTS_SIG_Q_OP_EXIT:
                case ERTS_SIG_Q_OP_EXIT_LINKED:
                case ERTS_SIG_Q_OP_MONITOR_DOWN:
                    switch (type) {
                    case ERTS_SIG_Q_TYPE_GEN_EXIT:
                        if (!ERTS_SIG_IS_GEN_EXIT_EXTERNAL(sig))
                            debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
                        else {
                            /* External gen-exit: off-heap data plus the
                             * still-encoded external term. */
                            oh_func(&sig->hfrag.off_heap, arg);
                            debug_foreach_sig_external(sig, ext_func, arg);
                        }
                        break;
                    case ERTS_LNK_TYPE_PORT:
                    case ERTS_LNK_TYPE_PROC:
                    case ERTS_LNK_TYPE_DIST_PROC:
                        lnk_func((ErtsLink *) sig, arg, -1);
                        break;
                    case ERTS_MON_TYPE_PORT:
                    case ERTS_MON_TYPE_PROC:
                    case ERTS_MON_TYPE_DIST_PROC:
                    case ERTS_MON_TYPE_NODE:
                        mon_func((ErtsMonitor *) sig, arg, -1);
                        break;
                    default:
                        ERTS_INTERNAL_ERROR("Unexpected sig type");
                        break;
                    }
                    break;

                case ERTS_SIG_Q_OP_ADJ_MSGQ:
                    /* Off-heap adjust-msgq signals carry no fragment. */
                    if (type == ERTS_SIG_Q_TYPE_OFF_HEAP)
                        break;
                    /* Fall through... */
                case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
                case ERTS_SIG_Q_OP_ALIAS_MSG:
                    debug_foreach_sig_heap_frags(&sig->hfrag, oh_func, arg);
                    break;

                case ERTS_SIG_Q_OP_DEMONITOR:
                    if (type == ERTS_SIG_Q_TYPE_DIST_PROC_DEMONITOR) {
                        debug_foreach_sig_fake_oh(((ErtsSigDistProcDemonitor *) sig)->ref,
                                                  oh_func, arg);
                        break;
                    }
                    /* Fall through... */

                case ERTS_SIG_Q_OP_MONITOR:
                    mon_func((ErtsMonitor *) sig, arg, -1);
                    break;

                case ERTS_SIG_Q_OP_UNLINK:
                    if (type == ERTS_SIG_Q_TYPE_DIST_LINK) {
                        debug_foreach_sig_fake_oh(((ErtsSigDistUnlinkOp *) sig)->remote,
                                                  oh_func, arg);
                    }
                    break;

                case ERTS_SIG_Q_OP_UNLINK_ACK:
                    break;

                case ERTS_SIG_Q_OP_LINK:
                    lnk_func((ErtsLink *) sig, arg, -1);
                    break;

                case ERTS_SIG_Q_OP_GROUP_LEADER: {
                    ErtsSigGroupLeader *sgl = (ErtsSigGroupLeader *) sig;
                    oh_func(&sgl->oh, arg);
                    break;
                }

                case ERTS_SIG_Q_OP_IS_ALIVE:
                case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
                case ERTS_SIG_Q_OP_PROCESS_INFO:
                case ERTS_SIG_Q_OP_RECV_MARK:
                    /* No heap/off-heap data to visit. */
                    break;

                default:
                    ERTS_INTERNAL_ERROR("Unknown signal");
                    break;
                }

            }
        }
    }
}
7504
7505 #ifdef ERTS_PROC_SIG_HARD_DEBUG_RECV_MARKER
7506
7507 void
erl_proc_sig_hdbg_chk_recv_marker_block(Process * c_p)7508 erl_proc_sig_hdbg_chk_recv_marker_block(Process *c_p)
7509 {
7510 int ix, used, unused, free;
7511 ErtsRecvMarkerBlock *blkp = c_p->sig_qs.recv_mrk_blk;
7512 #ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
7513 int old_recv_marker = 0;
7514 #endif
7515 if (!blkp)
7516 return;
7517
7518 unused = used = 0;
7519 ix = blkp->used_ix;
7520 ERTS_ASSERT(0 <= ix && ix < ERTS_RECV_MARKER_BLOCK_SIZE);
7521
7522 do {
7523 int pix, nix;
7524 ErtsRecvMarker *markp = &blkp->marker[ix];
7525 Eterm ref = blkp->ref[ix];
7526
7527 ERTS_ASSERT(is_internal_ref(ref)
7528 || is_small(ref)
7529 || is_big(ref)
7530 || ref == am_undefined
7531 || is_nil(ref));
7532
7533 #ifdef ERTS_SUPPORT_OLD_RECV_MARK_INSTRS
7534 if (ref == erts_old_recv_marker_id) {
7535 ERTS_ASSERT(blkp->old_recv_marker_ix == ix);
7536 old_recv_marker++;
7537 }
7538 #endif
7539
7540 if (ref == am_undefined)
7541 unused++;
7542
7543 ASSERT(markp->used);
7544
7545 pix = markp->prev_ix;
7546 nix = markp->next_ix;
7547
7548 ERTS_ASSERT(0 <= pix && pix < ERTS_RECV_MARKER_BLOCK_SIZE);
7549 ERTS_ASSERT(0 <= nix && nix < ERTS_RECV_MARKER_BLOCK_SIZE);
7550 ERTS_ASSERT(blkp->marker[pix].next_ix == ix);
7551 ERTS_ASSERT(blkp->marker[nix].prev_ix == ix);
7552
7553 used++;
7554 ERTS_ASSERT(used <= ERTS_RECV_MARKER_BLOCK_SIZE);
7555
7556 ix = nix;
7557 } while (ix != blkp->used_ix);
7558
7559 ERTS_ASSERT(unused == blkp->unused);
7560
7561 free = 0;
7562
7563 ix = blkp->free_ix;
7564 if (ix >= 0) {
7565 ERTS_ASSERT(ix < ERTS_RECV_MARKER_BLOCK_SIZE);
7566
7567 do {
7568 Eterm ref = blkp->ref[ix];
7569 ERTS_ASSERT(ref == am_free);
7570 ASSERT(!blkp->marker[ix].used);
7571 free++;
7572 ERTS_ASSERT(free < ERTS_RECV_MARKER_BLOCK_SIZE);
7573 ix = blkp->marker[ix].next_ix;
7574 } while (ix >= 0);
7575 }
7576
7577 ERTS_ASSERT(used + free == ERTS_RECV_MARKER_BLOCK_SIZE);
7578
7579 ERTS_ASSERT(old_recv_marker == 0 || old_recv_marker == 1);
7580 }
7581
7582 #endif /* ERTS_PROC_SIG_HARD_DEBUG_RECV_MARKER */
7583
7584
7585 #ifdef ERTS_PROC_SIG_HARD_DEBUG
7586
static void
chk_eterm(Process *c_p, int privq, ErtsMessage *mp, Eterm term)
{
    /*
     * Hard-debug sanity check of a term referenced from a queued
     * message: immediates pass trivially; boxed/list pointers must
     * point at well-formed data located in a literal area, one of the
     * message's heap fragments, or the process' own heap.
     */
    ErlHeapFragment *bp;
    Eterm *ptr = NULL;

    switch (primary_tag(term)) {
    case TAG_PRIMARY_IMMED1:
        return;
    case TAG_PRIMARY_LIST:
        ptr = list_val(term);
        /* A cons cell must not contain header words. */
        ERTS_ASSERT(!is_header(CAR(ptr)));
        ERTS_ASSERT(!is_header(CDR(ptr)));
        break;
    case TAG_PRIMARY_BOXED:
        ptr = boxed_val(term);
        /* A boxed term must start with a header word. */
        ERTS_ASSERT(is_header(*ptr));
        break;
    case TAG_PRIMARY_HEADER:
    default:
        ERTS_INTERNAL_ERROR("Not valid term");
        break;
    }

    if (erts_is_literal(term, ptr))
        return;

    /* Accept pointers into any of the message's heap fragments. */
    for (bp = erts_message_to_heap_frag(mp); bp; bp = bp->next) {
        if (bp->mem <= ptr && ptr < bp->mem + bp->used_size)
            return;
    }

    /* Otherwise the term must live on the process' own heap. */
    ASSERT(erts_dbg_within_proc(ptr, c_p, NULL));
}
7621
7622 static Sint
proc_sig_hdbg_check_queue(Process * proc,int privq,ErtsMessage ** sig_next,ErtsMessage ** sig_last,ErtsMessage ** sig_nm_next,ErtsMessage ** sig_nm_last,ErtsSigRecvTracing * tracing,int * found_set_save_recv_marker_p,erts_aint32_t sig_psflg)7623 proc_sig_hdbg_check_queue(Process *proc,
7624 int privq,
7625 ErtsMessage **sig_next,
7626 ErtsMessage **sig_last,
7627 ErtsMessage **sig_nm_next,
7628 ErtsMessage **sig_nm_last,
7629 ErtsSigRecvTracing *tracing,
7630 int *found_set_save_recv_marker_p,
7631 erts_aint32_t sig_psflg)
7632 {
7633 ErtsMessage **next, *sig, **nm_next, **nm_last;
7634 int last_nm_sig_found, nm_sigs = 0, found_next_trace = 0,
7635 found_save = 0, last_sig_found = 0, recv_marker = 0,
7636 recv_marker_set_save = 0;
7637 Sint msg_len = 0;
7638 ErtsMessage **next_trace = tracing ? tracing->messages.next : NULL;
7639 ErtsMessage **save = proc->sig_qs.save;
7640
7641 if (!privq) {
7642 ErtsSignal *sig = (ErtsSignal *) *sig_next;
7643 if (sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
7644
7645 }
7646 }
7647
7648 nm_next = sig_nm_next;
7649 nm_last = sig_nm_last;
7650 next = sig_next;
7651 sig = *sig_next;
7652
7653 last_nm_sig_found = !nm_last;
7654 if (last_nm_sig_found)
7655 ERTS_ASSERT(!nm_next);
7656 else
7657 ERTS_ASSERT(nm_next);
7658
7659 while (1) {
7660 ErtsSignal *nm_sig;
7661
7662 if (next == sig_last) {
7663 ASSERT(!*next);
7664 last_sig_found = 1;
7665 }
7666
7667 if (next == save)
7668 found_save = 1;
7669
7670 if (next == next_trace) {
7671 found_next_trace = 1;
7672 ERTS_ASSERT(nm_sigs == 0);
7673 }
7674
7675 while (sig
7676 && (ERTS_SIG_IS_MSG(sig)
7677 || (ERTS_SIG_DBG_IS_HANDLED_RECV_MARKER(sig)))) {
7678 int i;
7679 if (ERTS_SIG_IS_RECV_MARKER(sig)) {
7680 ErtsRecvMarker *markp = (ErtsRecvMarker *) sig;
7681 recv_marker++;
7682 ASSERT(!markp->set_save);
7683 ERTS_ASSERT(next == markp->prev_next);
7684 }
7685 else {
7686 if (ERTS_SIG_IS_EXTERNAL_MSG(sig))
7687 i = 1;
7688 else
7689 i = 0;
7690 for (; i < ERL_MESSAGE_REF_ARRAY_SZ; i++)
7691 chk_eterm(proc, privq, sig, sig->m[i]);
7692 msg_len++;
7693 }
7694
7695 next = &sig->next;
7696 sig = sig->next;
7697
7698 if (next == sig_last) {
7699 ASSERT(!*next);
7700 last_sig_found = 1;
7701 }
7702
7703 if (next == save)
7704 found_save = 1;
7705
7706 if (next == next_trace) {
7707 found_next_trace = 1;
7708 ERTS_ASSERT(nm_sigs == 0);
7709 }
7710 }
7711
7712 if (!sig)
7713 break;
7714
7715 if (ERTS_SIG_IS_RECV_MARKER(sig)) {
7716 ErtsRecvMarker *markp = (ErtsRecvMarker *) sig;
7717 ErtsRecvMarkerBlock *blkp = proc->sig_qs.recv_mrk_blk;
7718 ERTS_ASSERT(blkp);
7719 recv_marker++;
7720 if (markp->set_save) {
7721 ERTS_ASSERT(blkp->pending_set_save_ix
7722 == ERTS_RECV_MARKER_IX__(blkp, markp));
7723 recv_marker_set_save++;
7724 }
7725 if (privq < 0)
7726 ERTS_ASSERT(next == markp->prev_next);
7727 }
7728
7729 nm_sig = (ErtsSignal *) sig;
7730
7731 if (nm_sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
7732 ERTS_ASSERT(!privq);
7733 ERTS_ASSERT(sig == *sig_next);
7734 }
7735 else {
7736 nm_sigs++;
7737
7738 ERTS_ASSERT(!last_nm_sig_found);
7739 ERTS_ASSERT(ERTS_SIG_IS_NON_MSG(sig));
7740
7741 ERTS_ASSERT(nm_next == next);
7742
7743 if (nm_last == next) {
7744 ASSERT(!nm_sig->common.specific.next);
7745 last_nm_sig_found = 1;
7746 }
7747
7748 nm_next = nm_sig->common.specific.next;
7749
7750 }
7751
7752 next = &nm_sig->common.next;
7753 sig = nm_sig->common.next;
7754
7755 }
7756
7757 if (found_set_save_recv_marker_p)
7758 (*found_set_save_recv_marker_p) += recv_marker_set_save;
7759
7760 if (!privq) {
7761 /* outer queue */
7762 ERTS_ASSERT(!found_save);
7763 ERTS_ASSERT(!recv_marker);
7764 }
7765 else if (privq > 0) {
7766 /* middle queue */
7767 ERTS_ASSERT(!next_trace || found_next_trace);
7768 ERTS_ASSERT(!found_save);
7769 }
7770 else {
7771 /* inner queue */
7772 ERTS_ASSERT(!found_next_trace);
7773 ERTS_ASSERT(nm_sigs == 0);
7774 ERTS_ASSERT(found_save);
7775 }
7776
7777 ERTS_ASSERT(last_nm_sig_found);
7778 ERTS_ASSERT(last_sig_found);
7779
7780 if (sig_psflg != ERTS_PSFLG_FREE) {
7781 erts_aint32_t state = erts_atomic32_read_nob(&proc->state);
7782 ERTS_ASSERT(nm_sigs ? !!(state & sig_psflg) : !(state & sig_psflg));
7783 }
7784
7785 return msg_len;
7786 }
7787
7788 void
erts_proc_sig_hdbg_check_priv_queue(Process * p,int qlock,char * what,char * file,int line)7789 erts_proc_sig_hdbg_check_priv_queue(Process *p, int qlock, char *what, char *file, int line)
7790 {
7791 int found_set_save_recv_marker = 0;
7792 Sint len, len1, len2;
7793 ErtsRecvMarkerBlock *blkp = p->sig_qs.recv_mrk_blk;
7794
7795 ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
7796 || ERTS_PROC_IS_EXITING(p)
7797 || (ERTS_PROC_LOCK_MAIN
7798 & erts_proc_lc_my_proc_locks(p)));
7799 len1 = proc_sig_hdbg_check_queue(p,
7800 -1,
7801 &p->sig_qs.first,
7802 p->sig_qs.last,
7803 NULL,
7804 NULL,
7805 NULL,
7806 &found_set_save_recv_marker,
7807 ERTS_PSFLG_FREE);
7808 len2 = proc_sig_hdbg_check_queue(p,
7809 1,
7810 &p->sig_qs.cont,
7811 p->sig_qs.cont_last,
7812 p->sig_qs.nmsigs.next,
7813 p->sig_qs.nmsigs.last,
7814 NULL,
7815 &found_set_save_recv_marker,
7816 ERTS_PSFLG_SIG_Q);
7817 ERTS_ASSERT(found_set_save_recv_marker == 1
7818 || found_set_save_recv_marker == 0);
7819 ERTS_ASSERT(found_set_save_recv_marker || !blkp || blkp->pending_set_save_ix < 0);
7820 ERTS_ASSERT(!found_set_save_recv_marker || blkp->pending_set_save_ix >= 0);
7821 len = proc_sig_privqs_len(p, qlock);
7822 ERTS_ASSERT(len == len1 + len2);
7823 }
7824
7825 void
erts_proc_sig_hdbg_check_in_queue(Process * p,char * what,char * file,int line)7826 erts_proc_sig_hdbg_check_in_queue(Process *p, char *what, char *file, int line)
7827 {
7828 Sint len;
7829 ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
7830 || ERTS_PROC_IS_EXITING(p)
7831 || (ERTS_PROC_LOCK_MSGQ
7832 & erts_proc_lc_my_proc_locks(p)));
7833 len = proc_sig_hdbg_check_queue(p,
7834 0,
7835 &p->sig_inq.first,
7836 p->sig_inq.last,
7837 p->sig_inq.nmsigs.next,
7838 p->sig_inq.nmsigs.last,
7839 NULL,
7840 NULL,
7841 ERTS_PSFLG_SIG_IN_Q);
7842 ASSERT(p->sig_inq.len == len); (void)len;
7843 }
7844
7845 #endif /* ERTS_PROC_SIG_HARD_DEBUG */
7846