1 /*
2  * Copyright (C) 2010-2021 Red Hat, Inc.
3  *
4  * Author: Angus Salkeld <asalkeld@redhat.com>
5  *
6  * This file is part of libqb.
7  *
8  * libqb is free software: you can redistribute it and/or modify
9  * it under the terms of the GNU Lesser General Public License as published by
10  * the Free Software Foundation, either version 2.1 of the License, or
11  * (at your option) any later version.
12  *
13  * libqb is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public License
19  * along with libqb.  If not, see <http://www.gnu.org/licenses/>.
20  */
21 #include "os_base.h"
22 #include <poll.h>
23 #include <signal.h>
24 #include <setjmp.h>
25 
26 #include "ipc_int.h"
27 #include "util_int.h"
28 #include "ringbuffer_int.h"
29 #include <qb/qbdefs.h>
30 #include <qb/qbatomic.h>
31 #include <qb/qbloop.h>
32 #include <qb/qbrb.h>
33 
34 /*
35  * client functions
36  * --------------------------------------------------------
37  */
38 static void
qb_ipcc_shm_disconnect(struct qb_ipcc_connection * c)39 qb_ipcc_shm_disconnect(struct qb_ipcc_connection *c)
40 {
41 	void (*rb_destructor)(struct qb_ringbuffer_s *);
42 	rb_destructor = qb_rb_close;
43 
44 	/* This is an attempt to make sure that /dev/shm is cleaned up when a
45 	 * server exits unexpectedly. Normally it's the server's responsibility
46 	 * to tidy up sockets, but if it crashes or is killed with SIGKILL then
47 	 * the client (us) makes a reasonable attempt to tidy up the server sockets
48 	 * we have connected. The extra delay here just gives the server chance to
49 	 * disappear fully. As a client we can get here pretty quickly but shutting
50 	 * down a large server may take a little longer even when SIGKILLed.
51 	 * The 1/100th of a second is an arbitrary delay (of course) but seems to
52 	 * catch most servers in 2 tries or less.
53 	 */
54 	if (!c->is_connected && c->server_pid) {
55 		int attempt = 0;
56 		while (attempt++ <= 3 && rb_destructor == qb_rb_close) {
57 			if (kill(c->server_pid, 0) == -1 && errno == ESRCH) {
58 				rb_destructor = qb_rb_force_close;
59 			} else {
60 				struct timespec ts = {0, 10*QB_TIME_NS_IN_MSEC};
61 				struct timespec ts_left = {0, 0};
62 				nanosleep(&ts, &ts_left);
63 			}
64 		}
65 	}
66 	/*
67 	 * On FreeBSD we don't have a server PID so tidy up anyway. The
68 	 * server traps SIGBUS when cleaning up so will cope fine.
69 	 */
70 	if (!c->is_connected && !c->server_pid) {
71 		rb_destructor = qb_rb_force_close;
72 	}
73 
74 	if (rb_destructor == qb_rb_force_close) {
75 		qb_util_log(LOG_DEBUG,
76 			    "FORCE closing server sockets\n");
77 	}
78 
79 	qb_ipcc_us_sock_close(c->setup.u.us.sock);
80 
81 	rb_destructor(qb_rb_lastref_and_ret(&c->request.u.shm.rb));
82 	rb_destructor(qb_rb_lastref_and_ret(&c->response.u.shm.rb));
83 	rb_destructor(qb_rb_lastref_and_ret(&c->event.u.shm.rb));
84 }
85 
86 static ssize_t
qb_ipc_shm_send(struct qb_ipc_one_way * one_way,const void * msg_ptr,size_t msg_len)87 qb_ipc_shm_send(struct qb_ipc_one_way *one_way,
88 		const void *msg_ptr, size_t msg_len)
89 {
90 	return qb_rb_chunk_write(one_way->u.shm.rb, msg_ptr, msg_len);
91 }
92 
93 static ssize_t
qb_ipc_shm_sendv(struct qb_ipc_one_way * one_way,const struct iovec * iov,size_t iov_len)94 qb_ipc_shm_sendv(struct qb_ipc_one_way *one_way,
95 		 const struct iovec *iov, size_t iov_len)
96 {
97 	char *dest;
98 	int32_t res = 0;
99 	int32_t total_size = 0;
100 	int32_t i;
101 	char *pt = NULL;
102 
103 	if (one_way->u.shm.rb == NULL) {
104 		return -ENOTCONN;
105 	}
106 
107 	for (i = 0; i < iov_len; i++) {
108 		total_size += iov[i].iov_len;
109 	}
110 	dest = qb_rb_chunk_alloc(one_way->u.shm.rb, total_size);
111 	if (dest == NULL) {
112 		return -errno;
113 	}
114 	pt = dest;
115 
116 	for (i = 0; i < iov_len; i++) {
117 		memcpy(pt, iov[i].iov_base, iov[i].iov_len);
118 		pt += iov[i].iov_len;
119 	}
120 	res = qb_rb_chunk_commit(one_way->u.shm.rb, total_size);
121 	if (res < 0) {
122 		return res;
123 	}
124 	return total_size;
125 }
126 
127 static ssize_t
qb_ipc_shm_recv(struct qb_ipc_one_way * one_way,void * msg_ptr,size_t msg_len,int32_t ms_timeout)128 qb_ipc_shm_recv(struct qb_ipc_one_way *one_way,
129 		void *msg_ptr, size_t msg_len, int32_t ms_timeout)
130 {
131 	if (one_way->u.shm.rb == NULL) {
132 		return -ENOTCONN;
133 	}
134 	return qb_rb_chunk_read(one_way->u.shm.rb,
135 				(void *)msg_ptr, msg_len, ms_timeout);
136 }
137 
138 static ssize_t
qb_ipc_shm_peek(struct qb_ipc_one_way * one_way,void ** data_out,int32_t ms_timeout)139 qb_ipc_shm_peek(struct qb_ipc_one_way *one_way, void **data_out,
140 		int32_t ms_timeout)
141 {
142 	ssize_t rc;
143 	if (one_way->u.shm.rb == NULL) {
144 		return -ENOTCONN;
145 	}
146 	rc = qb_rb_chunk_peek(one_way->u.shm.rb, data_out, ms_timeout);
147 	if (rc == 0)  {
148 		return -EAGAIN;
149 	}
150 	return rc;
151 }
152 
153 static void
qb_ipc_shm_reclaim(struct qb_ipc_one_way * one_way)154 qb_ipc_shm_reclaim(struct qb_ipc_one_way *one_way)
155 {
156 	qb_rb_chunk_reclaim(one_way->u.shm.rb);
157 }
158 
159 static void
qb_ipc_shm_fc_set(struct qb_ipc_one_way * one_way,int32_t fc_enable)160 qb_ipc_shm_fc_set(struct qb_ipc_one_way *one_way, int32_t fc_enable)
161 {
162 	int32_t *fc;
163 	fc = qb_rb_shared_user_data_get(one_way->u.shm.rb);
164 	qb_util_log(LOG_TRACE, "setting fc to %d", fc_enable);
165 	qb_atomic_int_set(fc, fc_enable);
166 }
167 
168 static int32_t
qb_ipc_shm_fc_get(struct qb_ipc_one_way * one_way)169 qb_ipc_shm_fc_get(struct qb_ipc_one_way *one_way)
170 {
171 	int32_t *fc;
172 	int32_t rc = qb_rb_refcount_get(one_way->u.shm.rb);
173 
174 	if (rc != 2) {
175 		return -ENOTCONN;
176 	}
177 	fc = qb_rb_shared_user_data_get(one_way->u.shm.rb);
178 	return qb_atomic_int_get(fc);
179 }
180 
181 static ssize_t
qb_ipc_shm_q_len_get(struct qb_ipc_one_way * one_way)182 qb_ipc_shm_q_len_get(struct qb_ipc_one_way *one_way)
183 {
184 	if (one_way->u.shm.rb == NULL) {
185 		return -ENOTCONN;
186 	}
187 	return qb_rb_chunks_used(one_way->u.shm.rb);
188 }
189 
/*
 * Client side of a SHM connection: install the SHM transport ops and
 * attach to the three ringbuffers whose names the server returned in
 * the connection response. Returns 0 or a negative errno value; on
 * failure every ring opened so far is closed again (goto unwind).
 */
int32_t
qb_ipcc_shm_connect(struct qb_ipcc_connection * c,
		    struct qb_ipc_connection_response * response)
{
	int32_t res = 0;

	/* Wire up the SHM implementations of the transport operations. */
	c->funcs.send = qb_ipc_shm_send;
	c->funcs.sendv = qb_ipc_shm_sendv;
	c->funcs.recv = qb_ipc_shm_recv;
	c->funcs.fc_get = qb_ipc_shm_fc_get;
	c->funcs.disconnect = qb_ipcc_shm_disconnect;
	/* SHM has no fd of its own; poll on the setup socket instead. */
	c->needs_sock_for_poll = QB_TRUE;

	/* NOTE(review): the -20 presumably reserves room for the
	 * "-request-"/"-response-"/"-event-" style suffixes appended when
	 * the per-ring names are built - confirm against the server side. */
	if (strlen(c->name) > (NAME_MAX - 20)) {
		errno = EINVAL;
		return -errno;
	}

	/* Request ring carries an int32_t of shared user data
	 * (the flow-control word read by qb_ipc_shm_fc_get()). */
	c->request.u.shm.rb = qb_rb_open(response->request,
					 c->request.max_msg_size,
					 QB_RB_FLAG_SHARED_PROCESS,
					 sizeof(int32_t));
	if (c->request.u.shm.rb == NULL) {
		res = -errno;
		qb_util_perror(LOG_ERR, "qb_rb_open:REQUEST");
		goto return_error;
	}
	c->response.u.shm.rb = qb_rb_open(response->response,
					  c->response.max_msg_size,
					  QB_RB_FLAG_SHARED_PROCESS, 0);

	if (c->response.u.shm.rb == NULL) {
		res = -errno;
		qb_util_perror(LOG_ERR, "qb_rb_open:RESPONSE");
		goto cleanup_request;
	}
	/* NOTE(review): the event ring is sized from response.max_msg_size,
	 * not event.max_msg_size - looks intentional (event sized like
	 * response) but confirm against the server-side ring creation. */
	c->event.u.shm.rb = qb_rb_open(response->event,
				       c->response.max_msg_size,
				       QB_RB_FLAG_SHARED_PROCESS, 0);

	if (c->event.u.shm.rb == NULL) {
		res = -errno;
		qb_util_perror(LOG_ERR, "qb_rb_open:EVENT");
		goto cleanup_request_response;
	}
	return 0;

	/* Unwind in reverse order of creation. */
cleanup_request_response:
	qb_rb_close(qb_rb_lastref_and_ret(&c->response.u.shm.rb));

cleanup_request:
	qb_rb_close(qb_rb_lastref_and_ret(&c->request.u.shm.rb));

return_error:
	/* res is negative; restore errno so perror reports the cause. */
	errno = -res;
	qb_util_perror(LOG_ERR, "connection failed");

	return res;
}
249 
250 /*
251  * service functions
252  * --------------------------------------------------------
253  */
/* Jump target used to escape a SIGBUS raised while tearing down shared
 * memory a client has truncated (see qb_ipcs_shm_disconnect()). */
static jmp_buf sigbus_jmpbuf;
/* SIGBUS handler: unwind back to the setjmp() in qb_ipcs_shm_disconnect().
 * NOTE(review): plain longjmp() from a signal handler does not restore the
 * signal mask; sigsetjmp()/siglongjmp() would - confirm this is acceptable
 * here since the old handler is restored immediately after the jump. */
static void catch_sigbus(int signal)
{
	longjmp(sigbus_jmpbuf, 1);
}
259 
/*
 * Server side disconnect: close the connection's ringbuffers and setup
 * socket. The whole teardown runs under a temporary SIGBUS handler so
 * that touching SHM a client has truncated does not kill the server;
 * on SIGBUS we longjmp straight to the final cleanup.
 */
static void
qb_ipcs_shm_disconnect(struct qb_ipcs_connection *c)
{
	struct sigaction sa;
	struct sigaction old_sa;

	/* Don't die if the client has truncated the SHM under us */
	memset(&old_sa, 0, sizeof(old_sa));
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = catch_sigbus;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGBUS, &sa, &old_sa);

	/* catch_sigbus() longjmp()s back here with value 1. */
	if (setjmp(sigbus_jmpbuf) == 1) {
		goto end_disconnect;
	}

	/* Rings exist only once the connection reached (or is leaving)
	 * the active state. */
	if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN ||
	    c->state == QB_IPCS_CONNECTION_ACTIVE) {
		if (c->response.u.shm.rb) {
			qb_rb_close(qb_rb_lastref_and_ret(&c->response.u.shm.rb));
		}
		if (c->event.u.shm.rb) {
			qb_rb_close(qb_rb_lastref_and_ret(&c->event.u.shm.rb));
		}
		if (c->request.u.shm.rb) {
			qb_rb_close(qb_rb_lastref_and_ret(&c->request.u.shm.rb));
		}
	}

	/* The setup socket is only registered with the poll loop once the
	 * connection is established. */
	if (c->state == QB_IPCS_CONNECTION_ESTABLISHED ||
	    c->state == QB_IPCS_CONNECTION_ACTIVE) {
		if (c->setup.u.us.sock > 0) {
			(void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock);
			qb_ipcc_us_sock_close(c->setup.u.us.sock);
			c->setup.u.us.sock = -1;
		}
	}

end_disconnect:
	/* Restore the previous SIGBUS disposition before leaving. */
	sigaction(SIGBUS, &old_sa, NULL);
	remove_tempdir(c->description);
}
304 
305 static int32_t
qb_ipcs_shm_rb_open(struct qb_ipcs_connection * c,struct qb_ipc_one_way * ow,const char * rb_name)306 qb_ipcs_shm_rb_open(struct qb_ipcs_connection *c,
307 		    struct qb_ipc_one_way *ow,
308 		    const char *rb_name)
309 {
310 	int32_t res = 0;
311 
312 	ow->u.shm.rb = qb_rb_open(rb_name,
313 				  ow->max_msg_size,
314 				  QB_RB_FLAG_CREATE |
315 				  QB_RB_FLAG_SHARED_PROCESS,
316 				  sizeof(int32_t));
317 	if (ow->u.shm.rb == NULL) {
318 		res = -errno;
319 		qb_util_perror(LOG_ERR, "qb_rb_open:%s", rb_name);
320 		return res;
321 	}
322 	res = qb_rb_chown(ow->u.shm.rb, c->auth.uid, c->auth.gid);
323 	if (res != 0) {
324 		qb_util_perror(LOG_ERR, "qb_rb_chown:%s", rb_name);
325 		goto cleanup;
326 	}
327 	res = qb_rb_chmod(ow->u.shm.rb, c->auth.mode);
328 	if (res != 0) {
329 		qb_util_perror(LOG_ERR, "qb_rb_chmod:%s", rb_name);
330 		goto cleanup;
331 	}
332 	return res;
333 
334 cleanup:
335 	qb_rb_close(qb_rb_lastref_and_ret(&ow->u.shm.rb));
336 	return res;
337 }
338 
/*
 * Server side of a SHM connection: build the three per-connection ring
 * names, create the rings, and register the setup socket with the
 * service's poll loop. Returns 0 or a negative errno value; on failure
 * every ring created so far is closed again (goto unwind) and the error
 * is mirrored into r->hdr.error for the client.
 */
static int32_t
qb_ipcs_shm_connect(struct qb_ipcs_service *s,
		    struct qb_ipcs_connection *c,
		    struct qb_ipc_connection_response *r)
{
	int32_t res;
	char dirname[PATH_MAX];
	char *slash;

	qb_util_log(LOG_DEBUG, "connecting to client [%d]", c->pid);

	/* Ring names sent back to the client in the connection response.
	 * NOTE(review): assumes r->request/response/event are each at
	 * least NAME_MAX bytes - confirm against the response struct. */
	snprintf(r->request, NAME_MAX, "%s-request-%s",
		 c->description, s->name);
	snprintf(r->response, NAME_MAX, "%s-response-%s",
		 c->description, s->name);
	snprintf(r->event, NAME_MAX, "%s-event-%s",
		 c->description, s->name);

	/* Set correct ownership if qb_ipcs_connection_auth_set() has been used */
	strlcpy(dirname, c->description, sizeof(dirname));
	slash = strrchr(dirname, '/');
	if (slash) {
		/* Truncate to the containing directory and chown it;
		 * failure here is deliberately ignored (best effort). */
		*slash = '\0';
		(void)chown(dirname, c->auth.uid, c->auth.gid);
	}

	res = qb_ipcs_shm_rb_open(c, &c->request,
				  r->request);
	if (res != 0) {
		goto cleanup;
	}

	res = qb_ipcs_shm_rb_open(c, &c->response,
				  r->response);
	if (res != 0) {
		goto cleanup_request;
	}

	res = qb_ipcs_shm_rb_open(c, &c->event,
				  r->event);
	if (res != 0) {
		goto cleanup_request_response;
	}

	/* Watch the setup socket so client requests wake the main loop. */
	res = s->poll_fns.dispatch_add(s->poll_priority,
				       c->setup.u.us.sock,
				       POLLIN | POLLPRI | POLLNVAL,
				       c, qb_ipcs_dispatch_connection_request);
	if (res != 0) {
		qb_util_log(LOG_ERR,
			    "Error adding socket to mainloop (%s).",
			    c->description);
		goto cleanup_request_response_event;
	}

	r->hdr.error = 0;
	return 0;

	/* Unwind in reverse order of creation. */
cleanup_request_response_event:
	qb_rb_close(qb_rb_lastref_and_ret(&c->event.u.shm.rb));

cleanup_request_response:
	qb_rb_close(qb_rb_lastref_and_ret(&c->response.u.shm.rb));

cleanup_request:
	qb_rb_close(qb_rb_lastref_and_ret(&c->request.u.shm.rb));

cleanup:
	/* res is negative; restore errno so perror reports the cause. */
	r->hdr.error = res;
	errno = -res;
	qb_util_perror(LOG_ERR, "shm connection FAILED");

	return res;
}
413 
/*
 * Install the SHM transport operation table on a server-side service.
 */
void
qb_ipcs_shm_init(struct qb_ipcs_service *s)
{
	s->funcs.connect = qb_ipcs_shm_connect;
	s->funcs.disconnect = qb_ipcs_shm_disconnect;

	s->funcs.recv = qb_ipc_shm_recv;
	s->funcs.peek = qb_ipc_shm_peek;
	s->funcs.reclaim = qb_ipc_shm_reclaim;
	s->funcs.send = qb_ipc_shm_send;
	s->funcs.sendv = qb_ipc_shm_sendv;

	s->funcs.fc_set = qb_ipc_shm_fc_set;
	s->funcs.q_len_get = qb_ipc_shm_q_len_get;

	/* SHM has no pollable fd of its own; the per-connection setup
	 * socket is used to wake the poll loop instead. */
	s->needs_sock_for_poll = QB_TRUE;
}
431