/*	$OpenBSD: test-kqueue.c,v 1.2 2019/12/24 11:42:34 anton Exp $	*/

/*
 * Copyright (c) 2019 Anton Lindqvist <anton@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

#include "pipe.h"

/* Scenario the kqueue worker thread is expected to observe. */
enum kqueue_mode {
	KQUEUE_READ,
	KQUEUE_READ_EOF,
	KQUEUE_WRITE,
	KQUEUE_WRITE_EOF,
};

/* State shared between the test driver and the kqueue worker thread. */
struct context {
	enum kqueue_mode c_mode;
	int c_alive;		/* cleared by the worker once the event fired */

	int c_pipe[2];		/* pipe under test */
	int c_kq;		/* kqueue the worker waits on */

	char *c_buf;		/* scratch buffer of c_bufsiz bytes */
	size_t c_bufsiz;	/* PIPE_SIZE, set up in ctx_setup() */

	pthread_t c_th;		/* worker thread, see kqueue_thread() */
	pthread_mutex_t c_mtx;	/* protects c_alive */
};

static void ctx_setup(struct context *, enum kqueue_mode);
static void ctx_teardown(struct context *);
static int ctx_thread_alive(struct context *);
static void ctx_thread_start(struct context *);
static void ctx_lock(struct context *);
static void ctx_unlock(struct context *);

static void *kqueue_thread(void *);

/*
 * Verify kqueue read event.
62 */ 63 int 64 test_kqueue_read(void) 65 { 66 struct context ctx; 67 68 ctx_setup(&ctx, KQUEUE_READ); 69 ctx_thread_start(&ctx); 70 71 while (ctx_thread_alive(&ctx)) { 72 ssize_t n; 73 unsigned char c = 'r'; 74 75 n = write(ctx.c_pipe[1], &c, 1); 76 if (n == -1) 77 err(1, "write"); 78 if (n != 1) 79 errx(1, "write: %ld != 1", n); 80 } 81 82 ctx_teardown(&ctx); 83 84 return 0; 85 } 86 87 /* 88 * Verify kqueue read EOF event. 89 */ 90 int 91 test_kqueue_read_eof(void) 92 { 93 struct context ctx; 94 95 ctx_setup(&ctx, KQUEUE_READ_EOF); 96 ctx_thread_start(&ctx); 97 98 while (ctx_thread_alive(&ctx)) { 99 if (ctx.c_pipe[1] == -1) 100 continue; 101 102 close(ctx.c_pipe[1]); 103 ctx.c_pipe[1] = -1; 104 } 105 106 ctx_teardown(&ctx); 107 108 return 0; 109 } 110 111 /* 112 * Verify kqueue write event. 113 */ 114 int 115 test_kqueue_write(void) 116 { 117 struct context ctx; 118 ssize_t n; 119 120 ctx_setup(&ctx, KQUEUE_WRITE); 121 122 n = write(ctx.c_pipe[1], ctx.c_buf, ctx.c_bufsiz); 123 if (n == -1) 124 err(1, "write"); 125 if ((size_t)n != ctx.c_bufsiz) 126 errx(1, "write: %ld != %zu", n, ctx.c_bufsiz); 127 128 ctx_thread_start(&ctx); 129 130 while (ctx_thread_alive(&ctx)) { 131 unsigned char c; 132 133 n = read(ctx.c_pipe[0], &c, 1); 134 if (n == -1) 135 err(1, "read"); 136 if (n != 1) 137 errx(1, "read: %ld != 1", n); 138 } 139 140 ctx_teardown(&ctx); 141 142 return 0; 143 } 144 145 /* 146 * XXX Verify kqueue write event. 
147 */ 148 int 149 test_kqueue_write_eof(void) 150 { 151 152 return 0; 153 } 154 155 static void 156 ctx_setup(struct context *ctx, enum kqueue_mode mode) 157 { 158 int error; 159 160 ctx->c_mode = mode; 161 ctx->c_alive = 1; 162 163 if (pipe(ctx->c_pipe) == -1) 164 err(1, "pipe"); 165 166 ctx->c_kq = kqueue(); 167 if (ctx->c_kq == -1) 168 err(1, "kqueue"); 169 170 ctx->c_bufsiz = PIPE_SIZE; 171 ctx->c_buf = malloc(ctx->c_bufsiz); 172 if (ctx->c_buf == NULL) 173 err(1, NULL); 174 175 error = pthread_mutex_init(&ctx->c_mtx, NULL); 176 if (error) 177 errc(1, error, "pthread_mutex_init"); 178 } 179 180 static void 181 ctx_teardown(struct context *ctx) 182 { 183 int error; 184 185 error = pthread_join(ctx->c_th, NULL); 186 if (error) 187 errc(1, error, "pthread_join"); 188 189 error = pthread_mutex_destroy(&ctx->c_mtx); 190 if (error) 191 errc(1, error, "pthread_mutex_destroy"); 192 193 free(ctx->c_buf); 194 195 close(ctx->c_pipe[0]); 196 close(ctx->c_pipe[1]); 197 close(ctx->c_kq); 198 199 } 200 201 static int 202 ctx_thread_alive(struct context *ctx) 203 { 204 int alive; 205 206 ctx_lock(ctx); 207 alive = ctx->c_alive; 208 ctx_unlock(ctx); 209 return alive; 210 } 211 212 static void 213 ctx_thread_start(struct context *ctx) 214 { 215 int error; 216 217 error = pthread_create(&ctx->c_th, NULL, kqueue_thread, ctx); 218 if (error) 219 errc(1, error, "pthread_create"); 220 } 221 222 static void 223 ctx_lock(struct context *ctx) 224 { 225 int error; 226 227 error = pthread_mutex_lock(&ctx->c_mtx); 228 if (error) 229 errc(1, error, "pthread_mutex_lock"); 230 } 231 232 static void ctx_unlock(struct context *ctx) 233 { 234 int error; 235 236 error = pthread_mutex_unlock(&ctx->c_mtx); 237 if (error) 238 errc(1, error, "pthread_mutex_unlock"); 239 } 240 241 static void * 242 kqueue_thread(void *arg) 243 { 244 struct context *ctx = arg; 245 struct kevent kev; 246 int fd, filter, nevents; 247 248 switch (ctx->c_mode) { 249 case KQUEUE_READ: 250 case KQUEUE_READ_EOF: 251 fd = 
ctx->c_pipe[0]; 252 filter = EVFILT_READ; 253 break; 254 case KQUEUE_WRITE: 255 case KQUEUE_WRITE_EOF: 256 fd = ctx->c_pipe[1]; 257 filter = EVFILT_WRITE; 258 break; 259 } 260 261 EV_SET(&kev, fd, filter, EV_ADD, 0, 0, NULL); 262 nevents = kevent(ctx->c_kq, &kev, 1, NULL, 0, NULL); 263 if (nevents == -1) 264 err(1, "kevent"); 265 nevents = kevent(ctx->c_kq, NULL, 0, &kev, 1, NULL); 266 if (nevents == -1) 267 err(1, "kevent"); 268 if (nevents != 1) 269 errx(1, "kevent: %d != 1", nevents); 270 271 if ((int)kev.ident != fd) 272 errx(1, "kevent: ident"); 273 if (kev.filter != filter) 274 errx(1, "kevent: filter"); 275 276 switch (ctx->c_mode) { 277 case KQUEUE_READ_EOF: 278 case KQUEUE_WRITE_EOF: 279 if ((kev.flags & EV_EOF) == 0) 280 errx(1, "kevent: eof"); 281 break; 282 default: 283 break; 284 } 285 286 ctx_lock(ctx); 287 ctx->c_alive = 0; 288 ctx_unlock(ctx); 289 290 return NULL; 291 } 292