/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "synonyms.h"
#include "thr_uberdata.h"
#include "asyncio.h"
#include <atomic.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/port.h>

static int _aio_hash_insert(aio_result_t *, aio_req_t *);
static aio_req_t *_aio_req_get(aio_worker_t *);
static void _aio_req_add(aio_req_t *, aio_worker_t **, int);
static void _aio_req_del(aio_worker_t *, aio_req_t *, int);
static void _aio_work_done(aio_worker_t *);
static void _aio_enq_doneq(aio_req_t *);

extern void _aio_lio_free(aio_lio_t *);

extern int __fdsync(int, int);
extern int _port_dispatch(int, int, int, int, uintptr_t, void *);

static int _aio_fsync_del(aio_worker_t *, aio_req_t *);
static void _aiodone(aio_req_t *, ssize_t, int);
static void _aio_cancel_work(aio_worker_t *, int, int *, int *);
static void _aio_finish_request(aio_worker_t *, ssize_t, int);

/*
 * switch for kernel async I/O
 */
int _kaio_ok = 0;		/* 0 = disabled, 1 = on, -1 = error */

/*
 * Key for thread-specific data
 */
pthread_key_t _aio_key;

/*
 * Array for determining whether or not a file supports kaio.
 * Initialized in _kaio_init().
 */
uint32_t *_kaio_supported = NULL;

/*
 * workers for read/write requests
 * (__aio_mutex lock protects circular linked list of workers)
 */
aio_worker_t *__workers_rw;	/* circular list of AIO workers */
aio_worker_t *__nextworker_rw;	/* next worker in list of workers */
int __rw_workerscnt;		/* number of read/write workers */

/*
 * worker for notification requests.
 */
aio_worker_t *__workers_no;	/* circular list of AIO workers */
aio_worker_t *__nextworker_no;	/* next worker in list of workers */
int __no_workerscnt;		/* number of notification workers */

aio_req_t *_aio_done_tail;	/* list of done requests */
aio_req_t *_aio_done_head;

mutex_t __aio_initlock = DEFAULTMUTEX;	/* makes aio initialization atomic */
cond_t __aio_initcv = DEFAULTCV;
int __aio_initbusy = 0;

mutex_t __aio_mutex = DEFAULTMUTEX;	/* protects counts, and linked lists */
cond_t _aio_iowait_cv = DEFAULTCV;	/* wait for userland I/Os */

pid_t __pid = (pid_t)-1;	/* initialize as invalid pid */
int _sigio_enabled = 0;		/* when set, send SIGIO signal */

aio_hash_t *_aio_hash;

aio_req_t *_aio_doneq;		/* double linked done queue list */

int _aio_donecnt = 0;
int _aio_waitncnt = 0;		/* # of requests for aio_waitn */
int _aio_doneq_cnt = 0;
int _aio_outstand_cnt = 0;	/* # of outstanding requests */
int _kaio_outstand_cnt = 0;	/* # of outstanding kaio requests */
int _aio_req_done_cnt = 0;	/* req. done but not in "done queue" */
int _aio_kernel_suspend = 0;	/* active kernel kaio calls */
int _aio_suscv_cnt = 0;		/* aio_suspend calls waiting on cv's */

int _max_workers = 256;		/* max number of workers permitted */
int _min_workers = 4;		/* min number of workers */
int _minworkload = 2;		/* min number of requests in q */
int _aio_worker_cnt = 0;	/* number of workers to do requests */
int __uaio_ok = 0;		/* AIO has been enabled */
sigset_t _worker_set;		/* worker's signal mask */

int _aiowait_flag = 0;		/* when set, aiowait() is in progress */
int _aio_flags = 0;		/* see the defines in asyncio.h */

aio_worker_t *_kaiowp = NULL;	/* points to kaio cleanup thread */

int hz;				/* clock ticks per second */

static int
_kaio_supported_init(void)
{
	void *ptr;
	size_t size;

	if (_kaio_supported != NULL)	/* already initialized */
		return (0);

	size = MAX_KAIO_FDARRAY_SIZE * sizeof (uint32_t);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
	if (ptr == MAP_FAILED)
		return (-1);
	_kaio_supported = ptr;
	return (0);
}

/*
 * The aio subsystem is initialized when an AIO request is made.
 * Constants are initialized, such as the maximum number of workers
 * that the subsystem can create and the minimum number of workers
 * permitted before imposing some restrictions.  Also, some
 * workers are created.
 */
int
__uaio_init(void)
{
	int ret = -1;
	int i;

	lmutex_lock(&__aio_initlock);
	while (__aio_initbusy)
		(void) _cond_wait(&__aio_initcv, &__aio_initlock);
	if (__uaio_ok) {	/* already initialized */
		lmutex_unlock(&__aio_initlock);
		return (0);
	}
	__aio_initbusy = 1;
	lmutex_unlock(&__aio_initlock);

	hz = (int)sysconf(_SC_CLK_TCK);
	__pid = getpid();

	setup_cancelsig(SIGAIOCANCEL);

	if (_kaio_supported_init() != 0)
		goto out;

	/*
	 * Allocate and initialize the hash table.
	 * Do this only once, even if __uaio_init() is called twice.
	 */
	if (_aio_hash == NULL) {
		/* LINTED pointer cast */
		_aio_hash = (aio_hash_t *)mmap(NULL,
		    HASHSZ * sizeof (aio_hash_t), PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
		if ((void *)_aio_hash == MAP_FAILED) {
			_aio_hash = NULL;
			goto out;
		}
		for (i = 0; i < HASHSZ; i++)
			(void) mutex_init(&_aio_hash[i].hash_lock,
			    USYNC_THREAD, NULL);
	}

	/*
	 * Initialize worker's signal mask to only catch SIGAIOCANCEL.
	 */
	(void) sigfillset(&_worker_set);
	(void) sigdelset(&_worker_set, SIGAIOCANCEL);

	/*
	 * Create one worker to send asynchronous notifications.
	 * Do this only once, even if __uaio_init() is called twice.
	 */
	if (__no_workerscnt == 0 &&
	    (_aio_create_worker(NULL, AIONOTIFY) != 0)) {
		errno = EAGAIN;
		goto out;
	}

	/*
	 * Create the minimum number of read/write workers.
	 * Then check that at least one worker was created;
	 * lwp_create() calls could fail because of segkp exhaustion.
	 */
	for (i = 0; i < _min_workers; i++)
		(void) _aio_create_worker(NULL, AIOREAD);
	if (__rw_workerscnt == 0) {
		errno = EAGAIN;
		goto out;
	}

	ret = 0;
out:
	lmutex_lock(&__aio_initlock);
	if (ret == 0)
		__uaio_ok = 1;
	__aio_initbusy = 0;
	(void) cond_broadcast(&__aio_initcv);
	lmutex_unlock(&__aio_initlock);
	return (ret);
}

/*
 * Called from close() before actually performing the real _close().
 */
void
_aio_close(int fd)
{
	if (fd < 0)	/* avoid cancelling everything */
		return;
	/*
	 * Cancel all outstanding aio requests for this file descriptor.
	 */
	if (__uaio_ok)
		(void) aiocancel_all(fd);
	/*
	 * If we have allocated the bit array, clear the bit for this file.
	 * The next open may re-use this file descriptor and the new file
	 * may have different kaio() behaviour.
	 */
	if (_kaio_supported != NULL)
		CLEAR_KAIO_SUPPORTED(fd);
}

/*
 * special kaio cleanup thread sits in a loop in the
 * kernel waiting for pending kaio requests to complete.
 */
void *
_kaio_cleanup_thread(void *arg)
{
	if (pthread_setspecific(_aio_key, arg) != 0)
		aio_panic("_kaio_cleanup_thread, pthread_setspecific()");
	(void) _kaio(AIOSTART);
	return (arg);
}

/*
 * initialize kaio.
 */
void
_kaio_init()
{
	int error;
	sigset_t oset;

	lmutex_lock(&__aio_initlock);
	while (__aio_initbusy)
		(void) _cond_wait(&__aio_initcv, &__aio_initlock);
	if (_kaio_ok) {		/* already initialized */
		lmutex_unlock(&__aio_initlock);
		return;
	}
	__aio_initbusy = 1;
	lmutex_unlock(&__aio_initlock);

	if (_kaio_supported_init() != 0)
		error = ENOMEM;
	else if ((_kaiowp = _aio_worker_alloc()) == NULL)
		error = ENOMEM;
	else if ((error = (int)_kaio(AIOINIT)) == 0) {
		(void) pthread_sigmask(SIG_SETMASK, &maskset, &oset);
		error = thr_create(NULL, AIOSTKSIZE, _kaio_cleanup_thread,
		    _kaiowp, THR_DAEMON, &_kaiowp->work_tid);
		(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
	}
	if (error && _kaiowp != NULL) {
		_aio_worker_free(_kaiowp);
		_kaiowp = NULL;
	}

	lmutex_lock(&__aio_initlock);
	if (error)
		_kaio_ok = -1;
	else
		_kaio_ok = 1;
	__aio_initbusy = 0;
	(void) cond_broadcast(&__aio_initcv);
	lmutex_unlock(&__aio_initlock);
}

int
aioread(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
    aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOREAD));
}

int
aiowrite(int fd, caddr_t buf, int bufsz, off_t offset, int whence,
    aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOWRITE));
}

#if !defined(_LP64)
int
aioread64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
    aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAREAD64));
}

int
aiowrite64(int fd, caddr_t buf, int bufsz, off64_t offset, int whence,
    aio_result_t *resultp)
{
	return (_aiorw(fd, buf, bufsz, offset, whence, resultp, AIOAWRITE64));
}
#endif	/* !defined(_LP64) */
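/*
 * Illustrative sketch (not part of the original file): a typical caller
 * of the Solaris-style interfaces above issues an aioread() and reaps
 * the completion with aiowait().  The descriptor "fd" and the consumer
 * function "process" are assumptions of the example; error handling is
 * elided.
 *
 *	aio_result_t res;
 *	char buf[8192];
 *
 *	if (aioread(fd, buf, sizeof (buf), (off_t)0, SEEK_SET, &res) == 0) {
 *		aio_result_t *donep = aiowait(NULL);
 *		if (donep == &res && res.aio_return != -1)
 *			process(buf, res.aio_return);
 *	}
 */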
int
_aiorw(int fd, caddr_t buf, int bufsz, offset_t offset, int whence,
    aio_result_t *resultp, int mode)
{
	aio_req_t *reqp;
	aio_args_t *ap;
	offset_t loffset;
	struct stat stat;
	int error = 0;
	int kerr;
	int umode;

	switch (whence) {

	case SEEK_SET:
		loffset = offset;
		break;
	case SEEK_CUR:
		if ((loffset = llseek(fd, 0, SEEK_CUR)) == -1)
			error = -1;
		else
			loffset += offset;
		break;
	case SEEK_END:
		if (fstat(fd, &stat) == -1)
			error = -1;
		else
			loffset = offset + stat.st_size;
		break;
	default:
		errno = EINVAL;
		error = -1;
	}

	if (error)
		return (error);

	/* initialize kaio */
	if (!_kaio_ok)
		_kaio_init();

	/*
	 * _aio_do_request() needs the original request code (mode) to be
	 * able to choose the appropriate 32/64-bit function.  All other
	 * functions only require the difference between READ and WRITE
	 * (umode).
	 */
	if (mode == AIOAREAD64 || mode == AIOAWRITE64)
		umode = mode - AIOAREAD64;
	else
		umode = mode;

	/*
	 * Try kernel aio first.
	 * If errno is ENOTSUP/EBADFD, fall back to the thread implementation.
	 */
	if (_kaio_ok > 0 && KAIO_SUPPORTED(fd)) {
		resultp->aio_errno = 0;
		sig_mutex_lock(&__aio_mutex);
		_kaio_outstand_cnt++;
		kerr = (int)_kaio(((resultp->aio_return == AIO_INPROGRESS) ?
		    (umode | AIO_POLL_BIT) : umode),
		    fd, buf, bufsz, loffset, resultp);
		if (kerr == 0) {
			sig_mutex_unlock(&__aio_mutex);
			return (0);
		}
		_kaio_outstand_cnt--;
		sig_mutex_unlock(&__aio_mutex);
		if (errno != ENOTSUP && errno != EBADFD)
			return (-1);
		if (errno == EBADFD)
			SET_KAIO_NOT_SUPPORTED(fd);
	}

	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);

	if ((reqp = _aio_req_alloc()) == NULL) {
		errno = EAGAIN;
		return (-1);
	}

	/*
	 * _aio_do_request() checks reqp->req_op to differentiate
	 * between 32 and 64 bit access.
	 */
	reqp->req_op = mode;
	reqp->req_resultp = resultp;
	ap = &reqp->req_args;
	ap->fd = fd;
	ap->buf = buf;
	ap->bufsz = bufsz;
	ap->offset = loffset;

	if (_aio_hash_insert(resultp, reqp) != 0) {
		_aio_req_free(reqp);
		errno = EINVAL;
		return (-1);
	}
	/*
	 * _aio_req_add() only needs the difference between READ and
	 * WRITE to choose the right worker queue.
	 */
	_aio_req_add(reqp, &__nextworker_rw, umode);
	return (0);
}

int
aiocancel(aio_result_t *resultp)
{
	aio_req_t *reqp;
	aio_worker_t *aiowp;
	int ret;
	int done = 0;
	int canceled = 0;

	if (!__uaio_ok) {
		errno = EINVAL;
		return (-1);
	}

	sig_mutex_lock(&__aio_mutex);
	reqp = _aio_hash_find(resultp);
	if (reqp == NULL) {
		if (_aio_outstand_cnt == _aio_req_done_cnt)
			errno = EINVAL;
		else
			errno = EACCES;
		ret = -1;
	} else {
		aiowp = reqp->req_worker;
		sig_mutex_lock(&aiowp->work_qlock1);
		(void) _aio_cancel_req(aiowp, reqp, &canceled, &done);
		sig_mutex_unlock(&aiowp->work_qlock1);

		if (canceled) {
			ret = 0;
		} else {
			if (_aio_outstand_cnt == 0 ||
			    _aio_outstand_cnt == _aio_req_done_cnt)
				errno = EINVAL;
			else
				errno = EACCES;
			ret = -1;
		}
	}
	sig_mutex_unlock(&__aio_mutex);
	return (ret);
}
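/*
 * Illustrative sketch (not part of the original file): cancelling the
 * request from the aioread() example earlier.  aiocancel() succeeds
 * only while the request is still queued or in progress; once the
 * request has completed it fails with errno set to EINVAL or EACCES,
 * as the code above shows.
 *
 *	if (aiocancel(&res) == 0) {
 *		...request cancelled; &res will not be returned
 *		   by a later aiowait()...
 *	}
 */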
/*
 * This must be asynch safe
 */
aio_result_t *
aiowait(struct timeval *uwait)
{
	aio_result_t *uresultp;
	aio_result_t *kresultp;
	aio_result_t *resultp;
	int dontblock;
	int timedwait = 0;
	int kaio_errno = 0;
	struct timeval twait;
	struct timeval *wait = NULL;
	hrtime_t hrtend;
	hrtime_t hres;

	if (uwait) {
		/*
		 * Check for a valid specified wait time.
		 * If it is invalid, fail the call right away.
		 */
		if (uwait->tv_sec < 0 || uwait->tv_usec < 0 ||
		    uwait->tv_usec >= MICROSEC) {
			errno = EINVAL;
			return ((aio_result_t *)-1);
		}

		if (uwait->tv_sec > 0 || uwait->tv_usec > 0) {
			hrtend = gethrtime() +
			    (hrtime_t)uwait->tv_sec * NANOSEC +
			    (hrtime_t)uwait->tv_usec * (NANOSEC / MICROSEC);
			twait = *uwait;
			wait = &twait;
			timedwait++;
		} else {
			/* polling */
			sig_mutex_lock(&__aio_mutex);
			if (_kaio_outstand_cnt == 0) {
				kresultp = (aio_result_t *)-1;
			} else {
				kresultp = (aio_result_t *)_kaio(AIOWAIT,
				    (struct timeval *)-1, 1);
				if (kresultp != (aio_result_t *)-1 &&
				    kresultp != NULL &&
				    kresultp != (aio_result_t *)1) {
					_kaio_outstand_cnt--;
					sig_mutex_unlock(&__aio_mutex);
					return (kresultp);
				}
			}
			uresultp = _aio_req_done();
			sig_mutex_unlock(&__aio_mutex);
			if (uresultp != NULL &&
			    uresultp != (aio_result_t *)-1) {
				return (uresultp);
			}
			if (uresultp == (aio_result_t *)-1 &&
			    kresultp == (aio_result_t *)-1) {
				errno = EINVAL;
				return ((aio_result_t *)-1);
			} else {
				return (NULL);
			}
		}
	}

	for (;;) {
		sig_mutex_lock(&__aio_mutex);
		uresultp = _aio_req_done();
		if (uresultp != NULL && uresultp != (aio_result_t *)-1) {
			sig_mutex_unlock(&__aio_mutex);
			resultp = uresultp;
			break;
		}
		_aiowait_flag++;
		dontblock = (uresultp == (aio_result_t *)-1);
		if (dontblock && _kaio_outstand_cnt == 0) {
			kresultp = (aio_result_t *)-1;
			kaio_errno = EINVAL;
		} else {
			sig_mutex_unlock(&__aio_mutex);
			kresultp = (aio_result_t *)_kaio(AIOWAIT,
			    wait, dontblock);
			sig_mutex_lock(&__aio_mutex);
			kaio_errno = errno;
		}
		_aiowait_flag--;
		sig_mutex_unlock(&__aio_mutex);
		if (kresultp == (aio_result_t *)1) {
			/* aiowait() awakened by an aionotify() */
			continue;
		} else if (kresultp != NULL &&
		    kresultp != (aio_result_t *)-1) {
			resultp = kresultp;
			sig_mutex_lock(&__aio_mutex);
			_kaio_outstand_cnt--;
			sig_mutex_unlock(&__aio_mutex);
			break;
		} else if (kresultp == (aio_result_t *)-1 &&
		    kaio_errno == EINVAL &&
		    uresultp == (aio_result_t *)-1) {
			errno = kaio_errno;
			resultp = (aio_result_t *)-1;
			break;
		} else if (kresultp == (aio_result_t *)-1 &&
		    kaio_errno == EINTR) {
			errno = kaio_errno;
			resultp = (aio_result_t *)-1;
			break;
		} else if (timedwait) {
			hres = hrtend - gethrtime();
			if (hres <= 0) {
				/* time is up; return */
				resultp = NULL;
				break;
			} else {
				/*
				 * Some time left.  Round up the remaining time
				 * in nanoseconds to microsec.  Retry the call.
				 */
				hres += (NANOSEC / MICROSEC) - 1;
				wait->tv_sec = hres / NANOSEC;
				wait->tv_usec =
				    (hres % NANOSEC) / (NANOSEC / MICROSEC);
			}
		} else {
			ASSERT(kresultp == NULL && uresultp == NULL);
			resultp = NULL;
			continue;
		}
	}
	return (resultp);
}

/*
 * _aio_get_timedelta calculates the remaining time and stores the result
 * into timespec_t *wait.
 */

int
_aio_get_timedelta(timespec_t *end, timespec_t *wait)
{
	int ret = 0;
	struct timeval cur;
	timespec_t curtime;

	(void) gettimeofday(&cur, NULL);
	curtime.tv_sec = cur.tv_sec;
	curtime.tv_nsec = cur.tv_usec * 1000;	/* convert us to ns */

	if (end->tv_sec >= curtime.tv_sec) {
		wait->tv_sec = end->tv_sec - curtime.tv_sec;
		if (end->tv_nsec >= curtime.tv_nsec) {
			wait->tv_nsec = end->tv_nsec - curtime.tv_nsec;
			if (wait->tv_sec == 0 && wait->tv_nsec == 0)
				ret = -1;	/* timer expired */
		} else {
			if (end->tv_sec > curtime.tv_sec) {
				wait->tv_sec -= 1;
				wait->tv_nsec = NANOSEC -
				    (curtime.tv_nsec - end->tv_nsec);
			} else {
				ret = -1;	/* timer expired */
			}
		}
	} else {
		ret = -1;
	}
	return (ret);
}
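/*
 * Worked example (not part of the original file) of the borrow logic
 * above: with end = { 5, 100000000 } and a current time of
 * { 4, 900000000 }, end->tv_nsec < curtime.tv_nsec, so one second is
 * borrowed: wait->tv_sec becomes 5 - 4 - 1 = 0 and wait->tv_nsec
 * becomes NANOSEC - (900000000 - 100000000) = 200000000, i.e. 0.2s
 * remain.
 */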
/*
 * If closing by file descriptor: we will simply cancel all the
 * outstanding aio requests and return.  The requests in question will
 * have either noticed the cancellation notice before, during, or after
 * initiating I/O.
 */
int
aiocancel_all(int fd)
{
	aio_req_t *reqp;
	aio_req_t **reqpp;
	aio_worker_t *first;
	aio_worker_t *next;
	int canceled = 0;
	int done = 0;
	int cancelall = 0;

	sig_mutex_lock(&__aio_mutex);

	if (_aio_outstand_cnt == 0) {
		sig_mutex_unlock(&__aio_mutex);
		return (AIO_ALLDONE);
	}

	/*
	 * Cancel requests from the read/write workers' queues.
	 */
	first = __nextworker_rw;
	next = first;
	do {
		_aio_cancel_work(next, fd, &canceled, &done);
	} while ((next = next->work_forw) != first);

	/*
	 * Finally, check if there are requests on the done queue that
	 * should be canceled.
	 */
	if (fd < 0)
		cancelall = 1;
	reqpp = &_aio_done_tail;
	while ((reqp = *reqpp) != NULL) {
		if (cancelall || reqp->req_args.fd == fd) {
			*reqpp = reqp->req_next;
			_aio_donecnt--;
			(void) _aio_hash_del(reqp->req_resultp);
			_aio_req_free(reqp);
		} else
			reqpp = &reqp->req_next;
	}
	if (cancelall) {
		ASSERT(_aio_donecnt == 0);
		_aio_done_head = NULL;
	}
	sig_mutex_unlock(&__aio_mutex);

	if (canceled && done == 0)
		return (AIO_CANCELED);
	else if (done && canceled == 0)
		return (AIO_ALLDONE);
	else if ((canceled + done == 0) && KAIO_SUPPORTED(fd))
		return ((int)_kaio(AIOCANCEL, fd, NULL));
	return (AIO_NOTCANCELED);
}

/*
 * Cancel requests from a given work queue.  If the file descriptor
 * parameter, fd, is non-negative, then only cancel those requests
 * in this queue that are to this file descriptor.  If the fd
 * parameter is -1, then cancel all requests.
 */
static void
_aio_cancel_work(aio_worker_t *aiowp, int fd, int *canceled, int *done)
{
	aio_req_t *reqp;

	sig_mutex_lock(&aiowp->work_qlock1);
	/*
	 * Cancel queued requests first.
	 */
	reqp = aiowp->work_tail1;
	while (reqp != NULL) {
		if (fd < 0 || reqp->req_args.fd == fd) {
			if (_aio_cancel_req(aiowp, reqp, canceled, done)) {
				/*
				 * The caller's locks were dropped.
				 * reqp is invalid; start traversing
				 * the list from the beginning again.
				 */
				reqp = aiowp->work_tail1;
				continue;
			}
		}
		reqp = reqp->req_next;
	}
	/*
	 * Since the queued requests have been canceled, there can
	 * only be one in-progress request that should be canceled.
	 */
	if ((reqp = aiowp->work_req) != NULL &&
	    (fd < 0 || reqp->req_args.fd == fd))
		(void) _aio_cancel_req(aiowp, reqp, canceled, done);
	sig_mutex_unlock(&aiowp->work_qlock1);
}

/*
 * Cancel a request.  Return 1 if the caller's locks were temporarily
 * dropped, otherwise return 0.
 */
int
_aio_cancel_req(aio_worker_t *aiowp, aio_req_t *reqp, int *canceled, int *done)
{
	int ostate = reqp->req_state;

	ASSERT(MUTEX_HELD(&__aio_mutex));
	ASSERT(MUTEX_HELD(&aiowp->work_qlock1));
	if (ostate == AIO_REQ_CANCELED)
		return (0);
	if (ostate == AIO_REQ_DONE || ostate == AIO_REQ_DONEQ) {
		(*done)++;
		return (0);
	}
	if (reqp->req_op == AIOFSYNC && reqp != aiowp->work_req) {
		ASSERT(POSIX_AIO(reqp));
		/* Cancel the queued aio_fsync() request */
		if (!reqp->req_head->lio_canned) {
			reqp->req_head->lio_canned = 1;
			_aio_outstand_cnt--;
			(*canceled)++;
		}
		return (0);
	}
	reqp->req_state = AIO_REQ_CANCELED;
	_aio_req_del(aiowp, reqp, ostate);
	(void) _aio_hash_del(reqp->req_resultp);
	(*canceled)++;
	if (reqp == aiowp->work_req) {
		ASSERT(ostate == AIO_REQ_INPROGRESS);
		/*
		 * Set the result values now, before _aiodone() is called.
		 * We do this because the application can expect aio_return
		 * and aio_errno to be set to -1 and ECANCELED, respectively,
		 * immediately after a successful return from aiocancel()
		 * or aio_cancel().
		 */
		_aio_set_result(reqp, -1, ECANCELED);
		(void) thr_kill(aiowp->work_tid, SIGAIOCANCEL);
		return (0);
	}
	if (!POSIX_AIO(reqp)) {
		_aio_outstand_cnt--;
		_aio_set_result(reqp, -1, ECANCELED);
		return (0);
	}
	sig_mutex_unlock(&aiowp->work_qlock1);
	sig_mutex_unlock(&__aio_mutex);
	_aiodone(reqp, -1, ECANCELED);
	sig_mutex_lock(&__aio_mutex);
	sig_mutex_lock(&aiowp->work_qlock1);
	return (1);
}

int
_aio_create_worker(aio_req_t *reqp, int mode)
{
	aio_worker_t *aiowp, **workers, **nextworker;
	int *aio_workerscnt;
	void *(*func)(void *);
	sigset_t oset;
	int error;

	/*
	 * Put the new worker thread in the right queue.
	 */
	switch (mode) {
	case AIOREAD:
	case AIOWRITE:
	case AIOAREAD:
	case AIOAWRITE:
#if !defined(_LP64)
	case AIOAREAD64:
	case AIOAWRITE64:
#endif
		workers = &__workers_rw;
		nextworker = &__nextworker_rw;
		aio_workerscnt = &__rw_workerscnt;
		func = _aio_do_request;
		break;
	case AIONOTIFY:
		workers = &__workers_no;
		nextworker = &__nextworker_no;
		func = _aio_do_notify;
		aio_workerscnt = &__no_workerscnt;
		break;
	default:
		aio_panic("_aio_create_worker: invalid mode");
		break;
	}

	if ((aiowp = _aio_worker_alloc()) == NULL)
		return (-1);

	if (reqp) {
		reqp->req_state = AIO_REQ_QUEUED;
		reqp->req_worker = aiowp;
		aiowp->work_head1 = reqp;
		aiowp->work_tail1 = reqp;
		aiowp->work_next1 = reqp;
		aiowp->work_count1 = 1;
		aiowp->work_minload1 = 1;
	}

	/*
	 * Create the worker suspended so that it can be linked into the
	 * circular worker list below before it starts running.
	 */
	(void) pthread_sigmask(SIG_SETMASK, &maskset, &oset);
	error = thr_create(NULL, AIOSTKSIZE, func, aiowp,
	    THR_DAEMON | THR_SUSPENDED, &aiowp->work_tid);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
	if (error) {
		if (reqp) {
			reqp->req_state = 0;
			reqp->req_worker = NULL;
		}
		_aio_worker_free(aiowp);
		return (-1);
	}

	lmutex_lock(&__aio_mutex);
	(*aio_workerscnt)++;
	if (*workers == NULL) {
		aiowp->work_forw = aiowp;
		aiowp->work_backw = aiowp;
		*nextworker = aiowp;
		*workers = aiowp;
	} else {
		aiowp->work_backw = (*workers)->work_backw;
		aiowp->work_forw = (*workers);
		(*workers)->work_backw->work_forw = aiowp;
		(*workers)->work_backw = aiowp;
	}
	_aio_worker_cnt++;
	lmutex_unlock(&__aio_mutex);

	(void) thr_continue(aiowp->work_tid);

	return (0);
}
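/*
 * Illustrative note (not part of the original file): the splice in
 * _aio_create_worker() maintains a circular, doubly-linked ring.
 * Starting from an empty ring, inserting workers A and then B yields
 *
 *	A->work_forw == B, A->work_backw == B,
 *	B->work_forw == A, B->work_backw == A,
 *
 * so the do/while traversal in aiocancel_all() terminates when it
 * returns to the worker it started from.
 */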
/*
 * This is the worker's main routine.
 * The task of this function is to execute all queued requests;
 * once the last pending request is executed this function will block
 * in _aio_idle().  A new incoming request must wake up this thread to
 * restart the work.
 * Every worker has its own work queue.  The queue lock is required
 * to synchronize the addition of new requests for this worker or
 * the cancellation of pending/running requests.
 *
 * Cancellation scenarios:
 * The cancellation of a request is done asynchronously using
 * _aio_cancel_req() from another thread context.
 * A queued request can be cancelled in different manners:
 * a) request is queued but not "in progress" or "done" (AIO_REQ_QUEUED):
 *	- lock the queue -> remove the request -> unlock the queue
 *	- this function/thread does not detect this cancellation process
 * b) request is in progress (AIO_REQ_INPROGRESS):
 *	- this function first allows the cancellation of the running
 *	  request with the flag "work_cancel_flg=1"
 *		see _aio_req_get() -> _aio_cancel_on()
 *	  During this phase, it is allowed to interrupt the worker
 *	  thread running the request (this thread) using the SIGAIOCANCEL
 *	  signal.
 *	  Once this thread returns from the kernel (because the request
 *	  is just done), it must disable a possible cancellation
 *	  and proceed to finish the request.  To disable the cancellation
 *	  this thread uses _aio_cancel_off() to set "work_cancel_flg=0".
 * c) request is already done (AIO_REQ_DONE || AIO_REQ_DONEQ):
 *	same procedure as in a)
 *
 * To b)
 *	This thread uses sigsetjmp() to define the position in the code
 *	where it wishes to continue working in the case that a SIGAIOCANCEL
 *	signal is detected.
 *	Normally this thread should get the cancellation signal during the
 *	kernel phase (reading or writing).  In that case the signal handler
 *	aiosigcancelhndlr() is activated using the worker thread context,
 *	which again will use the siglongjmp() function to break the standard
 *	code flow and jump to the "sigsetjmp" position, provided that
 *	"work_cancel_flg" is set to "1".
 *	Because "work_cancel_flg" is only manipulated by this worker
 *	thread and it can only run on one CPU at a given time, it is not
 *	necessary to protect that flag with the queue lock.
 *	Returning from the kernel (read or write system call) we must
 *	first disable the use of the SIGAIOCANCEL signal and accordingly
 *	the use of the siglongjmp() function to prevent a possible deadlock:
 *	- It can happen that this worker thread returns from the kernel and
 *	  blocks on "work_qlock1",
 *	- then a second thread cancels the apparently "in progress" request
 *	  and sends the SIGAIOCANCEL signal to the worker thread,
 *	- the worker thread is granted "work_qlock1" and returns from
 *	  the kernel,
 *	- the kernel detects the pending signal and activates the signal
 *	  handler instead,
 *	- if "work_cancel_flg" is still set then the signal handler
 *	  would use siglongjmp() to cancel the "in progress" request and
 *	  would try to acquire the same work_qlock1 in _aio_req_get()
 *	  for a second time => deadlock.
 *	To avoid that situation we disable the cancellation of the request
 *	in progress BEFORE we try to acquire the work_qlock1.
 *	In that case the signal handler will not call siglongjmp() and the
 *	worker thread will continue running the standard code flow.
 *	Then this thread must check the AIO_REQ_CANCELED flag to emulate
 *	the siglongjmp() that would otherwise have been required, freeing
 *	the work_qlock1 and avoiding a deadlock.
 */
void *
_aio_do_request(void *arglist)
{
	aio_worker_t *aiowp = (aio_worker_t *)arglist;
	ulwp_t *self = curthread;
	struct aio_args *arg;
	aio_req_t *reqp;		/* current AIO request */
	ssize_t retval;
	int error;

	if (pthread_setspecific(_aio_key, aiowp) != 0)
		aio_panic("_aio_do_request, pthread_setspecific()");
	(void) pthread_sigmask(SIG_SETMASK, &_worker_set, NULL);
	ASSERT(aiowp->work_req == NULL);

	/*
	 * We resume here when an operation is cancelled.
	 * On first entry, aiowp->work_req == NULL, so all
	 * we do is block SIGAIOCANCEL.
	 */
	(void) sigsetjmp(aiowp->work_jmp_buf, 0);
	ASSERT(self->ul_sigdefer == 0);

	sigoff(self);	/* block SIGAIOCANCEL */
	if (aiowp->work_req != NULL)
		_aio_finish_request(aiowp, -1, ECANCELED);

	for (;;) {
		/*
		 * Put completed requests on aio_done_list.  This has
		 * to be done as part of the main loop to ensure that
		 * we don't artificially starve any aiowait'ers.
		 */
		if (aiowp->work_done1)
			_aio_work_done(aiowp);

top:
		/* consume any deferred SIGAIOCANCEL signal here */
		sigon(self);
		sigoff(self);

		while ((reqp = _aio_req_get(aiowp)) == NULL) {
			if (_aio_idle(aiowp) != 0)
				goto top;
		}
		arg = &reqp->req_args;
		ASSERT(reqp->req_state == AIO_REQ_INPROGRESS ||
		    reqp->req_state == AIO_REQ_CANCELED);
		error = 0;

		switch (reqp->req_op) {
		case AIOREAD:
		case AIOAREAD:
			sigon(self);	/* unblock SIGAIOCANCEL */
			retval = pread(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = read(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
		case AIOWRITE:
		case AIOAWRITE:
			sigon(self);	/* unblock SIGAIOCANCEL */
			retval = pwrite(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = write(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
#if !defined(_LP64)
		case AIOAREAD64:
			sigon(self);	/* unblock SIGAIOCANCEL */
			retval = pread64(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = read(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
		case AIOAWRITE64:
			sigon(self);	/* unblock SIGAIOCANCEL */
			retval = pwrite64(arg->fd, arg->buf,
			    arg->bufsz, arg->offset);
			if (retval == -1) {
				if (errno == ESPIPE) {
					retval = write(arg->fd,
					    arg->buf, arg->bufsz);
					if (retval == -1)
						error = errno;
				} else {
					error = errno;
				}
			}
			sigoff(self);	/* block SIGAIOCANCEL */
			break;
#endif	/* !defined(_LP64) */
		case AIOFSYNC:
			if (_aio_fsync_del(aiowp, reqp))
				goto top;
			ASSERT(reqp->req_head == NULL);
			/*
			 * All writes for this fsync request are now
			 * acknowledged.  Now make these writes visible
			 * and put the final request into the hash table.
			 */
			if (reqp->req_state == AIO_REQ_CANCELED) {
				/* EMPTY */;
			} else if (arg->offset == O_SYNC) {
				if ((retval = __fdsync(arg->fd, FSYNC)) == -1)
					error = errno;
			} else {
				if ((retval = __fdsync(arg->fd, FDSYNC)) == -1)
					error = errno;
			}
			if (_aio_hash_insert(reqp->req_resultp, reqp) != 0)
				aio_panic("_aio_do_request(): AIOFSYNC: "
				    "request already in hash table");
			break;
		default:
			aio_panic("_aio_do_request, bad op");
		}

		_aio_finish_request(aiowp, retval, error);
	}
	/* NOTREACHED */
	return (NULL);
}

/*
 * Perform the tail processing for _aio_do_request().
 * The in-progress request may or may not have been cancelled.
 */
static void
_aio_finish_request(aio_worker_t *aiowp, ssize_t retval, int error)
{
	aio_req_t *reqp;

	sig_mutex_lock(&aiowp->work_qlock1);
	if ((reqp = aiowp->work_req) == NULL)
		sig_mutex_unlock(&aiowp->work_qlock1);
	else {
		aiowp->work_req = NULL;
		if (reqp->req_state == AIO_REQ_CANCELED) {
			retval = -1;
			error = ECANCELED;
		}
		if (!POSIX_AIO(reqp)) {
			sig_mutex_unlock(&aiowp->work_qlock1);
			sig_mutex_lock(&__aio_mutex);
			if (reqp->req_state == AIO_REQ_INPROGRESS)
				reqp->req_state = AIO_REQ_DONE;
			_aio_req_done_cnt++;
			_aio_set_result(reqp, retval, error);
			if (error == ECANCELED)
				_aio_outstand_cnt--;
			sig_mutex_unlock(&__aio_mutex);
		} else {
			if (reqp->req_state == AIO_REQ_INPROGRESS)
				reqp->req_state = AIO_REQ_DONE;
			sig_mutex_unlock(&aiowp->work_qlock1);
			_aiodone(reqp, retval, error);
		}
	}
}

void
_aio_req_mark_done(aio_req_t *reqp)
{
#if !defined(_LP64)
	if (reqp->req_largefile)
		((aiocb64_t *)reqp->req_aiocbp)->aio_state = USERAIO_DONE;
	else
#endif
		((aiocb_t *)reqp->req_aiocbp)->aio_state = USERAIO_DONE;
}

/*
 * Sleep for 'ticks' clock ticks to give somebody else a chance to run,
 * hopefully to consume one of our queued signals.
 */
static void
_aio_delay(int ticks)
{
	(void) usleep(ticks * (MICROSEC / hz));
}
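/*
 * Illustrative sketch (not part of the original file) of the
 * sigsetjmp()/siglongjmp() cancellation pattern that _aio_do_request()
 * uses above, reduced to its skeleton:
 *
 *	(void) sigsetjmp(jmpbuf, 0);		resume point
 *	for (;;) {
 *		...pick up the next request...
 *		enable cancellation;		window in which the signal
 *		retval = pread(...);		  handler may siglongjmp()
 *		disable cancellation;		  back to the resume point
 *		...finish the request...
 *	}
 *
 * The handler jumps only while cancellation is enabled, so the
 * bookkeeping after the system call can never be interrupted.
 */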
1186f841f6adSraf */ 1187f841f6adSraf static void 1188f841f6adSraf send_notification(notif_param_t *npp) 1189f841f6adSraf { 1190f841f6adSraf extern int __sigqueue(pid_t pid, int signo, 1191f841f6adSraf /* const union sigval */ void *value, int si_code, int block); 1192f841f6adSraf 1193f841f6adSraf if (npp->np_signo) 1194f841f6adSraf (void) __sigqueue(__pid, npp->np_signo, npp->np_user, 1195f841f6adSraf SI_ASYNCIO, 1); 1196f841f6adSraf else if (npp->np_port >= 0) 1197f841f6adSraf (void) _port_dispatch(npp->np_port, 0, PORT_SOURCE_AIO, 1198f841f6adSraf npp->np_event, npp->np_object, npp->np_user); 1199f841f6adSraf 1200f841f6adSraf if (npp->np_lio_signo) 1201f841f6adSraf (void) __sigqueue(__pid, npp->np_lio_signo, npp->np_lio_user, 1202f841f6adSraf SI_ASYNCIO, 1); 1203f841f6adSraf else if (npp->np_lio_port >= 0) 1204f841f6adSraf (void) _port_dispatch(npp->np_lio_port, 0, PORT_SOURCE_AIO, 1205f841f6adSraf npp->np_lio_event, npp->np_lio_object, npp->np_lio_user); 1206f841f6adSraf } 1207f841f6adSraf 1208f841f6adSraf /* 1209f841f6adSraf * Asynchronous notification worker. 1210f841f6adSraf */ 1211f841f6adSraf void * 1212f841f6adSraf _aio_do_notify(void *arg) 1213f841f6adSraf { 1214f841f6adSraf aio_worker_t *aiowp = (aio_worker_t *)arg; 1215f841f6adSraf aio_req_t *reqp; 1216f841f6adSraf 1217f841f6adSraf /* 1218f841f6adSraf * This isn't really necessary. All signals are blocked. 1219f841f6adSraf */ 1220f841f6adSraf if (pthread_setspecific(_aio_key, aiowp) != 0) 1221f841f6adSraf aio_panic("_aio_do_notify, pthread_setspecific()"); 1222f841f6adSraf 1223f841f6adSraf /* 1224f841f6adSraf * Notifications are never cancelled. 1225f841f6adSraf * All signals remain blocked, forever. 1226f841f6adSraf */ 1227f841f6adSraf for (;;) { 1228f841f6adSraf while ((reqp = _aio_req_get(aiowp)) == NULL) { 1229f841f6adSraf if (_aio_idle(aiowp) != 0) 1230f841f6adSraf aio_panic("_aio_do_notify: _aio_idle() failed"); 1231f841f6adSraf } 1232f841f6adSraf send_notification(&reqp->req_notify); 1233f841f6adSraf _aio_req_free(reqp); 1234f841f6adSraf } 1235f841f6adSraf 1236f841f6adSraf /* NOTREACHED */ 1237f841f6adSraf return (NULL); 1238f841f6adSraf } 1239f841f6adSraf 1240f841f6adSraf /* 1241f841f6adSraf * Do the completion semantics for a request that was either canceled 1242f841f6adSraf * by _aio_cancel_req() or was completed by _aio_do_request(). 1243f841f6adSraf */ 1244f841f6adSraf static void 1245f841f6adSraf _aiodone(aio_req_t *reqp, ssize_t retval, int error) 1246f841f6adSraf { 1247f841f6adSraf aio_result_t *resultp = reqp->req_resultp; 1248f841f6adSraf int notify = 0; 1249f841f6adSraf aio_lio_t *head; 1250f841f6adSraf int sigev_none; 1251f841f6adSraf int sigev_signal; 1252f841f6adSraf int sigev_thread; 1253f841f6adSraf int sigev_port; 1254f841f6adSraf notif_param_t np; 1255f841f6adSraf 1256f841f6adSraf /* 1257f841f6adSraf * We call _aiodone() only for Posix I/O. 
1258f841f6adSraf */ 1259f841f6adSraf ASSERT(POSIX_AIO(reqp)); 1260f841f6adSraf 1261f841f6adSraf sigev_none = 0; 1262f841f6adSraf sigev_signal = 0; 1263f841f6adSraf sigev_thread = 0; 1264f841f6adSraf sigev_port = 0; 1265f841f6adSraf np.np_signo = 0; 1266f841f6adSraf np.np_port = -1; 1267f841f6adSraf np.np_lio_signo = 0; 1268f841f6adSraf np.np_lio_port = -1; 1269f841f6adSraf 1270f841f6adSraf switch (reqp->req_sigevent.sigev_notify) { 1271f841f6adSraf case SIGEV_NONE: 1272f841f6adSraf sigev_none = 1; 1273f841f6adSraf break; 1274f841f6adSraf case SIGEV_SIGNAL: 1275f841f6adSraf sigev_signal = 1; 1276f841f6adSraf break; 1277f841f6adSraf case SIGEV_THREAD: 1278f841f6adSraf sigev_thread = 1; 1279f841f6adSraf break; 1280f841f6adSraf case SIGEV_PORT: 1281f841f6adSraf sigev_port = 1; 1282f841f6adSraf break; 1283f841f6adSraf default: 1284f841f6adSraf aio_panic("_aiodone: improper sigev_notify"); 1285f841f6adSraf break; 1286f841f6adSraf } 1287f841f6adSraf 1288f841f6adSraf /* 1289f841f6adSraf * Figure out the notification parameters while holding __aio_mutex. 1290f841f6adSraf * Actually perform the notifications after dropping __aio_mutex. 1291f841f6adSraf * This allows us to sleep for a long time (if the notifications 1292f841f6adSraf * incur delays) without impeding other async I/O operations. 1293f841f6adSraf */ 1294f841f6adSraf 1295f841f6adSraf sig_mutex_lock(&__aio_mutex); 1296f841f6adSraf 1297f841f6adSraf if (sigev_signal) { 1298f841f6adSraf if ((np.np_signo = reqp->req_sigevent.sigev_signo) != 0) 1299f841f6adSraf notify = 1; 1300f841f6adSraf np.np_user = reqp->req_sigevent.sigev_value.sival_ptr; 1301f841f6adSraf } else if (sigev_thread | sigev_port) { 1302f841f6adSraf if ((np.np_port = reqp->req_sigevent.sigev_signo) >= 0) 1303f841f6adSraf notify = 1; 1304f841f6adSraf np.np_event = reqp->req_op; 1305f841f6adSraf if (np.np_event == AIOFSYNC && reqp->req_largefile) 1306f841f6adSraf np.np_event = AIOFSYNC64; 1307f841f6adSraf np.np_object = (uintptr_t)reqp->req_aiocbp; 1308f841f6adSraf np.np_user = reqp->req_sigevent.sigev_value.sival_ptr; 1309f841f6adSraf } 1310f841f6adSraf 1311f841f6adSraf if (resultp->aio_errno == EINPROGRESS) 1312f841f6adSraf _aio_set_result(reqp, retval, error); 1313f841f6adSraf 1314f841f6adSraf _aio_outstand_cnt--; 1315f841f6adSraf 1316f841f6adSraf head = reqp->req_head; 1317f841f6adSraf reqp->req_head = NULL; 1318f841f6adSraf 1319f841f6adSraf if (sigev_none) { 1320f841f6adSraf _aio_enq_doneq(reqp); 1321f841f6adSraf reqp = NULL; 1322f841f6adSraf } else { 1323f841f6adSraf (void) _aio_hash_del(resultp); 1324f841f6adSraf _aio_req_mark_done(reqp); 1325f841f6adSraf } 1326f841f6adSraf 1327f841f6adSraf _aio_waitn_wakeup(); 1328f841f6adSraf 1329f841f6adSraf /* 1330f841f6adSraf * __aio_waitn() sets AIO_WAIT_INPROGRESS and 1331f841f6adSraf * __aio_suspend() increments "_aio_kernel_suspend" 1332f841f6adSraf * when they are waiting in the kernel for completed I/Os. 1333f841f6adSraf * 1334f841f6adSraf * _kaio(AIONOTIFY) awakes the corresponding function 1335f841f6adSraf * in the kernel; then the corresponding __aio_waitn() or 1336f841f6adSraf * __aio_suspend() function could reap the recently 1337f841f6adSraf * completed I/Os (_aiodone()). 
1338f841f6adSraf */ 1339f841f6adSraf if ((_aio_flags & AIO_WAIT_INPROGRESS) || _aio_kernel_suspend > 0) 1340f841f6adSraf (void) _kaio(AIONOTIFY); 1341f841f6adSraf 1342f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1343f841f6adSraf 1344f841f6adSraf if (head != NULL) { 1345f841f6adSraf /* 1346f841f6adSraf * If all the lio requests have completed, 1347f841f6adSraf * prepare to notify the waiting thread. 1348f841f6adSraf */ 1349f841f6adSraf sig_mutex_lock(&head->lio_mutex); 1350f841f6adSraf ASSERT(head->lio_refcnt == head->lio_nent); 1351f841f6adSraf if (head->lio_refcnt == 1) { 1352f841f6adSraf int waiting = 0; 1353f841f6adSraf if (head->lio_mode == LIO_WAIT) { 1354f841f6adSraf if ((waiting = head->lio_waiting) != 0) 1355f841f6adSraf (void) cond_signal(&head->lio_cond_cv); 1356f841f6adSraf } else if (head->lio_port < 0) { /* none or signal */ 1357f841f6adSraf if ((np.np_lio_signo = head->lio_signo) != 0) 1358f841f6adSraf notify = 1; 1359f841f6adSraf np.np_lio_user = head->lio_sigval.sival_ptr; 1360f841f6adSraf } else { /* thread or port */ 1361f841f6adSraf notify = 1; 1362f841f6adSraf np.np_lio_port = head->lio_port; 1363f841f6adSraf np.np_lio_event = head->lio_event; 1364f841f6adSraf np.np_lio_object = 1365f841f6adSraf (uintptr_t)head->lio_sigevent; 1366f841f6adSraf np.np_lio_user = head->lio_sigval.sival_ptr; 1367f841f6adSraf } 1368f841f6adSraf head->lio_nent = head->lio_refcnt = 0; 1369f841f6adSraf sig_mutex_unlock(&head->lio_mutex); 1370f841f6adSraf if (waiting == 0) 1371f841f6adSraf _aio_lio_free(head); 1372f841f6adSraf } else { 1373f841f6adSraf head->lio_nent--; 1374f841f6adSraf head->lio_refcnt--; 1375f841f6adSraf sig_mutex_unlock(&head->lio_mutex); 1376f841f6adSraf } 1377f841f6adSraf } 1378f841f6adSraf 1379f841f6adSraf /* 1380f841f6adSraf * The request is completed; now perform the notifications. 1381f841f6adSraf */ 1382f841f6adSraf if (notify) { 1383f841f6adSraf if (reqp != NULL) { 1384f841f6adSraf /* 1385f841f6adSraf * We usually put the request on the notification 1386f841f6adSraf * queue because we don't want to block and delay 1387f841f6adSraf * other operations behind us in the work queue. 1388f841f6adSraf * Also we must never block on a cancel notification 1389f841f6adSraf * because we are being called from an application 1390f841f6adSraf * thread in this case and that could lead to deadlock 1391f841f6adSraf * if no other thread is receiving notificatins. 1392f841f6adSraf */ 1393f841f6adSraf reqp->req_notify = np; 1394f841f6adSraf reqp->req_op = AIONOTIFY; 1395f841f6adSraf _aio_req_add(reqp, &__workers_no, AIONOTIFY); 1396f841f6adSraf reqp = NULL; 1397f841f6adSraf } else { 1398f841f6adSraf /* 1399f841f6adSraf * We already put the request on the done queue, 1400f841f6adSraf * so we can't queue it to the notification queue. 1401f841f6adSraf * Just do the notification directly. 1402f841f6adSraf */ 1403f841f6adSraf send_notification(&np); 1404f841f6adSraf } 1405f841f6adSraf } 1406f841f6adSraf 1407f841f6adSraf if (reqp != NULL) 1408f841f6adSraf _aio_req_free(reqp); 1409f841f6adSraf } 1410f841f6adSraf 1411f841f6adSraf /* 1412f841f6adSraf * Delete fsync requests from list head until there is 1413f841f6adSraf * only one left. Return 0 when there is only one, 1414f841f6adSraf * otherwise return a non-zero value. 
1415f841f6adSraf */ 1416f841f6adSraf static int 1417f841f6adSraf _aio_fsync_del(aio_worker_t *aiowp, aio_req_t *reqp) 1418f841f6adSraf { 1419f841f6adSraf aio_lio_t *head = reqp->req_head; 1420f841f6adSraf int rval = 0; 1421f841f6adSraf 1422f841f6adSraf ASSERT(reqp == aiowp->work_req); 1423f841f6adSraf sig_mutex_lock(&aiowp->work_qlock1); 1424f841f6adSraf sig_mutex_lock(&head->lio_mutex); 1425f841f6adSraf if (head->lio_refcnt > 1) { 1426f841f6adSraf head->lio_refcnt--; 1427f841f6adSraf head->lio_nent--; 1428f841f6adSraf aiowp->work_req = NULL; 1429f841f6adSraf sig_mutex_unlock(&head->lio_mutex); 1430f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1431f841f6adSraf sig_mutex_lock(&__aio_mutex); 1432f841f6adSraf _aio_outstand_cnt--; 1433f841f6adSraf _aio_waitn_wakeup(); 1434f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1435f841f6adSraf _aio_req_free(reqp); 1436f841f6adSraf return (1); 1437f841f6adSraf } 1438f841f6adSraf ASSERT(head->lio_nent == 1 && head->lio_refcnt == 1); 1439f841f6adSraf reqp->req_head = NULL; 1440f841f6adSraf if (head->lio_canned) 1441f841f6adSraf reqp->req_state = AIO_REQ_CANCELED; 1442f841f6adSraf if (head->lio_mode == LIO_DESTROY) { 1443f841f6adSraf aiowp->work_req = NULL; 1444f841f6adSraf rval = 1; 1445f841f6adSraf } 1446f841f6adSraf sig_mutex_unlock(&head->lio_mutex); 1447f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1448f841f6adSraf head->lio_refcnt--; 1449f841f6adSraf head->lio_nent--; 1450f841f6adSraf _aio_lio_free(head); 1451f841f6adSraf if (rval != 0) 1452f841f6adSraf _aio_req_free(reqp); 1453f841f6adSraf return (rval); 1454f841f6adSraf } 1455f841f6adSraf 1456f841f6adSraf /* 1457f841f6adSraf * A worker is set idle when its work queue is empty. 1458f841f6adSraf * The worker checks again that it has no more work 1459f841f6adSraf * and then goes to sleep waiting for more work. 1460f841f6adSraf */ 1461f841f6adSraf int 1462f841f6adSraf _aio_idle(aio_worker_t *aiowp) 1463f841f6adSraf { 1464f841f6adSraf int error = 0; 1465f841f6adSraf 1466f841f6adSraf sig_mutex_lock(&aiowp->work_qlock1); 1467f841f6adSraf if (aiowp->work_count1 == 0) { 1468f841f6adSraf ASSERT(aiowp->work_minload1 == 0); 1469f841f6adSraf aiowp->work_idleflg = 1; 1470f841f6adSraf /* 1471f841f6adSraf * A cancellation handler is not needed here. 1472f841f6adSraf * aio worker threads are never cancelled via pthread_cancel(). 1473f841f6adSraf */ 1474f841f6adSraf error = sig_cond_wait(&aiowp->work_idle_cv, 1475f841f6adSraf &aiowp->work_qlock1); 1476f841f6adSraf /* 1477f841f6adSraf * The idle flag is normally cleared before the worker is awakened 1478f841f6adSraf * by _aio_req_add(). On error (EINTR), we clear it ourselves. 1479f841f6adSraf */ 1480f841f6adSraf if (error) 1481f841f6adSraf aiowp->work_idleflg = 0; 1482f841f6adSraf } 1483f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1484f841f6adSraf return (error); 1485f841f6adSraf } 1486f841f6adSraf 1487f841f6adSraf /* 1488f841f6adSraf * A worker's completed AIO requests are placed onto a global 1489f841f6adSraf * done queue. The application is sent a SIGIO signal only if 1490f841f6adSraf * the process has a handler enabled and is not waiting via 1491f841f6adSraf * aiowait().
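 *
 * A minimal sketch of the legacy consumer this queue serves
 * (hypothetical application code; "fd" and "buf" are assumed to
 * exist and error checks are elided):
 *
 *	aio_result_t res;
 *	if (aioread(fd, buf, sizeof (buf), 0, SEEK_SET, &res) == 0) {
 *		aio_result_t *donep = aiowait(NULL);
 *		...on success donep == &res and res.aio_return
 *		...holds the transfer count
 *	}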
1492f841f6adSraf */ 1493f841f6adSraf static void 1494f841f6adSraf _aio_work_done(aio_worker_t *aiowp) 1495f841f6adSraf { 1496f841f6adSraf aio_req_t *reqp; 1497f841f6adSraf 1498f841f6adSraf sig_mutex_lock(&aiowp->work_qlock1); 1499f841f6adSraf reqp = aiowp->work_prev1; 1500f841f6adSraf reqp->req_next = NULL; 1501f841f6adSraf aiowp->work_done1 = 0; 1502f841f6adSraf aiowp->work_tail1 = aiowp->work_next1; 1503f841f6adSraf if (aiowp->work_tail1 == NULL) 1504f841f6adSraf aiowp->work_head1 = NULL; 1505f841f6adSraf aiowp->work_prev1 = NULL; 1506f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1507f841f6adSraf sig_mutex_lock(&__aio_mutex); 1508f841f6adSraf _aio_donecnt++; 1509f841f6adSraf _aio_outstand_cnt--; 1510f841f6adSraf _aio_req_done_cnt--; 1511f841f6adSraf ASSERT(_aio_donecnt > 0 && 1512f841f6adSraf _aio_outstand_cnt >= 0 && 1513f841f6adSraf _aio_req_done_cnt >= 0); 1514f841f6adSraf ASSERT(reqp != NULL); 1515f841f6adSraf 1516f841f6adSraf if (_aio_done_tail == NULL) { 1517f841f6adSraf _aio_done_head = _aio_done_tail = reqp; 1518f841f6adSraf } else { 1519f841f6adSraf _aio_done_head->req_next = reqp; 1520f841f6adSraf _aio_done_head = reqp; 1521f841f6adSraf } 1522f841f6adSraf 1523f841f6adSraf if (_aiowait_flag) { 1524f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1525f841f6adSraf (void) _kaio(AIONOTIFY); 1526f841f6adSraf } else { 1527f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1528f841f6adSraf if (_sigio_enabled) 1529f841f6adSraf (void) kill(__pid, SIGIO); 1530f841f6adSraf } 1531f841f6adSraf } 1532f841f6adSraf 1533f841f6adSraf /* 1534f841f6adSraf * The done queue consists of AIO requests that are in either the 1535f841f6adSraf * AIO_REQ_DONE or AIO_REQ_CANCELED state. Requests that were cancelled 1536f841f6adSraf * are discarded. If the done queue is empty then NULL is returned. 1537f841f6adSraf * Otherwise the address of a done aio_result_t is returned. 1538f841f6adSraf */ 1539f841f6adSraf aio_result_t * 1540f841f6adSraf _aio_req_done(void) 1541f841f6adSraf { 1542f841f6adSraf aio_req_t *reqp; 1543f841f6adSraf aio_result_t *resultp; 1544f841f6adSraf 1545f841f6adSraf ASSERT(MUTEX_HELD(&__aio_mutex)); 1546f841f6adSraf 1547f841f6adSraf if ((reqp = _aio_done_tail) != NULL) { 1548f841f6adSraf if ((_aio_done_tail = reqp->req_next) == NULL) 1549f841f6adSraf _aio_done_head = NULL; 1550f841f6adSraf ASSERT(_aio_donecnt > 0); 1551f841f6adSraf _aio_donecnt--; 1552f841f6adSraf (void) _aio_hash_del(reqp->req_resultp); 1553f841f6adSraf resultp = reqp->req_resultp; 1554f841f6adSraf ASSERT(reqp->req_state == AIO_REQ_DONE); 1555f841f6adSraf _aio_req_free(reqp); 1556f841f6adSraf return (resultp); 1557f841f6adSraf } 1558f841f6adSraf /* is queue empty? */ 1559f841f6adSraf if (reqp == NULL && _aio_outstand_cnt == 0) { 1560f841f6adSraf return ((aio_result_t *)-1); 1561f841f6adSraf } 1562f841f6adSraf return (NULL); 1563f841f6adSraf } 1564f841f6adSraf 1565f841f6adSraf /* 1566f841f6adSraf * Set the return and errno values for the application's use. 1567f841f6adSraf * 1568f841f6adSraf * For the Posix interfaces, we must set the return value first followed 1569f841f6adSraf * by the errno value because the Posix interfaces allow for a change 1570f841f6adSraf * in the errno value from EINPROGRESS to something else to signal 1571f841f6adSraf * the completion of the asynchronous request. 1572f841f6adSraf * 1573f841f6adSraf * The opposite is true for the Solaris interfaces. 
These allow for 1574f841f6adSraf * a change in the return value from AIO_INPROGRESS to something else 1575f841f6adSraf * to signal the completion of the asynchronous request. 1576f841f6adSraf */ 1577f841f6adSraf void 1578f841f6adSraf _aio_set_result(aio_req_t *reqp, ssize_t retval, int error) 1579f841f6adSraf { 1580f841f6adSraf aio_result_t *resultp = reqp->req_resultp; 1581f841f6adSraf 1582f841f6adSraf if (POSIX_AIO(reqp)) { 1583f841f6adSraf resultp->aio_return = retval; 1584f841f6adSraf membar_producer(); 1585f841f6adSraf resultp->aio_errno = error; 1586f841f6adSraf } else { 1587f841f6adSraf resultp->aio_errno = error; 1588f841f6adSraf membar_producer(); 1589f841f6adSraf resultp->aio_return = retval; 1590f841f6adSraf } 1591f841f6adSraf } 1592f841f6adSraf 1593f841f6adSraf /* 1594f841f6adSraf * Add an AIO request onto the next work queue. 1595f841f6adSraf * A circular list of workers is used to choose the next worker. 1596f841f6adSraf */ 1597f841f6adSraf void 1598f841f6adSraf _aio_req_add(aio_req_t *reqp, aio_worker_t **nextworker, int mode) 1599f841f6adSraf { 1600f841f6adSraf ulwp_t *self = curthread; 1601f841f6adSraf aio_worker_t *aiowp; 1602f841f6adSraf aio_worker_t *first; 1603f841f6adSraf int load_bal_flg = 1; 1604f841f6adSraf int found; 1605f841f6adSraf 1606f841f6adSraf ASSERT(reqp->req_state != AIO_REQ_DONEQ); 1607f841f6adSraf reqp->req_next = NULL; 1608f841f6adSraf /* 1609f841f6adSraf * Try to acquire the next worker's work queue. If it is locked, 1610f841f6adSraf * then search the list of workers until a queue is found unlocked, 1611f841f6adSraf * or until the list is completely traversed at which point another 1612f841f6adSraf * worker will be created. 1613f841f6adSraf */ 1614f841f6adSraf sigoff(self); /* defer SIGIO */ 1615f841f6adSraf sig_mutex_lock(&__aio_mutex); 1616f841f6adSraf first = aiowp = *nextworker; 1617f841f6adSraf if (mode != AIONOTIFY) 1618f841f6adSraf _aio_outstand_cnt++; 1619f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1620f841f6adSraf 1621f841f6adSraf switch (mode) { 1622f841f6adSraf case AIOREAD: 1623f841f6adSraf case AIOWRITE: 1624f841f6adSraf case AIOAREAD: 1625f841f6adSraf case AIOAWRITE: 1626f841f6adSraf #if !defined(_LP64) 1627f841f6adSraf case AIOAREAD64: 1628f841f6adSraf case AIOAWRITE64: 1629f841f6adSraf #endif 1630f841f6adSraf /* try to find an idle worker */ 1631f841f6adSraf found = 0; 1632f841f6adSraf do { 1633f841f6adSraf if (sig_mutex_trylock(&aiowp->work_qlock1) == 0) { 1634f841f6adSraf if (aiowp->work_idleflg) { 1635f841f6adSraf found = 1; 1636f841f6adSraf break; 1637f841f6adSraf } 1638f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1639f841f6adSraf } 1640f841f6adSraf } while ((aiowp = aiowp->work_forw) != first); 1641f841f6adSraf 1642f841f6adSraf if (found) { 1643f841f6adSraf aiowp->work_minload1++; 1644f841f6adSraf break; 1645f841f6adSraf } 1646f841f6adSraf 1647f841f6adSraf /* try to acquire some worker's queue lock */ 1648f841f6adSraf do { 1649f841f6adSraf if (sig_mutex_trylock(&aiowp->work_qlock1) == 0) { 1650f841f6adSraf found = 1; 1651f841f6adSraf break; 1652f841f6adSraf } 1653f841f6adSraf } while ((aiowp = aiowp->work_forw) != first); 1654f841f6adSraf 1655f841f6adSraf /* 1656f841f6adSraf * Create more workers when the workers appear overloaded. 1657f841f6adSraf * Either all the workers are busy draining their queues 1658f841f6adSraf * or no worker's queue lock could be acquired. 
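 *
 * For example (hypothetical sizing): if _max_workers were 256 and
 * every queue lock were held at this instant, the trylock sweeps
 * above find nothing, so a new worker is created as long as
 * _aio_worker_cnt permits; once that limit is reached we instead
 * cycle through the list with _aio_delay() until some queue lock
 * can be taken.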
1659f841f6adSraf */ 1660f841f6adSraf if (!found) { 1661f841f6adSraf if (_aio_worker_cnt < _max_workers) { 1662f841f6adSraf if (_aio_create_worker(reqp, mode)) 1663f841f6adSraf aio_panic("_aio_req_add: add worker"); 1664f841f6adSraf sigon(self); /* reenable SIGIO */ 1665f841f6adSraf return; 1666f841f6adSraf } 1667f841f6adSraf 1668f841f6adSraf /* 1669f841f6adSraf * No worker is available and we have already created 1670f841f6adSraf * _max_workers; keep going through the 1671f841f6adSraf * list slowly until we get a lock. 1672f841f6adSraf */ 1673f841f6adSraf while (sig_mutex_trylock(&aiowp->work_qlock1) != 0) { 1674f841f6adSraf /* 1675f841f6adSraf * Give someone else a chance. 1676f841f6adSraf */ 1677f841f6adSraf _aio_delay(1); 1678f841f6adSraf aiowp = aiowp->work_forw; 1679f841f6adSraf } 1680f841f6adSraf } 1681f841f6adSraf 1682f841f6adSraf ASSERT(MUTEX_HELD(&aiowp->work_qlock1)); 1683f841f6adSraf if (_aio_worker_cnt < _max_workers && 1684f841f6adSraf aiowp->work_minload1 >= _minworkload) { 1685f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1686f841f6adSraf sig_mutex_lock(&__aio_mutex); 1687f841f6adSraf *nextworker = aiowp->work_forw; 1688f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1689f841f6adSraf if (_aio_create_worker(reqp, mode)) 1690f841f6adSraf aio_panic("_aio_req_add: add worker"); 1691f841f6adSraf sigon(self); /* reenable SIGIO */ 1692f841f6adSraf return; 1693f841f6adSraf } 1694f841f6adSraf aiowp->work_minload1++; 1695f841f6adSraf break; 1696f841f6adSraf case AIOFSYNC: 1697f841f6adSraf case AIONOTIFY: 1698f841f6adSraf load_bal_flg = 0; 1699f841f6adSraf sig_mutex_lock(&aiowp->work_qlock1); 1700f841f6adSraf break; 1701f841f6adSraf default: 1702f841f6adSraf aio_panic("_aio_req_add: invalid mode"); 1703f841f6adSraf break; 1704f841f6adSraf } 1705f841f6adSraf /* 1706f841f6adSraf * Put the request onto the worker's work queue. 1707f841f6adSraf */ 1708f841f6adSraf if (aiowp->work_tail1 == NULL) { 1709f841f6adSraf ASSERT(aiowp->work_count1 == 0); 1710f841f6adSraf aiowp->work_tail1 = reqp; 1711f841f6adSraf aiowp->work_next1 = reqp; 1712f841f6adSraf } else { 1713f841f6adSraf aiowp->work_head1->req_next = reqp; 1714f841f6adSraf if (aiowp->work_next1 == NULL) 1715f841f6adSraf aiowp->work_next1 = reqp; 1716f841f6adSraf } 1717f841f6adSraf reqp->req_state = AIO_REQ_QUEUED; 1718f841f6adSraf reqp->req_worker = aiowp; 1719f841f6adSraf aiowp->work_head1 = reqp; 1720f841f6adSraf /* 1721f841f6adSraf * Awaken the worker if it is not currently active. 1722f841f6adSraf */ 1723f841f6adSraf if (aiowp->work_count1++ == 0 && aiowp->work_idleflg) { 1724f841f6adSraf aiowp->work_idleflg = 0; 1725f841f6adSraf (void) cond_signal(&aiowp->work_idle_cv); 1726f841f6adSraf } 1727f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1728f841f6adSraf 1729f841f6adSraf if (load_bal_flg) { 1730f841f6adSraf sig_mutex_lock(&__aio_mutex); 1731f841f6adSraf *nextworker = aiowp->work_forw; 1732f841f6adSraf sig_mutex_unlock(&__aio_mutex); 1733f841f6adSraf } 1734f841f6adSraf sigon(self); /* reenable SIGIO */ 1735f841f6adSraf } 1736f841f6adSraf 1737f841f6adSraf /* 1738f841f6adSraf * Get an AIO request for a specified worker. 1739f841f6adSraf * If the work queue is empty, return NULL.
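 *
 * A simplified sketch of the consuming side (the real loops live
 * in the worker routines elsewhere in this library):
 *
 *	while ((reqp = _aio_req_get(aiowp)) != NULL)
 *		...service the request...
 *	(void) _aio_idle(aiowp);
 *
 * That is, a worker drains its queue, then sleeps on work_idle_cv
 * until _aio_req_add() enqueues more work and clears work_idleflg.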
1740f841f6adSraf */ 1741f841f6adSraf aio_req_t * 1742f841f6adSraf _aio_req_get(aio_worker_t *aiowp) 1743f841f6adSraf { 1744f841f6adSraf aio_req_t *reqp; 1745f841f6adSraf 1746f841f6adSraf sig_mutex_lock(&aiowp->work_qlock1); 1747f841f6adSraf if ((reqp = aiowp->work_next1) != NULL) { 1748f841f6adSraf /* 1749f841f6adSraf * Remove a POSIX request from the queue; the 1750f841f6adSraf * request queue is a singly linked list 1751f841f6adSraf * with a previous pointer. The request is 1752f841f6adSraf * removed by updating the previous pointer. 1753f841f6adSraf * 1754f841f6adSraf * Non-POSIX requests are left on the queue 1755f841f6adSraf * to eventually be placed on the done queue. 1756f841f6adSraf */ 1757f841f6adSraf 1758f841f6adSraf if (POSIX_AIO(reqp)) { 1759f841f6adSraf if (aiowp->work_prev1 == NULL) { 1760f841f6adSraf aiowp->work_tail1 = reqp->req_next; 1761f841f6adSraf if (aiowp->work_tail1 == NULL) 1762f841f6adSraf aiowp->work_head1 = NULL; 1763f841f6adSraf } else { 1764f841f6adSraf aiowp->work_prev1->req_next = reqp->req_next; 1765f841f6adSraf if (aiowp->work_head1 == reqp) 1766f841f6adSraf aiowp->work_head1 = reqp->req_next; 1767f841f6adSraf } 1768f841f6adSraf 1769f841f6adSraf } else { 1770f841f6adSraf aiowp->work_prev1 = reqp; 1771f841f6adSraf ASSERT(aiowp->work_done1 >= 0); 1772f841f6adSraf aiowp->work_done1++; 1773f841f6adSraf } 1774f841f6adSraf ASSERT(reqp != reqp->req_next); 1775f841f6adSraf aiowp->work_next1 = reqp->req_next; 1776f841f6adSraf ASSERT(aiowp->work_count1 >= 1); 1777f841f6adSraf aiowp->work_count1--; 1778f841f6adSraf switch (reqp->req_op) { 1779f841f6adSraf case AIOREAD: 1780f841f6adSraf case AIOWRITE: 1781f841f6adSraf case AIOAREAD: 1782f841f6adSraf case AIOAWRITE: 1783f841f6adSraf #if !defined(_LP64) 1784f841f6adSraf case AIOAREAD64: 1785f841f6adSraf case AIOAWRITE64: 1786f841f6adSraf #endif 1787f841f6adSraf ASSERT(aiowp->work_minload1 > 0); 1788f841f6adSraf aiowp->work_minload1--; 1789f841f6adSraf break; 1790f841f6adSraf } 1791f841f6adSraf reqp->req_state = AIO_REQ_INPROGRESS; 1792f841f6adSraf } 1793f841f6adSraf aiowp->work_req = reqp; 1794f841f6adSraf ASSERT(reqp != NULL || aiowp->work_count1 == 0); 1795f841f6adSraf sig_mutex_unlock(&aiowp->work_qlock1); 1796f841f6adSraf return (reqp); 1797f841f6adSraf } 1798f841f6adSraf 1799f841f6adSraf static void 1800f841f6adSraf _aio_req_del(aio_worker_t *aiowp, aio_req_t *reqp, int ostate) 1801f841f6adSraf { 1802f841f6adSraf aio_req_t **last; 1803f841f6adSraf aio_req_t *lastrp; 1804f841f6adSraf aio_req_t *next; 1805f841f6adSraf 1806f841f6adSraf ASSERT(aiowp != NULL); 1807f841f6adSraf ASSERT(MUTEX_HELD(&aiowp->work_qlock1)); 1808f841f6adSraf if (POSIX_AIO(reqp)) { 1809f841f6adSraf if (ostate != AIO_REQ_QUEUED) 1810f841f6adSraf return; 1811f841f6adSraf } 1812f841f6adSraf last = &aiowp->work_tail1; 1813f841f6adSraf lastrp = aiowp->work_tail1; 1814f841f6adSraf ASSERT(ostate == AIO_REQ_QUEUED || ostate == AIO_REQ_INPROGRESS); 1815f841f6adSraf while ((next = *last) != NULL) { 1816f841f6adSraf if (next == reqp) { 1817f841f6adSraf *last = next->req_next; 1818f841f6adSraf if (aiowp->work_next1 == next) 1819f841f6adSraf aiowp->work_next1 = next->req_next; 1820f841f6adSraf 1821f841f6adSraf if ((next->req_next != NULL) || 1822f841f6adSraf (aiowp->work_done1 == 0)) { 1823f841f6adSraf if (aiowp->work_head1 == next) 1824f841f6adSraf aiowp->work_head1 = next->req_next; 1825f841f6adSraf if (aiowp->work_prev1 == next) 1826f841f6adSraf aiowp->work_prev1 = next->req_next; 1827f841f6adSraf } else { 1828f841f6adSraf if (aiowp->work_head1 == next)
1829f841f6adSraf aiowp->work_head1 = lastrp; 1830f841f6adSraf if (aiowp->work_prev1 == next) 1831f841f6adSraf aiowp->work_prev1 = lastrp; 1832f841f6adSraf } 1833f841f6adSraf 1834f841f6adSraf if (ostate == AIO_REQ_QUEUED) { 1835f841f6adSraf ASSERT(aiowp->work_count1 >= 1); 1836f841f6adSraf aiowp->work_count1--; 1837f841f6adSraf ASSERT(aiowp->work_minload1 >= 1); 1838f841f6adSraf aiowp->work_minload1--; 1839f841f6adSraf } else { 1840f841f6adSraf ASSERT(ostate == AIO_REQ_INPROGRESS && 1841f841f6adSraf !POSIX_AIO(reqp)); 1842f841f6adSraf aiowp->work_done1--; 1843f841f6adSraf } 1844f841f6adSraf return; 1845f841f6adSraf } 1846f841f6adSraf last = &next->req_next; 1847f841f6adSraf lastrp = next; 1848f841f6adSraf } 1849f841f6adSraf /* NOTREACHED */ 1850f841f6adSraf } 1851f841f6adSraf 1852f841f6adSraf static void 1853f841f6adSraf _aio_enq_doneq(aio_req_t *reqp) 1854f841f6adSraf { 1855f841f6adSraf if (_aio_doneq == NULL) { 1856f841f6adSraf _aio_doneq = reqp; 1857f841f6adSraf reqp->req_next = reqp->req_prev = reqp; 1858f841f6adSraf } else { 1859f841f6adSraf reqp->req_next = _aio_doneq; 1860f841f6adSraf reqp->req_prev = _aio_doneq->req_prev; 1861f841f6adSraf _aio_doneq->req_prev->req_next = reqp; 1862f841f6adSraf _aio_doneq->req_prev = reqp; 1863f841f6adSraf } 1864f841f6adSraf reqp->req_state = AIO_REQ_DONEQ; 1865f841f6adSraf _aio_doneq_cnt++; 1866f841f6adSraf } 1867f841f6adSraf 1868f841f6adSraf /* 1869f841f6adSraf * The caller owns __aio_mutex. 1870f841f6adSraf */ 1871f841f6adSraf aio_req_t * 1872f841f6adSraf _aio_req_remove(aio_req_t *reqp) 1873f841f6adSraf { 1874f841f6adSraf if (reqp && reqp->req_state != AIO_REQ_DONEQ) 1875f841f6adSraf return (NULL); 1876f841f6adSraf 1877f841f6adSraf if (reqp) { 1878f841f6adSraf /* request in done queue */ 1879f841f6adSraf if (_aio_doneq == reqp) 1880f841f6adSraf _aio_doneq = reqp->req_next; 1881f841f6adSraf if (_aio_doneq == reqp) { 1882f841f6adSraf /* only one request on queue */ 1883f841f6adSraf _aio_doneq = NULL; 1884f841f6adSraf } else { 1885f841f6adSraf aio_req_t *tmp = reqp->req_next; 1886f841f6adSraf reqp->req_prev->req_next = tmp; 1887f841f6adSraf tmp->req_prev = reqp->req_prev; 1888f841f6adSraf } 1889f841f6adSraf } else if ((reqp = _aio_doneq) != NULL) { 1890f841f6adSraf if (reqp == reqp->req_next) { 1891f841f6adSraf /* only one request on queue */ 1892f841f6adSraf _aio_doneq = NULL; 1893f841f6adSraf } else { 1894f841f6adSraf reqp->req_prev->req_next = _aio_doneq = reqp->req_next; 1895f841f6adSraf _aio_doneq->req_prev = reqp->req_prev; 1896f841f6adSraf } 1897f841f6adSraf } 1898f841f6adSraf if (reqp) { 1899f841f6adSraf _aio_doneq_cnt--; 1900f841f6adSraf reqp->req_next = reqp->req_prev = reqp; 1901f841f6adSraf reqp->req_state = AIO_REQ_DONE; 1902f841f6adSraf } 1903f841f6adSraf return (reqp); 1904f841f6adSraf } 1905f841f6adSraf 1906f841f6adSraf /* 1907f841f6adSraf * An AIO request is identified by an aio_result_t pointer. The library 1908f841f6adSraf * maps this aio_result_t pointer to its internal representation using a 1909f841f6adSraf * hash table. This function adds an aio_result_t pointer to the hash table.
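 *
 * For example, interfaces such as aio_cancel() recover the
 * internal request from the user-visible handle with
 *
 *	reqp = _aio_hash_find(&aiocbp->aio_resultp);
 *
 * which returns NULL if the request was never inserted or has
 * already been reaped.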
1910f841f6adSraf */ 1911f841f6adSraf static int 1912f841f6adSraf _aio_hash_insert(aio_result_t *resultp, aio_req_t *reqp) 1913f841f6adSraf { 1914f841f6adSraf aio_hash_t *hashp; 1915f841f6adSraf aio_req_t **prev; 1916f841f6adSraf aio_req_t *next; 1917f841f6adSraf 1918f841f6adSraf hashp = _aio_hash + AIOHASH(resultp); 1919f841f6adSraf lmutex_lock(&hashp->hash_lock); 1920f841f6adSraf prev = &hashp->hash_ptr; 1921f841f6adSraf while ((next = *prev) != NULL) { 1922f841f6adSraf if (resultp == next->req_resultp) { 1923f841f6adSraf lmutex_unlock(&hashp->hash_lock); 1924f841f6adSraf return (-1); 1925f841f6adSraf } 1926f841f6adSraf prev = &next->req_link; 1927f841f6adSraf } 1928f841f6adSraf *prev = reqp; 1929f841f6adSraf ASSERT(reqp->req_link == NULL); 1930f841f6adSraf lmutex_unlock(&hashp->hash_lock); 1931f841f6adSraf return (0); 1932f841f6adSraf } 1933f841f6adSraf 1934f841f6adSraf /* 1935f841f6adSraf * Remove an entry from the hash table. 1936f841f6adSraf */ 1937f841f6adSraf aio_req_t * 1938f841f6adSraf _aio_hash_del(aio_result_t *resultp) 1939f841f6adSraf { 1940f841f6adSraf aio_hash_t *hashp; 1941f841f6adSraf aio_req_t **prev; 1942f841f6adSraf aio_req_t *next = NULL; 1943f841f6adSraf 1944f841f6adSraf if (_aio_hash != NULL) { 1945f841f6adSraf hashp = _aio_hash + AIOHASH(resultp); 1946f841f6adSraf lmutex_lock(&hashp->hash_lock); 1947f841f6adSraf prev = &hashp->hash_ptr; 1948f841f6adSraf while ((next = *prev) != NULL) { 1949f841f6adSraf if (resultp == next->req_resultp) { 1950f841f6adSraf *prev = next->req_link; 1951f841f6adSraf next->req_link = NULL; 1952f841f6adSraf break; 1953f841f6adSraf } 1954f841f6adSraf prev = &next->req_link; 1955f841f6adSraf } 1956f841f6adSraf lmutex_unlock(&hashp->hash_lock); 1957f841f6adSraf } 1958f841f6adSraf return (next); 1959f841f6adSraf } 1960f841f6adSraf 1961f841f6adSraf /* 1962f841f6adSraf * Find an entry in the hash table. 1963f841f6adSraf */ 1964f841f6adSraf aio_req_t * 1965f841f6adSraf _aio_hash_find(aio_result_t *resultp) 1966f841f6adSraf { 1967f841f6adSraf aio_hash_t *hashp; 1968f841f6adSraf aio_req_t **prev; 1969f841f6adSraf aio_req_t *next = NULL; 1970f841f6adSraf 1971f841f6adSraf if (_aio_hash != NULL) { 1972f841f6adSraf hashp = _aio_hash + AIOHASH(resultp); 1973f841f6adSraf lmutex_lock(&hashp->hash_lock); 1974f841f6adSraf prev = &hashp->hash_ptr; 1975f841f6adSraf while ((next = *prev) != NULL) { 1976f841f6adSraf if (resultp == next->req_resultp) 1977f841f6adSraf break; 1978f841f6adSraf prev = &next->req_link; 1979f841f6adSraf } 1980f841f6adSraf lmutex_unlock(&hashp->hash_lock); 1981f841f6adSraf } 1982f841f6adSraf return (next); 1983f841f6adSraf } 1984f841f6adSraf 1985f841f6adSraf /* 1986f841f6adSraf * AIO interface for POSIX 1987f841f6adSraf */ 1988f841f6adSraf int 1989f841f6adSraf _aio_rw(aiocb_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker, 1990f841f6adSraf int mode, int flg) 1991f841f6adSraf { 1992f841f6adSraf aio_req_t *reqp; 1993f841f6adSraf aio_args_t *ap; 1994f841f6adSraf int kerr; 1995f841f6adSraf 1996f841f6adSraf if (aiocbp == NULL) { 1997f841f6adSraf errno = EINVAL; 1998f841f6adSraf return (-1); 1999f841f6adSraf } 2000f841f6adSraf 2001f841f6adSraf /* initialize kaio */ 2002f841f6adSraf if (!_kaio_ok) 2003f841f6adSraf _kaio_init(); 2004f841f6adSraf 2005f841f6adSraf aiocbp->aio_state = NOCHECK; 2006f841f6adSraf 2007f841f6adSraf /* 2008f841f6adSraf * If we have been called because a list I/O 2009f841f6adSraf * kaio() failed, we don't want to repeat the 2010f841f6adSraf * system call. 2011f841f6adSraf */ 2012f841f6adSraf 2013f841f6adSraf if
(flg & AIO_KAIO) { 2014f841f6adSraf /* 2015f841f6adSraf * Try kernel aio first. 2016f841f6adSraf * If errno is ENOTSUP/EBADFD, 2017f841f6adSraf * fall back to the thread implementation. 2018f841f6adSraf */ 2019f841f6adSraf if (_kaio_ok > 0 && KAIO_SUPPORTED(aiocbp->aio_fildes)) { 2020f841f6adSraf aiocbp->aio_resultp.aio_errno = EINPROGRESS; 2021f841f6adSraf aiocbp->aio_state = CHECK; 2022f841f6adSraf kerr = (int)_kaio(mode, aiocbp); 2023f841f6adSraf if (kerr == 0) 2024f841f6adSraf return (0); 2025f841f6adSraf if (errno != ENOTSUP && errno != EBADFD) { 2026f841f6adSraf aiocbp->aio_resultp.aio_errno = errno; 2027f841f6adSraf aiocbp->aio_resultp.aio_return = -1; 2028f841f6adSraf aiocbp->aio_state = NOCHECK; 2029f841f6adSraf return (-1); 2030f841f6adSraf } 2031f841f6adSraf if (errno == EBADFD) 2032f841f6adSraf SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes); 2033f841f6adSraf } 2034f841f6adSraf } 2035f841f6adSraf 2036f841f6adSraf aiocbp->aio_resultp.aio_errno = EINPROGRESS; 2037f841f6adSraf aiocbp->aio_state = USERAIO; 2038f841f6adSraf 2039f841f6adSraf if (!__uaio_ok && __uaio_init() == -1) 2040f841f6adSraf return (-1); 2041f841f6adSraf 2042f841f6adSraf if ((reqp = _aio_req_alloc()) == NULL) { 2043f841f6adSraf errno = EAGAIN; 2044f841f6adSraf return (-1); 2045f841f6adSraf } 2046f841f6adSraf 2047f841f6adSraf /* 2048f841f6adSraf * If an LIO request, add the list head to the aio request 2049f841f6adSraf */ 2050f841f6adSraf reqp->req_head = lio_head; 2051f841f6adSraf reqp->req_type = AIO_POSIX_REQ; 2052f841f6adSraf reqp->req_op = mode; 2053f841f6adSraf reqp->req_largefile = 0; 2054f841f6adSraf 2055f841f6adSraf if (aiocbp->aio_sigevent.sigev_notify == SIGEV_NONE) { 2056f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_NONE; 2057f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_SIGNAL) { 2058f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_SIGNAL; 2059f841f6adSraf reqp->req_sigevent.sigev_signo = 2060f841f6adSraf aiocbp->aio_sigevent.sigev_signo; 2061f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2062f841f6adSraf aiocbp->aio_sigevent.sigev_value.sival_ptr; 2063f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_PORT) { 2064f841f6adSraf port_notify_t *pn = aiocbp->aio_sigevent.sigev_value.sival_ptr; 2065f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_PORT; 2066f841f6adSraf /* 2067f841f6adSraf * Reuse the sigevent structure to contain the port number 2068f841f6adSraf * and the user value. Same for SIGEV_THREAD, below. 2069f841f6adSraf */ 2070f841f6adSraf reqp->req_sigevent.sigev_signo = 2071f841f6adSraf pn->portnfy_port; 2072f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2073f841f6adSraf pn->portnfy_user; 2074f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_THREAD) { 2075f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_THREAD; 2076f841f6adSraf /* 2077f841f6adSraf * The sigevent structure contains the port number 2078f841f6adSraf * and the user value. Same for SIGEV_PORT, above. 
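 *
 * A minimal sketch of the application-side SIGEV_PORT setup that
 * produces the port number and user value consumed here
 * (hypothetical caller; "cb" and "my_cookie" are assumptions and
 * error checks are elided):
 *
 *	port_notify_t pn;
 *	pn.portnfy_port = port_create();
 *	pn.portnfy_user = my_cookie;
 *	cb.aio_sigevent.sigev_notify = SIGEV_PORT;
 *	cb.aio_sigevent.sigev_value.sival_ptr = &pn;
 *	(void) aio_read(&cb);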
2079f841f6adSraf */ 2080f841f6adSraf reqp->req_sigevent.sigev_signo = 2081f841f6adSraf aiocbp->aio_sigevent.sigev_signo; 2082f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2083f841f6adSraf aiocbp->aio_sigevent.sigev_value.sival_ptr; 2084f841f6adSraf } 2085f841f6adSraf 2086f841f6adSraf reqp->req_resultp = &aiocbp->aio_resultp; 2087f841f6adSraf reqp->req_aiocbp = aiocbp; 2088f841f6adSraf ap = &reqp->req_args; 2089f841f6adSraf ap->fd = aiocbp->aio_fildes; 2090f841f6adSraf ap->buf = (caddr_t)aiocbp->aio_buf; 2091f841f6adSraf ap->bufsz = aiocbp->aio_nbytes; 2092f841f6adSraf ap->offset = aiocbp->aio_offset; 2093f841f6adSraf 2094f841f6adSraf if ((flg & AIO_NO_DUPS) && 2095f841f6adSraf _aio_hash_insert(&aiocbp->aio_resultp, reqp) != 0) { 2096f841f6adSraf aio_panic("_aio_rw(): request already in hash table"); 2097f841f6adSraf _aio_req_free(reqp); 2098f841f6adSraf errno = EINVAL; 2099f841f6adSraf return (-1); 2100f841f6adSraf } 2101f841f6adSraf _aio_req_add(reqp, nextworker, mode); 2102f841f6adSraf return (0); 2103f841f6adSraf } 2104f841f6adSraf 2105f841f6adSraf #if !defined(_LP64) 2106f841f6adSraf /* 2107f841f6adSraf * 64-bit AIO interface for POSIX 2108f841f6adSraf */ 2109f841f6adSraf int 2110f841f6adSraf _aio_rw64(aiocb64_t *aiocbp, aio_lio_t *lio_head, aio_worker_t **nextworker, 2111f841f6adSraf int mode, int flg) 2112f841f6adSraf { 2113f841f6adSraf aio_req_t *reqp; 2114f841f6adSraf aio_args_t *ap; 2115f841f6adSraf int kerr; 2116f841f6adSraf 2117f841f6adSraf if (aiocbp == NULL) { 2118f841f6adSraf errno = EINVAL; 2119f841f6adSraf return (-1); 2120f841f6adSraf } 2121f841f6adSraf 2122f841f6adSraf /* initialize kaio */ 2123f841f6adSraf if (!_kaio_ok) 2124f841f6adSraf _kaio_init(); 2125f841f6adSraf 2126f841f6adSraf aiocbp->aio_state = NOCHECK; 2127f841f6adSraf 2128f841f6adSraf /* 2129f841f6adSraf * If we have been called because a list I/O 2130f841f6adSraf * kaio() failed, we don't want to repeat the 2131f841f6adSraf * system call. 2132f841f6adSraf */ 2133f841f6adSraf 2134f841f6adSraf if (flg & AIO_KAIO) { 2135f841f6adSraf /* 2136f841f6adSraf * Try kernel aio first. 2137f841f6adSraf * If errno is ENOTSUP/EBADFD, 2138f841f6adSraf * fall back to the thread implementation.
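 *
 * An EBADFD failure additionally marks the descriptor in the
 * _kaio_supported bitmap (via SET_KAIO_NOT_SUPPORTED()), so that
 * later requests on the same file skip the kernel probe entirely.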
2139f841f6adSraf */ 2140f841f6adSraf if (_kaio_ok > 0 && KAIO_SUPPORTED(aiocbp->aio_fildes)) { 2141f841f6adSraf aiocbp->aio_resultp.aio_errno = EINPROGRESS; 2142f841f6adSraf aiocbp->aio_state = CHECK; 2143f841f6adSraf kerr = (int)_kaio(mode, aiocbp); 2144f841f6adSraf if (kerr == 0) 2145f841f6adSraf return (0); 2146f841f6adSraf if (errno != ENOTSUP && errno != EBADFD) { 2147f841f6adSraf aiocbp->aio_resultp.aio_errno = errno; 2148f841f6adSraf aiocbp->aio_resultp.aio_return = -1; 2149f841f6adSraf aiocbp->aio_state = NOCHECK; 2150f841f6adSraf return (-1); 2151f841f6adSraf } 2152f841f6adSraf if (errno == EBADFD) 2153f841f6adSraf SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes); 2154f841f6adSraf } 2155f841f6adSraf } 2156f841f6adSraf 2157f841f6adSraf aiocbp->aio_resultp.aio_errno = EINPROGRESS; 2158f841f6adSraf aiocbp->aio_state = USERAIO; 2159f841f6adSraf 2160f841f6adSraf if (!__uaio_ok && __uaio_init() == -1) 2161f841f6adSraf return (-1); 2162f841f6adSraf 2163f841f6adSraf if ((reqp = _aio_req_alloc()) == NULL) { 2164f841f6adSraf errno = EAGAIN; 2165f841f6adSraf return (-1); 2166f841f6adSraf } 2167f841f6adSraf 2168f841f6adSraf /* 2169f841f6adSraf * If an LIO request, add the list head to the aio request 2170f841f6adSraf */ 2171f841f6adSraf reqp->req_head = lio_head; 2172f841f6adSraf reqp->req_type = AIO_POSIX_REQ; 2173f841f6adSraf reqp->req_op = mode; 2174f841f6adSraf reqp->req_largefile = 1; 2175f841f6adSraf 2176f841f6adSraf if (aiocbp->aio_sigevent.sigev_notify == SIGEV_NONE) { 2177f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_NONE; 2178f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_SIGNAL) { 2179f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_SIGNAL; 2180f841f6adSraf reqp->req_sigevent.sigev_signo = 2181f841f6adSraf aiocbp->aio_sigevent.sigev_signo; 2182f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2183f841f6adSraf aiocbp->aio_sigevent.sigev_value.sival_ptr; 2184f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_PORT) { 2185f841f6adSraf port_notify_t *pn = aiocbp->aio_sigevent.sigev_value.sival_ptr; 2186f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_PORT; 2187f841f6adSraf reqp->req_sigevent.sigev_signo = 2188f841f6adSraf pn->portnfy_port; 2189f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2190f841f6adSraf pn->portnfy_user; 2191f841f6adSraf } else if (aiocbp->aio_sigevent.sigev_notify == SIGEV_THREAD) { 2192f841f6adSraf reqp->req_sigevent.sigev_notify = SIGEV_THREAD; 2193f841f6adSraf reqp->req_sigevent.sigev_signo = 2194f841f6adSraf aiocbp->aio_sigevent.sigev_signo; 2195f841f6adSraf reqp->req_sigevent.sigev_value.sival_ptr = 2196f841f6adSraf aiocbp->aio_sigevent.sigev_value.sival_ptr; 2197f841f6adSraf } 2198f841f6adSraf 2199f841f6adSraf reqp->req_resultp = &aiocbp->aio_resultp; 2200f841f6adSraf reqp->req_aiocbp = aiocbp; 2201f841f6adSraf ap = &reqp->req_args; 2202f841f6adSraf ap->fd = aiocbp->aio_fildes; 2203f841f6adSraf ap->buf = (caddr_t)aiocbp->aio_buf; 2204f841f6adSraf ap->bufsz = aiocbp->aio_nbytes; 2205f841f6adSraf ap->offset = aiocbp->aio_offset; 2206f841f6adSraf 2207f841f6adSraf if ((flg & AIO_NO_DUPS) && 2208f841f6adSraf _aio_hash_insert(&aiocbp->aio_resultp, reqp) != 0) { 2209f841f6adSraf aio_panic("_aio_rw64(): request already in hash table"); 2210f841f6adSraf _aio_req_free(reqp); 2211f841f6adSraf errno = EINVAL; 2212f841f6adSraf return (-1); 2213f841f6adSraf } 2214f841f6adSraf _aio_req_add(reqp, nextworker, mode); 2215f841f6adSraf return (0); 2216f841f6adSraf } 2217f841f6adSraf #endif /* !defined(_LP64) */ 2218