1 /*- 2 * Copyright (c) 1997 John S. Dyson. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. John S. Dyson's name may not be used to endorse or promote products 10 * derived from this software without specific prior written permission. 11 * 12 * DISCLAIMER: This code isn't warranted to do anything useful. Anything 13 * bad that happens because of using this software isn't the responsibility 14 * of the author. This software is distributed AS-IS. 15 */ 16 17 /* 18 * This file contains support for the POSIX 1003.1B AIO/LIO facility. 19 */ 20 21 #include <sys/cdefs.h> 22 __FBSDID("$FreeBSD$"); 23 24 #include "opt_compat.h" 25 26 #include <sys/param.h> 27 #include <sys/systm.h> 28 #include <sys/malloc.h> 29 #include <sys/bio.h> 30 #include <sys/buf.h> 31 #include <sys/capsicum.h> 32 #include <sys/eventhandler.h> 33 #include <sys/sysproto.h> 34 #include <sys/filedesc.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #include <sys/kthread.h> 38 #include <sys/fcntl.h> 39 #include <sys/file.h> 40 #include <sys/limits.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/unistd.h> 44 #include <sys/posix4.h> 45 #include <sys/proc.h> 46 #include <sys/resourcevar.h> 47 #include <sys/signalvar.h> 48 #include <sys/syscallsubr.h> 49 #include <sys/protosw.h> 50 #include <sys/rwlock.h> 51 #include <sys/sema.h> 52 #include <sys/socket.h> 53 #include <sys/socketvar.h> 54 #include <sys/syscall.h> 55 #include <sys/sysent.h> 56 #include <sys/sysctl.h> 57 #include <sys/syslog.h> 58 #include <sys/sx.h> 59 #include <sys/taskqueue.h> 60 #include <sys/vnode.h> 61 #include <sys/conf.h> 62 #include <sys/event.h> 63 #include <sys/mount.h> 64 #include <geom/geom.h> 65 66 #include <machine/atomic.h> 67 68 #include <vm/vm.h> 69 #include <vm/vm_page.h> 70 #include <vm/vm_extern.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_map.h> 73 #include <vm/vm_object.h> 74 #include <vm/uma.h> 75 #include <sys/aio.h> 76 77 /* 78 * Counter for allocating reference ids to new jobs. Wrapped to 1 on 79 * overflow. (XXX will be removed soon.) 80 */ 81 static u_long jobrefid; 82 83 /* 84 * Counter for aio_fsync. 85 */ 86 static uint64_t jobseqno; 87 88 #ifndef MAX_AIO_PER_PROC 89 #define MAX_AIO_PER_PROC 32 90 #endif 91 92 #ifndef MAX_AIO_QUEUE_PER_PROC 93 #define MAX_AIO_QUEUE_PER_PROC 256 /* Bigger than AIO_LISTIO_MAX */ 94 #endif 95 96 #ifndef MAX_AIO_QUEUE 97 #define MAX_AIO_QUEUE 1024 /* Bigger than AIO_LISTIO_MAX */ 98 #endif 99 100 #ifndef MAX_BUF_AIO 101 #define MAX_BUF_AIO 16 102 #endif 103 104 FEATURE(aio, "Asynchronous I/O"); 105 106 static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list"); 107 108 static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, 109 "Async IO management"); 110 111 static int enable_aio_unsafe = 0; 112 SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0, 113 "Permit asynchronous IO on all file types, not just known-safe types"); 114 115 static unsigned int unsafe_warningcnt = 1; 116 SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW, 117 &unsafe_warningcnt, 0, 118 "Warnings that will be triggered upon failed IO requests on unsafe files"); 119 120 static int max_aio_procs = MAX_AIO_PROCS; 121 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0, 122 "Maximum number of kernel processes to use for handling async IO "); 123 124 static int num_aio_procs = 0; 125 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0, 126 "Number of presently active kernel processes for async IO"); 127 128 /* 129 * The code will adjust the actual number of AIO processes towards this 130 * number when it gets a chance. 131 */ 132 static int target_aio_procs = TARGET_AIO_PROCS; 133 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs, 134 0, 135 "Preferred number of ready kernel processes for async IO"); 136 137 static int max_queue_count = MAX_AIO_QUEUE; 138 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0, 139 "Maximum number of aio requests to queue, globally"); 140 141 static int num_queue_count = 0; 142 SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0, 143 "Number of queued aio requests"); 144 145 static int num_buf_aio = 0; 146 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0, 147 "Number of aio requests presently handled by the buf subsystem"); 148 149 /* Number of async I/O processes in the process of being started */ 150 /* XXX This should be local to aio_aqueue() */ 151 static int num_aio_resv_start = 0; 152 153 static int aiod_lifetime; 154 SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0, 155 "Maximum lifetime for idle aiod"); 156 157 static int max_aio_per_proc = MAX_AIO_PER_PROC; 158 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc, 159 0, 160 "Maximum active aio requests per process (stored in the process)"); 161 162 static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC; 163 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW, 164 &max_aio_queue_per_proc, 0, 165 "Maximum queued aio requests per process (stored in the process)"); 166 167 static int max_buf_aio = MAX_BUF_AIO; 168 SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0, 169 "Maximum buf aio requests per process (stored in the process)"); 170 171 #ifdef COMPAT_FREEBSD6 172 typedef struct oaiocb { 173 int aio_fildes; /* File descriptor */ 174 off_t aio_offset; /* File offset for I/O */ 175 volatile void *aio_buf; /* I/O buffer in process space */ 176 size_t aio_nbytes; /* Number of bytes for I/O */ 177 struct osigevent aio_sigevent; /* Signal to deliver */ 178 int aio_lio_opcode; /* LIO opcode */ 179 int aio_reqprio; /* Request priority -- ignored */ 180 struct __aiocb_private _aiocb_private; 181 } oaiocb_t; 182 #endif 183 184 /* 185 * Below is a key of locks used to protect each member of struct kaiocb 186 * aioliojob and kaioinfo and any backends. 187 * 188 * * - need not protected 189 * a - locked by kaioinfo lock 190 * b - locked by backend lock, the backend lock can be null in some cases, 191 * for example, BIO belongs to this type, in this case, proc lock is 192 * reused. 193 * c - locked by aio_job_mtx, the lock for the generic file I/O backend. 194 */ 195 196 /* 197 * If the routine that services an AIO request blocks while running in an 198 * AIO kernel process it can starve other I/O requests. BIO requests 199 * queued via aio_qphysio() complete in GEOM and do not use AIO kernel 200 * processes at all. Socket I/O requests use a separate pool of 201 * kprocs and also force non-blocking I/O. Other file I/O requests 202 * use the generic fo_read/fo_write operations which can block. The 203 * fsync and mlock operations can also block while executing. Ideally 204 * none of these requests would block while executing. 205 * 206 * Note that the service routines cannot toggle O_NONBLOCK in the file 207 * structure directly while handling a request due to races with 208 * userland threads. 209 */ 210 211 /* jobflags */ 212 #define KAIOCB_QUEUEING 0x01 213 #define KAIOCB_CANCELLED 0x02 214 #define KAIOCB_CANCELLING 0x04 215 #define KAIOCB_CHECKSYNC 0x08 216 #define KAIOCB_CLEARED 0x10 217 #define KAIOCB_FINISHED 0x20 218 219 /* 220 * AIO process info 221 */ 222 #define AIOP_FREE 0x1 /* proc on free queue */ 223 224 struct aioproc { 225 int aioprocflags; /* (c) AIO proc flags */ 226 TAILQ_ENTRY(aioproc) list; /* (c) list of processes */ 227 struct proc *aioproc; /* (*) the AIO proc */ 228 }; 229 230 /* 231 * data-structure for lio signal management 232 */ 233 struct aioliojob { 234 int lioj_flags; /* (a) listio flags */ 235 int lioj_count; /* (a) listio flags */ 236 int lioj_finished_count; /* (a) listio flags */ 237 struct sigevent lioj_signal; /* (a) signal on all I/O done */ 238 TAILQ_ENTRY(aioliojob) lioj_list; /* (a) lio list */ 239 struct knlist klist; /* (a) list of knotes */ 240 ksiginfo_t lioj_ksi; /* (a) Realtime signal info */ 241 }; 242 243 #define LIOJ_SIGNAL 0x1 /* signal on all done (lio) */ 244 #define LIOJ_SIGNAL_POSTED 0x2 /* signal has been posted */ 245 #define LIOJ_KEVENT_POSTED 0x4 /* kevent triggered */ 246 247 /* 248 * per process aio data structure 249 */ 250 struct kaioinfo { 251 struct mtx kaio_mtx; /* the lock to protect this struct */ 252 int kaio_flags; /* (a) per process kaio flags */ 253 int kaio_maxactive_count; /* (*) maximum number of AIOs */ 254 int kaio_active_count; /* (c) number of currently used AIOs */ 255 int kaio_qallowed_count; /* (*) maxiumu size of AIO queue */ 256 int kaio_count; /* (a) size of AIO queue */ 257 int kaio_ballowed_count; /* (*) maximum number of buffers */ 258 int kaio_buffer_count; /* (a) number of physio buffers */ 259 TAILQ_HEAD(,kaiocb) kaio_all; /* (a) all AIOs in a process */ 260 TAILQ_HEAD(,kaiocb) kaio_done; /* (a) done queue for process */ 261 TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */ 262 TAILQ_HEAD(,kaiocb) kaio_jobqueue; /* (a) job queue for process */ 263 TAILQ_HEAD(,kaiocb) kaio_syncqueue; /* (a) queue for aio_fsync */ 264 TAILQ_HEAD(,kaiocb) kaio_syncready; /* (a) second q for aio_fsync */ 265 struct task kaio_task; /* (*) task to kick aio processes */ 266 struct task kaio_sync_task; /* (*) task to schedule fsync jobs */ 267 }; 268 269 #define AIO_LOCK(ki) mtx_lock(&(ki)->kaio_mtx) 270 #define AIO_UNLOCK(ki) mtx_unlock(&(ki)->kaio_mtx) 271 #define AIO_LOCK_ASSERT(ki, f) mtx_assert(&(ki)->kaio_mtx, (f)) 272 #define AIO_MTX(ki) (&(ki)->kaio_mtx) 273 274 #define KAIO_RUNDOWN 0x1 /* process is being run down */ 275 #define KAIO_WAKEUP 0x2 /* wakeup process when AIO completes */ 276 277 /* 278 * Operations used to interact with userland aio control blocks. 279 * Different ABIs provide their own operations. 280 */ 281 struct aiocb_ops { 282 int (*copyin)(struct aiocb *ujob, struct aiocb *kjob); 283 long (*fetch_status)(struct aiocb *ujob); 284 long (*fetch_error)(struct aiocb *ujob); 285 int (*store_status)(struct aiocb *ujob, long status); 286 int (*store_error)(struct aiocb *ujob, long error); 287 int (*store_kernelinfo)(struct aiocb *ujob, long jobref); 288 int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob); 289 }; 290 291 static TAILQ_HEAD(,aioproc) aio_freeproc; /* (c) Idle daemons */ 292 static struct sema aio_newproc_sem; 293 static struct mtx aio_job_mtx; 294 static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */ 295 static struct unrhdr *aiod_unr; 296 297 void aio_init_aioinfo(struct proc *p); 298 static int aio_onceonly(void); 299 static int aio_free_entry(struct kaiocb *job); 300 static void aio_process_rw(struct kaiocb *job); 301 static void aio_process_sync(struct kaiocb *job); 302 static void aio_process_mlock(struct kaiocb *job); 303 static void aio_schedule_fsync(void *context, int pending); 304 static int aio_newproc(int *); 305 int aio_aqueue(struct thread *td, struct aiocb *ujob, 306 struct aioliojob *lio, int type, struct aiocb_ops *ops); 307 static int aio_queue_file(struct file *fp, struct kaiocb *job); 308 static void aio_physwakeup(struct bio *bp); 309 static void aio_proc_rundown(void *arg, struct proc *p); 310 static void aio_proc_rundown_exec(void *arg, struct proc *p, 311 struct image_params *imgp); 312 static int aio_qphysio(struct proc *p, struct kaiocb *job); 313 static void aio_daemon(void *param); 314 static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job); 315 static bool aio_clear_cancel_function_locked(struct kaiocb *job); 316 static int aio_kick(struct proc *userp); 317 static void aio_kick_nowait(struct proc *userp); 318 static void aio_kick_helper(void *context, int pending); 319 static int filt_aioattach(struct knote *kn); 320 static void filt_aiodetach(struct knote *kn); 321 static int filt_aio(struct knote *kn, long hint); 322 static int filt_lioattach(struct knote *kn); 323 static void filt_liodetach(struct knote *kn); 324 static int filt_lio(struct knote *kn, long hint); 325 326 /* 327 * Zones for: 328 * kaio Per process async io info 329 * aiop async io process data 330 * aiocb async io jobs 331 * aiol list io job pointer - internal to aio_suspend XXX 332 * aiolio list io jobs 333 */ 334 static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiol_zone, aiolio_zone; 335 336 /* kqueue filters for aio */ 337 static struct filterops aio_filtops = { 338 .f_isfd = 0, 339 .f_attach = filt_aioattach, 340 .f_detach = filt_aiodetach, 341 .f_event = filt_aio, 342 }; 343 static struct filterops lio_filtops = { 344 .f_isfd = 0, 345 .f_attach = filt_lioattach, 346 .f_detach = filt_liodetach, 347 .f_event = filt_lio 348 }; 349 350 static eventhandler_tag exit_tag, exec_tag; 351 352 TASKQUEUE_DEFINE_THREAD(aiod_kick); 353 354 /* 355 * Main operations function for use as a kernel module. 356 */ 357 static int 358 aio_modload(struct module *module, int cmd, void *arg) 359 { 360 int error = 0; 361 362 switch (cmd) { 363 case MOD_LOAD: 364 aio_onceonly(); 365 break; 366 case MOD_SHUTDOWN: 367 break; 368 default: 369 error = EOPNOTSUPP; 370 break; 371 } 372 return (error); 373 } 374 375 static moduledata_t aio_mod = { 376 "aio", 377 &aio_modload, 378 NULL 379 }; 380 381 DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY); 382 MODULE_VERSION(aio, 1); 383 384 /* 385 * Startup initialization 386 */ 387 static int 388 aio_onceonly(void) 389 { 390 391 exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL, 392 EVENTHANDLER_PRI_ANY); 393 exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, 394 NULL, EVENTHANDLER_PRI_ANY); 395 kqueue_add_filteropts(EVFILT_AIO, &aio_filtops); 396 kqueue_add_filteropts(EVFILT_LIO, &lio_filtops); 397 TAILQ_INIT(&aio_freeproc); 398 sema_init(&aio_newproc_sem, 0, "aio_new_proc"); 399 mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF); 400 TAILQ_INIT(&aio_jobs); 401 aiod_unr = new_unrhdr(1, INT_MAX, NULL); 402 kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL, 403 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 404 aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL, 405 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 406 aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL, 407 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 408 aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL, 409 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 410 aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL, 411 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 412 aiod_lifetime = AIOD_LIFETIME_DEFAULT; 413 jobrefid = 1; 414 p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO); 415 p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, AIO_LISTIO_MAX); 416 p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE); 417 p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0); 418 419 return (0); 420 } 421 422 /* 423 * Init the per-process aioinfo structure. The aioinfo limits are set 424 * per-process for user limit (resource) management. 425 */ 426 void 427 aio_init_aioinfo(struct proc *p) 428 { 429 struct kaioinfo *ki; 430 431 ki = uma_zalloc(kaio_zone, M_WAITOK); 432 mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW); 433 ki->kaio_flags = 0; 434 ki->kaio_maxactive_count = max_aio_per_proc; 435 ki->kaio_active_count = 0; 436 ki->kaio_qallowed_count = max_aio_queue_per_proc; 437 ki->kaio_count = 0; 438 ki->kaio_ballowed_count = max_buf_aio; 439 ki->kaio_buffer_count = 0; 440 TAILQ_INIT(&ki->kaio_all); 441 TAILQ_INIT(&ki->kaio_done); 442 TAILQ_INIT(&ki->kaio_jobqueue); 443 TAILQ_INIT(&ki->kaio_liojoblist); 444 TAILQ_INIT(&ki->kaio_syncqueue); 445 TAILQ_INIT(&ki->kaio_syncready); 446 TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p); 447 TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki); 448 PROC_LOCK(p); 449 if (p->p_aioinfo == NULL) { 450 p->p_aioinfo = ki; 451 PROC_UNLOCK(p); 452 } else { 453 PROC_UNLOCK(p); 454 mtx_destroy(&ki->kaio_mtx); 455 uma_zfree(kaio_zone, ki); 456 } 457 458 while (num_aio_procs < MIN(target_aio_procs, max_aio_procs)) 459 aio_newproc(NULL); 460 } 461 462 static int 463 aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi) 464 { 465 struct thread *td; 466 int error; 467 468 error = sigev_findtd(p, sigev, &td); 469 if (error) 470 return (error); 471 if (!KSI_ONQ(ksi)) { 472 ksiginfo_set_sigev(ksi, sigev); 473 ksi->ksi_code = SI_ASYNCIO; 474 ksi->ksi_flags |= KSI_EXT | KSI_INS; 475 tdsendsignal(p, td, ksi->ksi_signo, ksi); 476 } 477 PROC_UNLOCK(p); 478 return (error); 479 } 480 481 /* 482 * Free a job entry. Wait for completion if it is currently active, but don't 483 * delay forever. If we delay, we return a flag that says that we have to 484 * restart the queue scan. 485 */ 486 static int 487 aio_free_entry(struct kaiocb *job) 488 { 489 struct kaioinfo *ki; 490 struct aioliojob *lj; 491 struct proc *p; 492 493 p = job->userproc; 494 MPASS(curproc == p); 495 ki = p->p_aioinfo; 496 MPASS(ki != NULL); 497 498 AIO_LOCK_ASSERT(ki, MA_OWNED); 499 MPASS(job->jobflags & KAIOCB_FINISHED); 500 501 atomic_subtract_int(&num_queue_count, 1); 502 503 ki->kaio_count--; 504 MPASS(ki->kaio_count >= 0); 505 506 TAILQ_REMOVE(&ki->kaio_done, job, plist); 507 TAILQ_REMOVE(&ki->kaio_all, job, allist); 508 509 lj = job->lio; 510 if (lj) { 511 lj->lioj_count--; 512 lj->lioj_finished_count--; 513 514 if (lj->lioj_count == 0) { 515 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 516 /* lio is going away, we need to destroy any knotes */ 517 knlist_delete(&lj->klist, curthread, 1); 518 PROC_LOCK(p); 519 sigqueue_take(&lj->lioj_ksi); 520 PROC_UNLOCK(p); 521 uma_zfree(aiolio_zone, lj); 522 } 523 } 524 525 /* job is going away, we need to destroy any knotes */ 526 knlist_delete(&job->klist, curthread, 1); 527 PROC_LOCK(p); 528 sigqueue_take(&job->ksi); 529 PROC_UNLOCK(p); 530 531 AIO_UNLOCK(ki); 532 533 /* 534 * The thread argument here is used to find the owning process 535 * and is also passed to fo_close() which may pass it to various 536 * places such as devsw close() routines. Because of that, we 537 * need a thread pointer from the process owning the job that is 538 * persistent and won't disappear out from under us or move to 539 * another process. 540 * 541 * Currently, all the callers of this function call it to remove 542 * a kaiocb from the current process' job list either via a 543 * syscall or due to the current process calling exit() or 544 * execve(). Thus, we know that p == curproc. We also know that 545 * curthread can't exit since we are curthread. 546 * 547 * Therefore, we use curthread as the thread to pass to 548 * knlist_delete(). This does mean that it is possible for the 549 * thread pointer at close time to differ from the thread pointer 550 * at open time, but this is already true of file descriptors in 551 * a multithreaded process. 552 */ 553 if (job->fd_file) 554 fdrop(job->fd_file, curthread); 555 crfree(job->cred); 556 uma_zfree(aiocb_zone, job); 557 AIO_LOCK(ki); 558 559 return (0); 560 } 561 562 static void 563 aio_proc_rundown_exec(void *arg, struct proc *p, 564 struct image_params *imgp __unused) 565 { 566 aio_proc_rundown(arg, p); 567 } 568 569 static int 570 aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job) 571 { 572 aio_cancel_fn_t *func; 573 int cancelled; 574 575 AIO_LOCK_ASSERT(ki, MA_OWNED); 576 if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED)) 577 return (0); 578 MPASS((job->jobflags & KAIOCB_CANCELLING) == 0); 579 job->jobflags |= KAIOCB_CANCELLED; 580 581 func = job->cancel_fn; 582 583 /* 584 * If there is no cancel routine, just leave the job marked as 585 * cancelled. The job should be in active use by a caller who 586 * should complete it normally or when it fails to install a 587 * cancel routine. 588 */ 589 if (func == NULL) 590 return (0); 591 592 /* 593 * Set the CANCELLING flag so that aio_complete() will defer 594 * completions of this job. This prevents the job from being 595 * freed out from under the cancel callback. After the 596 * callback any deferred completion (whether from the callback 597 * or any other source) will be completed. 598 */ 599 job->jobflags |= KAIOCB_CANCELLING; 600 AIO_UNLOCK(ki); 601 func(job); 602 AIO_LOCK(ki); 603 job->jobflags &= ~KAIOCB_CANCELLING; 604 if (job->jobflags & KAIOCB_FINISHED) { 605 cancelled = job->uaiocb._aiocb_private.error == ECANCELED; 606 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); 607 aio_bio_done_notify(p, job); 608 } else { 609 /* 610 * The cancel callback might have scheduled an 611 * operation to cancel this request, but it is 612 * only counted as cancelled if the request is 613 * cancelled when the callback returns. 614 */ 615 cancelled = 0; 616 } 617 return (cancelled); 618 } 619 620 /* 621 * Rundown the jobs for a given process. 622 */ 623 static void 624 aio_proc_rundown(void *arg, struct proc *p) 625 { 626 struct kaioinfo *ki; 627 struct aioliojob *lj; 628 struct kaiocb *job, *jobn; 629 630 KASSERT(curthread->td_proc == p, 631 ("%s: called on non-curproc", __func__)); 632 ki = p->p_aioinfo; 633 if (ki == NULL) 634 return; 635 636 AIO_LOCK(ki); 637 ki->kaio_flags |= KAIO_RUNDOWN; 638 639 restart: 640 641 /* 642 * Try to cancel all pending requests. This code simulates 643 * aio_cancel on all pending I/O requests. 644 */ 645 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { 646 aio_cancel_job(p, ki, job); 647 } 648 649 /* Wait for all running I/O to be finished */ 650 if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) { 651 ki->kaio_flags |= KAIO_WAKEUP; 652 msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz); 653 goto restart; 654 } 655 656 /* Free all completed I/O requests. */ 657 while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL) 658 aio_free_entry(job); 659 660 while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) { 661 if (lj->lioj_count == 0) { 662 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 663 knlist_delete(&lj->klist, curthread, 1); 664 PROC_LOCK(p); 665 sigqueue_take(&lj->lioj_ksi); 666 PROC_UNLOCK(p); 667 uma_zfree(aiolio_zone, lj); 668 } else { 669 panic("LIO job not cleaned up: C:%d, FC:%d\n", 670 lj->lioj_count, lj->lioj_finished_count); 671 } 672 } 673 AIO_UNLOCK(ki); 674 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task); 675 taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task); 676 mtx_destroy(&ki->kaio_mtx); 677 uma_zfree(kaio_zone, ki); 678 p->p_aioinfo = NULL; 679 } 680 681 /* 682 * Select a job to run (called by an AIO daemon). 683 */ 684 static struct kaiocb * 685 aio_selectjob(struct aioproc *aiop) 686 { 687 struct kaiocb *job; 688 struct kaioinfo *ki; 689 struct proc *userp; 690 691 mtx_assert(&aio_job_mtx, MA_OWNED); 692 restart: 693 TAILQ_FOREACH(job, &aio_jobs, list) { 694 userp = job->userproc; 695 ki = userp->p_aioinfo; 696 697 if (ki->kaio_active_count < ki->kaio_maxactive_count) { 698 TAILQ_REMOVE(&aio_jobs, job, list); 699 if (!aio_clear_cancel_function(job)) 700 goto restart; 701 702 /* Account for currently active jobs. */ 703 ki->kaio_active_count++; 704 break; 705 } 706 } 707 return (job); 708 } 709 710 /* 711 * Move all data to a permanent storage device. This code 712 * simulates the fsync syscall. 713 */ 714 static int 715 aio_fsync_vnode(struct thread *td, struct vnode *vp) 716 { 717 struct mount *mp; 718 int error; 719 720 if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) 721 goto drop; 722 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 723 if (vp->v_object != NULL) { 724 VM_OBJECT_WLOCK(vp->v_object); 725 vm_object_page_clean(vp->v_object, 0, 0, 0); 726 VM_OBJECT_WUNLOCK(vp->v_object); 727 } 728 error = VOP_FSYNC(vp, MNT_WAIT, td); 729 730 VOP_UNLOCK(vp, 0); 731 vn_finished_write(mp); 732 drop: 733 return (error); 734 } 735 736 /* 737 * The AIO processing activity for LIO_READ/LIO_WRITE. This is the code that 738 * does the I/O request for the non-physio version of the operations. The 739 * normal vn operations are used, and this code should work in all instances 740 * for every type of file, including pipes, sockets, fifos, and regular files. 741 * 742 * XXX I don't think it works well for socket, pipe, and fifo. 743 */ 744 static void 745 aio_process_rw(struct kaiocb *job) 746 { 747 struct ucred *td_savedcred; 748 struct thread *td; 749 struct aiocb *cb; 750 struct file *fp; 751 struct uio auio; 752 struct iovec aiov; 753 ssize_t cnt; 754 long msgsnd_st, msgsnd_end; 755 long msgrcv_st, msgrcv_end; 756 long oublock_st, oublock_end; 757 long inblock_st, inblock_end; 758 int error; 759 760 KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ || 761 job->uaiocb.aio_lio_opcode == LIO_WRITE, 762 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 763 764 aio_switch_vmspace(job); 765 td = curthread; 766 td_savedcred = td->td_ucred; 767 td->td_ucred = job->cred; 768 cb = &job->uaiocb; 769 fp = job->fd_file; 770 771 aiov.iov_base = (void *)(uintptr_t)cb->aio_buf; 772 aiov.iov_len = cb->aio_nbytes; 773 774 auio.uio_iov = &aiov; 775 auio.uio_iovcnt = 1; 776 auio.uio_offset = cb->aio_offset; 777 auio.uio_resid = cb->aio_nbytes; 778 cnt = cb->aio_nbytes; 779 auio.uio_segflg = UIO_USERSPACE; 780 auio.uio_td = td; 781 782 msgrcv_st = td->td_ru.ru_msgrcv; 783 msgsnd_st = td->td_ru.ru_msgsnd; 784 inblock_st = td->td_ru.ru_inblock; 785 oublock_st = td->td_ru.ru_oublock; 786 787 /* 788 * aio_aqueue() acquires a reference to the file that is 789 * released in aio_free_entry(). 790 */ 791 if (cb->aio_lio_opcode == LIO_READ) { 792 auio.uio_rw = UIO_READ; 793 if (auio.uio_resid == 0) 794 error = 0; 795 else 796 error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); 797 } else { 798 if (fp->f_type == DTYPE_VNODE) 799 bwillwrite(); 800 auio.uio_rw = UIO_WRITE; 801 error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); 802 } 803 msgrcv_end = td->td_ru.ru_msgrcv; 804 msgsnd_end = td->td_ru.ru_msgsnd; 805 inblock_end = td->td_ru.ru_inblock; 806 oublock_end = td->td_ru.ru_oublock; 807 808 job->msgrcv = msgrcv_end - msgrcv_st; 809 job->msgsnd = msgsnd_end - msgsnd_st; 810 job->inblock = inblock_end - inblock_st; 811 job->outblock = oublock_end - oublock_st; 812 813 if ((error) && (auio.uio_resid != cnt)) { 814 if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) 815 error = 0; 816 if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { 817 PROC_LOCK(job->userproc); 818 kern_psignal(job->userproc, SIGPIPE); 819 PROC_UNLOCK(job->userproc); 820 } 821 } 822 823 cnt -= auio.uio_resid; 824 td->td_ucred = td_savedcred; 825 if (error) 826 aio_complete(job, -1, error); 827 else 828 aio_complete(job, cnt, 0); 829 } 830 831 static void 832 aio_process_sync(struct kaiocb *job) 833 { 834 struct thread *td = curthread; 835 struct ucred *td_savedcred = td->td_ucred; 836 struct file *fp = job->fd_file; 837 int error = 0; 838 839 KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC, 840 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 841 842 td->td_ucred = job->cred; 843 if (fp->f_vnode != NULL) 844 error = aio_fsync_vnode(td, fp->f_vnode); 845 td->td_ucred = td_savedcred; 846 if (error) 847 aio_complete(job, -1, error); 848 else 849 aio_complete(job, 0, 0); 850 } 851 852 static void 853 aio_process_mlock(struct kaiocb *job) 854 { 855 struct aiocb *cb = &job->uaiocb; 856 int error; 857 858 KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK, 859 ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); 860 861 aio_switch_vmspace(job); 862 error = kern_mlock(job->userproc, job->cred, 863 __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes); 864 aio_complete(job, error != 0 ? -1 : 0, error); 865 } 866 867 static void 868 aio_bio_done_notify(struct proc *userp, struct kaiocb *job) 869 { 870 struct aioliojob *lj; 871 struct kaioinfo *ki; 872 struct kaiocb *sjob, *sjobn; 873 int lj_done; 874 bool schedule_fsync; 875 876 ki = userp->p_aioinfo; 877 AIO_LOCK_ASSERT(ki, MA_OWNED); 878 lj = job->lio; 879 lj_done = 0; 880 if (lj) { 881 lj->lioj_finished_count++; 882 if (lj->lioj_count == lj->lioj_finished_count) 883 lj_done = 1; 884 } 885 TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist); 886 MPASS(job->jobflags & KAIOCB_FINISHED); 887 888 if (ki->kaio_flags & KAIO_RUNDOWN) 889 goto notification_done; 890 891 if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 892 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) 893 aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi); 894 895 KNOTE_LOCKED(&job->klist, 1); 896 897 if (lj_done) { 898 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 899 lj->lioj_flags |= LIOJ_KEVENT_POSTED; 900 KNOTE_LOCKED(&lj->klist, 1); 901 } 902 if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) 903 == LIOJ_SIGNAL 904 && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 905 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 906 aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi); 907 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 908 } 909 } 910 911 notification_done: 912 if (job->jobflags & KAIOCB_CHECKSYNC) { 913 schedule_fsync = false; 914 TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) { 915 if (job->fd_file != sjob->fd_file || 916 job->seqno >= sjob->seqno) 917 continue; 918 if (--sjob->pending > 0) 919 continue; 920 TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list); 921 if (!aio_clear_cancel_function_locked(sjob)) 922 continue; 923 TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list); 924 schedule_fsync = true; 925 } 926 if (schedule_fsync) 927 taskqueue_enqueue(taskqueue_aiod_kick, 928 &ki->kaio_sync_task); 929 } 930 if (ki->kaio_flags & KAIO_WAKEUP) { 931 ki->kaio_flags &= ~KAIO_WAKEUP; 932 wakeup(&userp->p_aioinfo); 933 } 934 } 935 936 static void 937 aio_schedule_fsync(void *context, int pending) 938 { 939 struct kaioinfo *ki; 940 struct kaiocb *job; 941 942 ki = context; 943 AIO_LOCK(ki); 944 while (!TAILQ_EMPTY(&ki->kaio_syncready)) { 945 job = TAILQ_FIRST(&ki->kaio_syncready); 946 TAILQ_REMOVE(&ki->kaio_syncready, job, list); 947 AIO_UNLOCK(ki); 948 aio_schedule(job, aio_process_sync); 949 AIO_LOCK(ki); 950 } 951 AIO_UNLOCK(ki); 952 } 953 954 bool 955 aio_cancel_cleared(struct kaiocb *job) 956 { 957 struct kaioinfo *ki; 958 959 /* 960 * The caller should hold the same queue lock held when 961 * aio_clear_cancel_function() was called and set this flag 962 * ensuring this check sees an up-to-date value. However, 963 * there is no way to assert that. 964 */ 965 ki = job->userproc->p_aioinfo; 966 return ((job->jobflags & KAIOCB_CLEARED) != 0); 967 } 968 969 static bool 970 aio_clear_cancel_function_locked(struct kaiocb *job) 971 { 972 973 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); 974 MPASS(job->cancel_fn != NULL); 975 if (job->jobflags & KAIOCB_CANCELLING) { 976 job->jobflags |= KAIOCB_CLEARED; 977 return (false); 978 } 979 job->cancel_fn = NULL; 980 return (true); 981 } 982 983 bool 984 aio_clear_cancel_function(struct kaiocb *job) 985 { 986 struct kaioinfo *ki; 987 bool ret; 988 989 ki = job->userproc->p_aioinfo; 990 AIO_LOCK(ki); 991 ret = aio_clear_cancel_function_locked(job); 992 AIO_UNLOCK(ki); 993 return (ret); 994 } 995 996 static bool 997 aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func) 998 { 999 1000 AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); 1001 if (job->jobflags & KAIOCB_CANCELLED) 1002 return (false); 1003 job->cancel_fn = func; 1004 return (true); 1005 } 1006 1007 bool 1008 aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func) 1009 { 1010 struct kaioinfo *ki; 1011 bool ret; 1012 1013 ki = job->userproc->p_aioinfo; 1014 AIO_LOCK(ki); 1015 ret = aio_set_cancel_function_locked(job, func); 1016 AIO_UNLOCK(ki); 1017 return (ret); 1018 } 1019 1020 void 1021 aio_complete(struct kaiocb *job, long status, int error) 1022 { 1023 struct kaioinfo *ki; 1024 struct proc *userp; 1025 1026 job->uaiocb._aiocb_private.error = error; 1027 job->uaiocb._aiocb_private.status = status; 1028 1029 userp = job->userproc; 1030 ki = userp->p_aioinfo; 1031 1032 AIO_LOCK(ki); 1033 KASSERT(!(job->jobflags & KAIOCB_FINISHED), 1034 ("duplicate aio_complete")); 1035 job->jobflags |= KAIOCB_FINISHED; 1036 if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) { 1037 TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); 1038 aio_bio_done_notify(userp, job); 1039 } 1040 AIO_UNLOCK(ki); 1041 } 1042 1043 void 1044 aio_cancel(struct kaiocb *job) 1045 { 1046 1047 aio_complete(job, -1, ECANCELED); 1048 } 1049 1050 void 1051 aio_switch_vmspace(struct kaiocb *job) 1052 { 1053 1054 vmspace_switch_aio(job->userproc->p_vmspace); 1055 } 1056 1057 /* 1058 * The AIO daemon, most of the actual work is done in aio_process_*, 1059 * but the setup (and address space mgmt) is done in this routine. 1060 */ 1061 static void 1062 aio_daemon(void *_id) 1063 { 1064 struct kaiocb *job; 1065 struct aioproc *aiop; 1066 struct kaioinfo *ki; 1067 struct proc *p; 1068 struct vmspace *myvm; 1069 struct thread *td = curthread; 1070 int id = (intptr_t)_id; 1071 1072 /* 1073 * Grab an extra reference on the daemon's vmspace so that it 1074 * doesn't get freed by jobs that switch to a different 1075 * vmspace. 1076 */ 1077 p = td->td_proc; 1078 myvm = vmspace_acquire_ref(p); 1079 1080 KASSERT(p->p_textvp == NULL, ("kthread has a textvp")); 1081 1082 /* 1083 * Allocate and ready the aio control info. There is one aiop structure 1084 * per daemon. 1085 */ 1086 aiop = uma_zalloc(aiop_zone, M_WAITOK); 1087 aiop->aioproc = p; 1088 aiop->aioprocflags = 0; 1089 1090 /* 1091 * Wakeup parent process. (Parent sleeps to keep from blasting away 1092 * and creating too many daemons.) 1093 */ 1094 sema_post(&aio_newproc_sem); 1095 1096 mtx_lock(&aio_job_mtx); 1097 for (;;) { 1098 /* 1099 * Take daemon off of free queue 1100 */ 1101 if (aiop->aioprocflags & AIOP_FREE) { 1102 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1103 aiop->aioprocflags &= ~AIOP_FREE; 1104 } 1105 1106 /* 1107 * Check for jobs. 1108 */ 1109 while ((job = aio_selectjob(aiop)) != NULL) { 1110 mtx_unlock(&aio_job_mtx); 1111 1112 ki = job->userproc->p_aioinfo; 1113 job->handle_fn(job); 1114 1115 mtx_lock(&aio_job_mtx); 1116 /* Decrement the active job count. */ 1117 ki->kaio_active_count--; 1118 } 1119 1120 /* 1121 * Disconnect from user address space. 1122 */ 1123 if (p->p_vmspace != myvm) { 1124 mtx_unlock(&aio_job_mtx); 1125 vmspace_switch_aio(myvm); 1126 mtx_lock(&aio_job_mtx); 1127 /* 1128 * We have to restart to avoid race, we only sleep if 1129 * no job can be selected. 1130 */ 1131 continue; 1132 } 1133 1134 mtx_assert(&aio_job_mtx, MA_OWNED); 1135 1136 TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); 1137 aiop->aioprocflags |= AIOP_FREE; 1138 1139 /* 1140 * If daemon is inactive for a long time, allow it to exit, 1141 * thereby freeing resources. 1142 */ 1143 if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy", 1144 aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) && 1145 (aiop->aioprocflags & AIOP_FREE) && 1146 num_aio_procs > target_aio_procs) 1147 break; 1148 } 1149 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1150 num_aio_procs--; 1151 mtx_unlock(&aio_job_mtx); 1152 uma_zfree(aiop_zone, aiop); 1153 free_unr(aiod_unr, id); 1154 vmspace_free(myvm); 1155 1156 KASSERT(p->p_vmspace == myvm, 1157 ("AIOD: bad vmspace for exiting daemon")); 1158 KASSERT(myvm->vm_refcnt > 1, 1159 ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt)); 1160 kproc_exit(0); 1161 } 1162 1163 /* 1164 * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The 1165 * AIO daemon modifies its environment itself. 1166 */ 1167 static int 1168 aio_newproc(int *start) 1169 { 1170 int error; 1171 struct proc *p; 1172 int id; 1173 1174 id = alloc_unr(aiod_unr); 1175 error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p, 1176 RFNOWAIT, 0, "aiod%d", id); 1177 if (error == 0) { 1178 /* 1179 * Wait until daemon is started. 1180 */ 1181 sema_wait(&aio_newproc_sem); 1182 mtx_lock(&aio_job_mtx); 1183 num_aio_procs++; 1184 if (start != NULL) 1185 (*start)--; 1186 mtx_unlock(&aio_job_mtx); 1187 } else { 1188 free_unr(aiod_unr, id); 1189 } 1190 return (error); 1191 } 1192 1193 /* 1194 * Try the high-performance, low-overhead physio method for eligible 1195 * VCHR devices. This method doesn't use an aio helper thread, and 1196 * thus has very low overhead. 1197 * 1198 * Assumes that the caller, aio_aqueue(), has incremented the file 1199 * structure's reference count, preventing its deallocation for the 1200 * duration of this call. 1201 */ 1202 static int 1203 aio_qphysio(struct proc *p, struct kaiocb *job) 1204 { 1205 struct aiocb *cb; 1206 struct file *fp; 1207 struct bio *bp; 1208 struct buf *pbuf; 1209 struct vnode *vp; 1210 struct cdevsw *csw; 1211 struct cdev *dev; 1212 struct kaioinfo *ki; 1213 int error, ref, poff; 1214 vm_prot_t prot; 1215 1216 cb = &job->uaiocb; 1217 fp = job->fd_file; 1218 1219 if (fp == NULL || fp->f_type != DTYPE_VNODE) 1220 return (-1); 1221 1222 vp = fp->f_vnode; 1223 if (vp->v_type != VCHR) 1224 return (-1); 1225 if (vp->v_bufobj.bo_bsize == 0) 1226 return (-1); 1227 if (cb->aio_nbytes % vp->v_bufobj.bo_bsize) 1228 return (-1); 1229 1230 ref = 0; 1231 csw = devvn_refthread(vp, &dev, &ref); 1232 if (csw == NULL) 1233 return (ENXIO); 1234 1235 if ((csw->d_flags & D_DISK) == 0) { 1236 error = -1; 1237 goto unref; 1238 } 1239 if (cb->aio_nbytes > dev->si_iosize_max) { 1240 error = -1; 1241 goto unref; 1242 } 1243 1244 ki = p->p_aioinfo; 1245 poff = (vm_offset_t)cb->aio_buf & PAGE_MASK; 1246 if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { 1247 if (cb->aio_nbytes > MAXPHYS) { 1248 error = -1; 1249 goto unref; 1250 } 1251 1252 pbuf = NULL; 1253 } else { 1254 if (cb->aio_nbytes > MAXPHYS - poff) { 1255 error = -1; 1256 goto unref; 1257 } 1258 if (ki->kaio_buffer_count >= ki->kaio_ballowed_count) { 1259 error = -1; 1260 goto unref; 1261 } 1262 1263 job->pbuf = pbuf = (struct buf *)getpbuf(NULL); 1264 BUF_KERNPROC(pbuf); 1265 AIO_LOCK(ki); 1266 ki->kaio_buffer_count++; 1267 AIO_UNLOCK(ki); 1268 } 1269 job->bp = bp = g_alloc_bio(); 1270 1271 bp->bio_length = cb->aio_nbytes; 1272 bp->bio_bcount = cb->aio_nbytes; 1273 bp->bio_done = aio_physwakeup; 1274 bp->bio_data = (void *)(uintptr_t)cb->aio_buf; 1275 bp->bio_offset = cb->aio_offset; 1276 bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ; 1277 bp->bio_dev = dev; 1278 bp->bio_caller1 = (void *)job; 1279 1280 prot = VM_PROT_READ; 1281 if (cb->aio_lio_opcode == LIO_READ) 1282 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 1283 job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 1284 (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages, 1285 nitems(job->pages)); 1286 if (job->npages < 0) { 1287 error = EFAULT; 1288 goto doerror; 1289 } 1290 if (pbuf != NULL) { 1291 pmap_qenter((vm_offset_t)pbuf->b_data, 1292 job->pages, job->npages); 1293 bp->bio_data = pbuf->b_data + poff; 1294 atomic_add_int(&num_buf_aio, 1); 1295 } else { 1296 bp->bio_ma = job->pages; 1297 bp->bio_ma_n = job->npages; 1298 bp->bio_ma_offset = poff; 1299 bp->bio_data = unmapped_buf; 1300 bp->bio_flags |= BIO_UNMAPPED; 1301 } 1302 1303 /* Perform transfer. */ 1304 csw->d_strategy(bp); 1305 dev_relthread(dev, ref); 1306 return (0); 1307 1308 doerror: 1309 if (pbuf != NULL) { 1310 AIO_LOCK(ki); 1311 ki->kaio_buffer_count--; 1312 AIO_UNLOCK(ki); 1313 relpbuf(pbuf, NULL); 1314 job->pbuf = NULL; 1315 } 1316 g_destroy_bio(bp); 1317 job->bp = NULL; 1318 unref: 1319 dev_relthread(dev, ref); 1320 return (error); 1321 } 1322 1323 #ifdef COMPAT_FREEBSD6 1324 static int 1325 convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig) 1326 { 1327 1328 /* 1329 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are 1330 * supported by AIO with the old sigevent structure. 1331 */ 1332 nsig->sigev_notify = osig->sigev_notify; 1333 switch (nsig->sigev_notify) { 1334 case SIGEV_NONE: 1335 break; 1336 case SIGEV_SIGNAL: 1337 nsig->sigev_signo = osig->__sigev_u.__sigev_signo; 1338 break; 1339 case SIGEV_KEVENT: 1340 nsig->sigev_notify_kqueue = 1341 osig->__sigev_u.__sigev_notify_kqueue; 1342 nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr; 1343 break; 1344 default: 1345 return (EINVAL); 1346 } 1347 return (0); 1348 } 1349 1350 static int 1351 aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob) 1352 { 1353 struct oaiocb *ojob; 1354 int error; 1355 1356 bzero(kjob, sizeof(struct aiocb)); 1357 error = copyin(ujob, kjob, sizeof(struct oaiocb)); 1358 if (error) 1359 return (error); 1360 ojob = (struct oaiocb *)kjob; 1361 return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent)); 1362 } 1363 #endif 1364 1365 static int 1366 aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob) 1367 { 1368 1369 return (copyin(ujob, kjob, sizeof(struct aiocb))); 1370 } 1371 1372 static long 1373 aiocb_fetch_status(struct aiocb *ujob) 1374 { 1375 1376 return (fuword(&ujob->_aiocb_private.status)); 1377 } 1378 1379 static long 1380 aiocb_fetch_error(struct aiocb *ujob) 1381 { 1382 1383 return (fuword(&ujob->_aiocb_private.error)); 1384 } 1385 1386 static int 1387 aiocb_store_status(struct aiocb *ujob, long status) 1388 { 1389 1390 return (suword(&ujob->_aiocb_private.status, status)); 1391 } 1392 1393 static int 1394 aiocb_store_error(struct aiocb *ujob, long error) 1395 { 1396 1397 return (suword(&ujob->_aiocb_private.error, error)); 1398 } 1399 1400 static int 1401 aiocb_store_kernelinfo(struct aiocb *ujob, long jobref) 1402 { 1403 1404 return (suword(&ujob->_aiocb_private.kernelinfo, jobref)); 1405 } 1406 1407 static int 1408 aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) 1409 { 1410 1411 return (suword(ujobp, (long)ujob)); 1412 } 1413 1414 static struct aiocb_ops aiocb_ops = { 1415 .copyin = aiocb_copyin, 1416 .fetch_status = aiocb_fetch_status, 1417 .fetch_error = aiocb_fetch_error, 1418 .store_status = aiocb_store_status, 1419 .store_error = aiocb_store_error, 1420 .store_kernelinfo = aiocb_store_kernelinfo, 1421 .store_aiocb = aiocb_store_aiocb, 1422 }; 1423 1424 #ifdef COMPAT_FREEBSD6 1425 static struct aiocb_ops aiocb_ops_osigevent = { 1426 .copyin = aiocb_copyin_old_sigevent, 1427 .fetch_status = aiocb_fetch_status, 1428 .fetch_error = aiocb_fetch_error, 1429 .store_status = aiocb_store_status, 1430 .store_error = aiocb_store_error, 1431 .store_kernelinfo = aiocb_store_kernelinfo, 1432 .store_aiocb = aiocb_store_aiocb, 1433 }; 1434 #endif 1435 1436 /* 1437 * Queue a new AIO request. Choosing either the threaded or direct physio VCHR 1438 * technique is done in this code. 1439 */ 1440 int 1441 aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj, 1442 int type, struct aiocb_ops *ops) 1443 { 1444 struct proc *p = td->td_proc; 1445 cap_rights_t rights; 1446 struct file *fp; 1447 struct kaiocb *job; 1448 struct kaioinfo *ki; 1449 struct kevent kev; 1450 int opcode; 1451 int error; 1452 int fd, kqfd; 1453 int jid; 1454 u_short evflags; 1455 1456 if (p->p_aioinfo == NULL) 1457 aio_init_aioinfo(p); 1458 1459 ki = p->p_aioinfo; 1460 1461 ops->store_status(ujob, -1); 1462 ops->store_error(ujob, 0); 1463 ops->store_kernelinfo(ujob, -1); 1464 1465 if (num_queue_count >= max_queue_count || 1466 ki->kaio_count >= ki->kaio_qallowed_count) { 1467 ops->store_error(ujob, EAGAIN); 1468 return (EAGAIN); 1469 } 1470 1471 job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO); 1472 knlist_init_mtx(&job->klist, AIO_MTX(ki)); 1473 1474 error = ops->copyin(ujob, &job->uaiocb); 1475 if (error) { 1476 ops->store_error(ujob, error); 1477 uma_zfree(aiocb_zone, job); 1478 return (error); 1479 } 1480 1481 if (job->uaiocb.aio_nbytes > IOSIZE_MAX) { 1482 uma_zfree(aiocb_zone, job); 1483 return (EINVAL); 1484 } 1485 1486 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT && 1487 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL && 1488 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID && 1489 job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) { 1490 ops->store_error(ujob, EINVAL); 1491 uma_zfree(aiocb_zone, job); 1492 return (EINVAL); 1493 } 1494 1495 if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || 1496 job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) && 1497 !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) { 1498 uma_zfree(aiocb_zone, job); 1499 return (EINVAL); 1500 } 1501 1502 ksiginfo_init(&job->ksi); 1503 1504 /* Save userspace address of the job info. */ 1505 job->ujob = ujob; 1506 1507 /* Get the opcode. */ 1508 if (type != LIO_NOP) 1509 job->uaiocb.aio_lio_opcode = type; 1510 opcode = job->uaiocb.aio_lio_opcode; 1511 1512 /* 1513 * Validate the opcode and fetch the file object for the specified 1514 * file descriptor. 1515 * 1516 * XXXRW: Moved the opcode validation up here so that we don't 1517 * retrieve a file descriptor without knowing what the capabiltity 1518 * should be. 1519 */ 1520 fd = job->uaiocb.aio_fildes; 1521 switch (opcode) { 1522 case LIO_WRITE: 1523 error = fget_write(td, fd, 1524 cap_rights_init(&rights, CAP_PWRITE), &fp); 1525 break; 1526 case LIO_READ: 1527 error = fget_read(td, fd, 1528 cap_rights_init(&rights, CAP_PREAD), &fp); 1529 break; 1530 case LIO_SYNC: 1531 error = fget(td, fd, cap_rights_init(&rights, CAP_FSYNC), &fp); 1532 break; 1533 case LIO_MLOCK: 1534 fp = NULL; 1535 break; 1536 case LIO_NOP: 1537 error = fget(td, fd, cap_rights_init(&rights), &fp); 1538 break; 1539 default: 1540 error = EINVAL; 1541 } 1542 if (error) { 1543 uma_zfree(aiocb_zone, job); 1544 ops->store_error(ujob, error); 1545 return (error); 1546 } 1547 1548 if (opcode == LIO_SYNC && fp->f_vnode == NULL) { 1549 error = EINVAL; 1550 goto aqueue_fail; 1551 } 1552 1553 if ((opcode == LIO_READ || opcode == LIO_WRITE) && 1554 job->uaiocb.aio_offset < 0 && 1555 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) { 1556 error = EINVAL; 1557 goto aqueue_fail; 1558 } 1559 1560 job->fd_file = fp; 1561 1562 mtx_lock(&aio_job_mtx); 1563 jid = jobrefid++; 1564 job->seqno = jobseqno++; 1565 mtx_unlock(&aio_job_mtx); 1566 error = ops->store_kernelinfo(ujob, jid); 1567 if (error) { 1568 error = EINVAL; 1569 goto aqueue_fail; 1570 } 1571 job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid; 1572 1573 if (opcode == LIO_NOP) { 1574 fdrop(fp, td); 1575 uma_zfree(aiocb_zone, job); 1576 return (0); 1577 } 1578 1579 if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT) 1580 goto no_kqueue; 1581 evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags; 1582 if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) { 1583 error = EINVAL; 1584 goto aqueue_fail; 1585 } 1586 kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue; 1587 kev.ident = (uintptr_t)job->ujob; 1588 kev.filter = EVFILT_AIO; 1589 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags; 1590 kev.data = (intptr_t)job; 1591 kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr; 1592 error = kqfd_register(kqfd, &kev, td, 1); 1593 if (error) 1594 goto aqueue_fail; 1595 1596 no_kqueue: 1597 1598 ops->store_error(ujob, EINPROGRESS); 1599 job->uaiocb._aiocb_private.error = EINPROGRESS; 1600 job->userproc = p; 1601 job->cred = crhold(td->td_ucred); 1602 job->jobflags = KAIOCB_QUEUEING; 1603 job->lio = lj; 1604 1605 if (opcode == LIO_MLOCK) { 1606 aio_schedule(job, aio_process_mlock); 1607 error = 0; 1608 } else if (fp->f_ops->fo_aio_queue == NULL) 1609 error = aio_queue_file(fp, job); 1610 else 1611 error = fo_aio_queue(fp, job); 1612 if (error) 1613 goto aqueue_fail; 1614 1615 AIO_LOCK(ki); 1616 job->jobflags &= ~KAIOCB_QUEUEING; 1617 TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist); 1618 ki->kaio_count++; 1619 if (lj) 1620 lj->lioj_count++; 1621 atomic_add_int(&num_queue_count, 1); 1622 if (job->jobflags & KAIOCB_FINISHED) { 1623 /* 1624 * The queue callback completed the request synchronously. 1625 * The bulk of the completion is deferred in that case 1626 * until this point. 1627 */ 1628 aio_bio_done_notify(p, job); 1629 } else 1630 TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist); 1631 AIO_UNLOCK(ki); 1632 return (0); 1633 1634 aqueue_fail: 1635 knlist_delete(&job->klist, curthread, 0); 1636 if (fp) 1637 fdrop(fp, td); 1638 uma_zfree(aiocb_zone, job); 1639 ops->store_error(ujob, error); 1640 return (error); 1641 } 1642 1643 static void 1644 aio_cancel_daemon_job(struct kaiocb *job) 1645 { 1646 1647 mtx_lock(&aio_job_mtx); 1648 if (!aio_cancel_cleared(job)) 1649 TAILQ_REMOVE(&aio_jobs, job, list); 1650 mtx_unlock(&aio_job_mtx); 1651 aio_cancel(job); 1652 } 1653 1654 void 1655 aio_schedule(struct kaiocb *job, aio_handle_fn_t *func) 1656 { 1657 1658 mtx_lock(&aio_job_mtx); 1659 if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) { 1660 mtx_unlock(&aio_job_mtx); 1661 aio_cancel(job); 1662 return; 1663 } 1664 job->handle_fn = func; 1665 TAILQ_INSERT_TAIL(&aio_jobs, job, list); 1666 aio_kick_nowait(job->userproc); 1667 mtx_unlock(&aio_job_mtx); 1668 } 1669 1670 static void 1671 aio_cancel_sync(struct kaiocb *job) 1672 { 1673 struct kaioinfo *ki; 1674 1675 ki = job->userproc->p_aioinfo; 1676 AIO_LOCK(ki); 1677 if (!aio_cancel_cleared(job)) 1678 TAILQ_REMOVE(&ki->kaio_syncqueue, job, list); 1679 AIO_UNLOCK(ki); 1680 aio_cancel(job); 1681 } 1682 1683 int 1684 aio_queue_file(struct file *fp, struct kaiocb *job) 1685 { 1686 struct aioliojob *lj; 1687 struct kaioinfo *ki; 1688 struct kaiocb *job2; 1689 struct vnode *vp; 1690 struct mount *mp; 1691 int error, opcode; 1692 bool safe; 1693 1694 lj = job->lio; 1695 ki = job->userproc->p_aioinfo; 1696 opcode = job->uaiocb.aio_lio_opcode; 1697 if (opcode == LIO_SYNC) 1698 goto queueit; 1699 1700 if ((error = aio_qphysio(job->userproc, job)) == 0) 1701 goto done; 1702 #if 0 1703 /* 1704 * XXX: This means qphysio() failed with EFAULT. The current 1705 * behavior is to retry the operation via fo_read/fo_write. 1706 * Wouldn't it be better to just complete the request with an 1707 * error here? 1708 */ 1709 if (error > 0) 1710 goto done; 1711 #endif 1712 queueit: 1713 safe = false; 1714 if (fp->f_type == DTYPE_VNODE) { 1715 vp = fp->f_vnode; 1716 if (vp->v_type == VREG || vp->v_type == VDIR) { 1717 mp = fp->f_vnode->v_mount; 1718 if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0) 1719 safe = true; 1720 } 1721 } 1722 if (!(safe || enable_aio_unsafe)) { 1723 counted_warning(&unsafe_warningcnt, 1724 "is attempting to use unsafe AIO requests"); 1725 return (EOPNOTSUPP); 1726 } 1727 1728 if (opcode == LIO_SYNC) { 1729 AIO_LOCK(ki); 1730 TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) { 1731 if (job2->fd_file == job->fd_file && 1732 job2->uaiocb.aio_lio_opcode != LIO_SYNC && 1733 job2->seqno < job->seqno) { 1734 job2->jobflags |= KAIOCB_CHECKSYNC; 1735 job->pending++; 1736 } 1737 } 1738 if (job->pending != 0) { 1739 if (!aio_set_cancel_function_locked(job, 1740 aio_cancel_sync)) { 1741 AIO_UNLOCK(ki); 1742 aio_cancel(job); 1743 return (0); 1744 } 1745 TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list); 1746 AIO_UNLOCK(ki); 1747 return (0); 1748 } 1749 AIO_UNLOCK(ki); 1750 } 1751 1752 switch (opcode) { 1753 case LIO_READ: 1754 case LIO_WRITE: 1755 aio_schedule(job, aio_process_rw); 1756 error = 0; 1757 break; 1758 case LIO_SYNC: 1759 aio_schedule(job, aio_process_sync); 1760 error = 0; 1761 break; 1762 default: 1763 error = EINVAL; 1764 } 1765 done: 1766 return (error); 1767 } 1768 1769 static void 1770 aio_kick_nowait(struct proc *userp) 1771 { 1772 struct kaioinfo *ki = userp->p_aioinfo; 1773 struct aioproc *aiop; 1774 1775 mtx_assert(&aio_job_mtx, MA_OWNED); 1776 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1777 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1778 aiop->aioprocflags &= ~AIOP_FREE; 1779 wakeup(aiop->aioproc); 1780 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && 1781 ki->kaio_active_count + num_aio_resv_start < 1782 ki->kaio_maxactive_count) { 1783 taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task); 1784 } 1785 } 1786 1787 static int 1788 aio_kick(struct proc *userp) 1789 { 1790 struct kaioinfo *ki = userp->p_aioinfo; 1791 struct aioproc *aiop; 1792 int error, ret = 0; 1793 1794 mtx_assert(&aio_job_mtx, MA_OWNED); 1795 retryproc: 1796 if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { 1797 TAILQ_REMOVE(&aio_freeproc, aiop, list); 1798 aiop->aioprocflags &= ~AIOP_FREE; 1799 wakeup(aiop->aioproc); 1800 } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && 1801 ki->kaio_active_count + num_aio_resv_start < 1802 ki->kaio_maxactive_count) { 1803 num_aio_resv_start++; 1804 mtx_unlock(&aio_job_mtx); 1805 error = aio_newproc(&num_aio_resv_start); 1806 mtx_lock(&aio_job_mtx); 1807 if (error) { 1808 num_aio_resv_start--; 1809 goto retryproc; 1810 } 1811 } else { 1812 ret = -1; 1813 } 1814 return (ret); 1815 } 1816 1817 static void 1818 aio_kick_helper(void *context, int pending) 1819 { 1820 struct proc *userp = context; 1821 1822 mtx_lock(&aio_job_mtx); 1823 while (--pending >= 0) { 1824 if (aio_kick(userp)) 1825 break; 1826 } 1827 mtx_unlock(&aio_job_mtx); 1828 } 1829 1830 /* 1831 * Support the aio_return system call, as a side-effect, kernel resources are 1832 * released. 1833 */ 1834 static int 1835 kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops) 1836 { 1837 struct proc *p = td->td_proc; 1838 struct kaiocb *job; 1839 struct kaioinfo *ki; 1840 long status, error; 1841 1842 ki = p->p_aioinfo; 1843 if (ki == NULL) 1844 return (EINVAL); 1845 AIO_LOCK(ki); 1846 TAILQ_FOREACH(job, &ki->kaio_done, plist) { 1847 if (job->ujob == ujob) 1848 break; 1849 } 1850 if (job != NULL) { 1851 MPASS(job->jobflags & KAIOCB_FINISHED); 1852 status = job->uaiocb._aiocb_private.status; 1853 error = job->uaiocb._aiocb_private.error; 1854 td->td_retval[0] = status; 1855 td->td_ru.ru_oublock += job->outblock; 1856 td->td_ru.ru_inblock += job->inblock; 1857 td->td_ru.ru_msgsnd += job->msgsnd; 1858 td->td_ru.ru_msgrcv += job->msgrcv; 1859 aio_free_entry(job); 1860 AIO_UNLOCK(ki); 1861 ops->store_error(ujob, error); 1862 ops->store_status(ujob, status); 1863 } else { 1864 error = EINVAL; 1865 AIO_UNLOCK(ki); 1866 } 1867 return (error); 1868 } 1869 1870 int 1871 sys_aio_return(struct thread *td, struct aio_return_args *uap) 1872 { 1873 1874 return (kern_aio_return(td, uap->aiocbp, &aiocb_ops)); 1875 } 1876 1877 /* 1878 * Allow a process to wakeup when any of the I/O requests are completed. 1879 */ 1880 static int 1881 kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist, 1882 struct timespec *ts) 1883 { 1884 struct proc *p = td->td_proc; 1885 struct timeval atv; 1886 struct kaioinfo *ki; 1887 struct kaiocb *firstjob, *job; 1888 int error, i, timo; 1889 1890 timo = 0; 1891 if (ts) { 1892 if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000) 1893 return (EINVAL); 1894 1895 TIMESPEC_TO_TIMEVAL(&atv, ts); 1896 if (itimerfix(&atv)) 1897 return (EINVAL); 1898 timo = tvtohz(&atv); 1899 } 1900 1901 ki = p->p_aioinfo; 1902 if (ki == NULL) 1903 return (EAGAIN); 1904 1905 if (njoblist == 0) 1906 return (0); 1907 1908 AIO_LOCK(ki); 1909 for (;;) { 1910 firstjob = NULL; 1911 error = 0; 1912 TAILQ_FOREACH(job, &ki->kaio_all, allist) { 1913 for (i = 0; i < njoblist; i++) { 1914 if (job->ujob == ujoblist[i]) { 1915 if (firstjob == NULL) 1916 firstjob = job; 1917 if (job->jobflags & KAIOCB_FINISHED) 1918 goto RETURN; 1919 } 1920 } 1921 } 1922 /* All tasks were finished. */ 1923 if (firstjob == NULL) 1924 break; 1925 1926 ki->kaio_flags |= KAIO_WAKEUP; 1927 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, 1928 "aiospn", timo); 1929 if (error == ERESTART) 1930 error = EINTR; 1931 if (error) 1932 break; 1933 } 1934 RETURN: 1935 AIO_UNLOCK(ki); 1936 return (error); 1937 } 1938 1939 int 1940 sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap) 1941 { 1942 struct timespec ts, *tsp; 1943 struct aiocb **ujoblist; 1944 int error; 1945 1946 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX) 1947 return (EINVAL); 1948 1949 if (uap->timeout) { 1950 /* Get timespec struct. */ 1951 if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) 1952 return (error); 1953 tsp = &ts; 1954 } else 1955 tsp = NULL; 1956 1957 ujoblist = uma_zalloc(aiol_zone, M_WAITOK); 1958 error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0])); 1959 if (error == 0) 1960 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp); 1961 uma_zfree(aiol_zone, ujoblist); 1962 return (error); 1963 } 1964 1965 /* 1966 * aio_cancel cancels any non-physio aio operations not currently in 1967 * progress. 1968 */ 1969 int 1970 sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap) 1971 { 1972 struct proc *p = td->td_proc; 1973 struct kaioinfo *ki; 1974 struct kaiocb *job, *jobn; 1975 struct file *fp; 1976 cap_rights_t rights; 1977 int error; 1978 int cancelled = 0; 1979 int notcancelled = 0; 1980 struct vnode *vp; 1981 1982 /* Lookup file object. */ 1983 error = fget(td, uap->fd, cap_rights_init(&rights), &fp); 1984 if (error) 1985 return (error); 1986 1987 ki = p->p_aioinfo; 1988 if (ki == NULL) 1989 goto done; 1990 1991 if (fp->f_type == DTYPE_VNODE) { 1992 vp = fp->f_vnode; 1993 if (vn_isdisk(vp, &error)) { 1994 fdrop(fp, td); 1995 td->td_retval[0] = AIO_NOTCANCELED; 1996 return (0); 1997 } 1998 } 1999 2000 AIO_LOCK(ki); 2001 TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { 2002 if ((uap->fd == job->uaiocb.aio_fildes) && 2003 ((uap->aiocbp == NULL) || 2004 (uap->aiocbp == job->ujob))) { 2005 if (aio_cancel_job(p, ki, job)) { 2006 cancelled++; 2007 } else { 2008 notcancelled++; 2009 } 2010 if (uap->aiocbp != NULL) 2011 break; 2012 } 2013 } 2014 AIO_UNLOCK(ki); 2015 2016 done: 2017 fdrop(fp, td); 2018 2019 if (uap->aiocbp != NULL) { 2020 if (cancelled) { 2021 td->td_retval[0] = AIO_CANCELED; 2022 return (0); 2023 } 2024 } 2025 2026 if (notcancelled) { 2027 td->td_retval[0] = AIO_NOTCANCELED; 2028 return (0); 2029 } 2030 2031 if (cancelled) { 2032 td->td_retval[0] = AIO_CANCELED; 2033 return (0); 2034 } 2035 2036 td->td_retval[0] = AIO_ALLDONE; 2037 2038 return (0); 2039 } 2040 2041 /* 2042 * aio_error is implemented in the kernel level for compatibility purposes 2043 * only. For a user mode async implementation, it would be best to do it in 2044 * a userland subroutine. 2045 */ 2046 static int 2047 kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops) 2048 { 2049 struct proc *p = td->td_proc; 2050 struct kaiocb *job; 2051 struct kaioinfo *ki; 2052 int status; 2053 2054 ki = p->p_aioinfo; 2055 if (ki == NULL) { 2056 td->td_retval[0] = EINVAL; 2057 return (0); 2058 } 2059 2060 AIO_LOCK(ki); 2061 TAILQ_FOREACH(job, &ki->kaio_all, allist) { 2062 if (job->ujob == ujob) { 2063 if (job->jobflags & KAIOCB_FINISHED) 2064 td->td_retval[0] = 2065 job->uaiocb._aiocb_private.error; 2066 else 2067 td->td_retval[0] = EINPROGRESS; 2068 AIO_UNLOCK(ki); 2069 return (0); 2070 } 2071 } 2072 AIO_UNLOCK(ki); 2073 2074 /* 2075 * Hack for failure of aio_aqueue. 2076 */ 2077 status = ops->fetch_status(ujob); 2078 if (status == -1) { 2079 td->td_retval[0] = ops->fetch_error(ujob); 2080 return (0); 2081 } 2082 2083 td->td_retval[0] = EINVAL; 2084 return (0); 2085 } 2086 2087 int 2088 sys_aio_error(struct thread *td, struct aio_error_args *uap) 2089 { 2090 2091 return (kern_aio_error(td, uap->aiocbp, &aiocb_ops)); 2092 } 2093 2094 /* syscall - asynchronous read from a file (REALTIME) */ 2095 #ifdef COMPAT_FREEBSD6 2096 int 2097 freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap) 2098 { 2099 2100 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2101 &aiocb_ops_osigevent)); 2102 } 2103 #endif 2104 2105 int 2106 sys_aio_read(struct thread *td, struct aio_read_args *uap) 2107 { 2108 2109 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops)); 2110 } 2111 2112 /* syscall - asynchronous write to a file (REALTIME) */ 2113 #ifdef COMPAT_FREEBSD6 2114 int 2115 freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap) 2116 { 2117 2118 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 2119 &aiocb_ops_osigevent)); 2120 } 2121 #endif 2122 2123 int 2124 sys_aio_write(struct thread *td, struct aio_write_args *uap) 2125 { 2126 2127 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops)); 2128 } 2129 2130 int 2131 sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap) 2132 { 2133 2134 return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops)); 2135 } 2136 2137 static int 2138 kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list, 2139 struct aiocb **acb_list, int nent, struct sigevent *sig, 2140 struct aiocb_ops *ops) 2141 { 2142 struct proc *p = td->td_proc; 2143 struct aiocb *job; 2144 struct kaioinfo *ki; 2145 struct aioliojob *lj; 2146 struct kevent kev; 2147 int error; 2148 int nerror; 2149 int i; 2150 2151 if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT)) 2152 return (EINVAL); 2153 2154 if (nent < 0 || nent > AIO_LISTIO_MAX) 2155 return (EINVAL); 2156 2157 if (p->p_aioinfo == NULL) 2158 aio_init_aioinfo(p); 2159 2160 ki = p->p_aioinfo; 2161 2162 lj = uma_zalloc(aiolio_zone, M_WAITOK); 2163 lj->lioj_flags = 0; 2164 lj->lioj_count = 0; 2165 lj->lioj_finished_count = 0; 2166 knlist_init_mtx(&lj->klist, AIO_MTX(ki)); 2167 ksiginfo_init(&lj->lioj_ksi); 2168 2169 /* 2170 * Setup signal. 2171 */ 2172 if (sig && (mode == LIO_NOWAIT)) { 2173 bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal)); 2174 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 2175 /* Assume only new style KEVENT */ 2176 kev.filter = EVFILT_LIO; 2177 kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; 2178 kev.ident = (uintptr_t)uacb_list; /* something unique */ 2179 kev.data = (intptr_t)lj; 2180 /* pass user defined sigval data */ 2181 kev.udata = lj->lioj_signal.sigev_value.sival_ptr; 2182 error = kqfd_register( 2183 lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1); 2184 if (error) { 2185 uma_zfree(aiolio_zone, lj); 2186 return (error); 2187 } 2188 } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) { 2189 ; 2190 } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 2191 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) { 2192 if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) { 2193 uma_zfree(aiolio_zone, lj); 2194 return EINVAL; 2195 } 2196 lj->lioj_flags |= LIOJ_SIGNAL; 2197 } else { 2198 uma_zfree(aiolio_zone, lj); 2199 return EINVAL; 2200 } 2201 } 2202 2203 AIO_LOCK(ki); 2204 TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list); 2205 /* 2206 * Add extra aiocb count to avoid the lio to be freed 2207 * by other threads doing aio_waitcomplete or aio_return, 2208 * and prevent event from being sent until we have queued 2209 * all tasks. 2210 */ 2211 lj->lioj_count = 1; 2212 AIO_UNLOCK(ki); 2213 2214 /* 2215 * Get pointers to the list of I/O requests. 2216 */ 2217 nerror = 0; 2218 for (i = 0; i < nent; i++) { 2219 job = acb_list[i]; 2220 if (job != NULL) { 2221 error = aio_aqueue(td, job, lj, LIO_NOP, ops); 2222 if (error != 0) 2223 nerror++; 2224 } 2225 } 2226 2227 error = 0; 2228 AIO_LOCK(ki); 2229 if (mode == LIO_WAIT) { 2230 while (lj->lioj_count - 1 != lj->lioj_finished_count) { 2231 ki->kaio_flags |= KAIO_WAKEUP; 2232 error = msleep(&p->p_aioinfo, AIO_MTX(ki), 2233 PRIBIO | PCATCH, "aiospn", 0); 2234 if (error == ERESTART) 2235 error = EINTR; 2236 if (error) 2237 break; 2238 } 2239 } else { 2240 if (lj->lioj_count - 1 == lj->lioj_finished_count) { 2241 if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { 2242 lj->lioj_flags |= LIOJ_KEVENT_POSTED; 2243 KNOTE_LOCKED(&lj->klist, 1); 2244 } 2245 if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) 2246 == LIOJ_SIGNAL 2247 && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || 2248 lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { 2249 aio_sendsig(p, &lj->lioj_signal, 2250 &lj->lioj_ksi); 2251 lj->lioj_flags |= LIOJ_SIGNAL_POSTED; 2252 } 2253 } 2254 } 2255 lj->lioj_count--; 2256 if (lj->lioj_count == 0) { 2257 TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); 2258 knlist_delete(&lj->klist, curthread, 1); 2259 PROC_LOCK(p); 2260 sigqueue_take(&lj->lioj_ksi); 2261 PROC_UNLOCK(p); 2262 AIO_UNLOCK(ki); 2263 uma_zfree(aiolio_zone, lj); 2264 } else 2265 AIO_UNLOCK(ki); 2266 2267 if (nerror) 2268 return (EIO); 2269 return (error); 2270 } 2271 2272 /* syscall - list directed I/O (REALTIME) */ 2273 #ifdef COMPAT_FREEBSD6 2274 int 2275 freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap) 2276 { 2277 struct aiocb **acb_list; 2278 struct sigevent *sigp, sig; 2279 struct osigevent osig; 2280 int error, nent; 2281 2282 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2283 return (EINVAL); 2284 2285 nent = uap->nent; 2286 if (nent < 0 || nent > AIO_LISTIO_MAX) 2287 return (EINVAL); 2288 2289 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2290 error = copyin(uap->sig, &osig, sizeof(osig)); 2291 if (error) 2292 return (error); 2293 error = convert_old_sigevent(&osig, &sig); 2294 if (error) 2295 return (error); 2296 sigp = &sig; 2297 } else 2298 sigp = NULL; 2299 2300 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2301 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); 2302 if (error == 0) 2303 error = kern_lio_listio(td, uap->mode, 2304 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 2305 &aiocb_ops_osigevent); 2306 free(acb_list, M_LIO); 2307 return (error); 2308 } 2309 #endif 2310 2311 /* syscall - list directed I/O (REALTIME) */ 2312 int 2313 sys_lio_listio(struct thread *td, struct lio_listio_args *uap) 2314 { 2315 struct aiocb **acb_list; 2316 struct sigevent *sigp, sig; 2317 int error, nent; 2318 2319 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2320 return (EINVAL); 2321 2322 nent = uap->nent; 2323 if (nent < 0 || nent > AIO_LISTIO_MAX) 2324 return (EINVAL); 2325 2326 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2327 error = copyin(uap->sig, &sig, sizeof(sig)); 2328 if (error) 2329 return (error); 2330 sigp = &sig; 2331 } else 2332 sigp = NULL; 2333 2334 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2335 error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); 2336 if (error == 0) 2337 error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list, 2338 nent, sigp, &aiocb_ops); 2339 free(acb_list, M_LIO); 2340 return (error); 2341 } 2342 2343 static void 2344 aio_physwakeup(struct bio *bp) 2345 { 2346 struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; 2347 struct proc *userp; 2348 struct kaioinfo *ki; 2349 size_t nbytes; 2350 int error, nblks; 2351 2352 /* Release mapping into kernel space. */ 2353 userp = job->userproc; 2354 ki = userp->p_aioinfo; 2355 if (job->pbuf) { 2356 pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages); 2357 relpbuf(job->pbuf, NULL); 2358 job->pbuf = NULL; 2359 atomic_subtract_int(&num_buf_aio, 1); 2360 AIO_LOCK(ki); 2361 ki->kaio_buffer_count--; 2362 AIO_UNLOCK(ki); 2363 } 2364 vm_page_unhold_pages(job->pages, job->npages); 2365 2366 bp = job->bp; 2367 job->bp = NULL; 2368 nbytes = job->uaiocb.aio_nbytes - bp->bio_resid; 2369 error = 0; 2370 if (bp->bio_flags & BIO_ERROR) 2371 error = bp->bio_error; 2372 nblks = btodb(nbytes); 2373 if (job->uaiocb.aio_lio_opcode == LIO_WRITE) 2374 job->outblock += nblks; 2375 else 2376 job->inblock += nblks; 2377 2378 if (error) 2379 aio_complete(job, -1, error); 2380 else 2381 aio_complete(job, nbytes, 0); 2382 2383 g_destroy_bio(bp); 2384 } 2385 2386 /* syscall - wait for the next completion of an aio request */ 2387 static int 2388 kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp, 2389 struct timespec *ts, struct aiocb_ops *ops) 2390 { 2391 struct proc *p = td->td_proc; 2392 struct timeval atv; 2393 struct kaioinfo *ki; 2394 struct kaiocb *job; 2395 struct aiocb *ujob; 2396 long error, status; 2397 int timo; 2398 2399 ops->store_aiocb(ujobp, NULL); 2400 2401 if (ts == NULL) { 2402 timo = 0; 2403 } else if (ts->tv_sec == 0 && ts->tv_nsec == 0) { 2404 timo = -1; 2405 } else { 2406 if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000)) 2407 return (EINVAL); 2408 2409 TIMESPEC_TO_TIMEVAL(&atv, ts); 2410 if (itimerfix(&atv)) 2411 return (EINVAL); 2412 timo = tvtohz(&atv); 2413 } 2414 2415 if (p->p_aioinfo == NULL) 2416 aio_init_aioinfo(p); 2417 ki = p->p_aioinfo; 2418 2419 error = 0; 2420 job = NULL; 2421 AIO_LOCK(ki); 2422 while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) { 2423 if (timo == -1) { 2424 error = EWOULDBLOCK; 2425 break; 2426 } 2427 ki->kaio_flags |= KAIO_WAKEUP; 2428 error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, 2429 "aiowc", timo); 2430 if (timo && error == ERESTART) 2431 error = EINTR; 2432 if (error) 2433 break; 2434 } 2435 2436 if (job != NULL) { 2437 MPASS(job->jobflags & KAIOCB_FINISHED); 2438 ujob = job->ujob; 2439 status = job->uaiocb._aiocb_private.status; 2440 error = job->uaiocb._aiocb_private.error; 2441 td->td_retval[0] = status; 2442 td->td_ru.ru_oublock += job->outblock; 2443 td->td_ru.ru_inblock += job->inblock; 2444 td->td_ru.ru_msgsnd += job->msgsnd; 2445 td->td_ru.ru_msgrcv += job->msgrcv; 2446 aio_free_entry(job); 2447 AIO_UNLOCK(ki); 2448 ops->store_aiocb(ujobp, ujob); 2449 ops->store_error(ujob, error); 2450 ops->store_status(ujob, status); 2451 } else 2452 AIO_UNLOCK(ki); 2453 2454 return (error); 2455 } 2456 2457 int 2458 sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap) 2459 { 2460 struct timespec ts, *tsp; 2461 int error; 2462 2463 if (uap->timeout) { 2464 /* Get timespec struct. */ 2465 error = copyin(uap->timeout, &ts, sizeof(ts)); 2466 if (error) 2467 return (error); 2468 tsp = &ts; 2469 } else 2470 tsp = NULL; 2471 2472 return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops)); 2473 } 2474 2475 static int 2476 kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob, 2477 struct aiocb_ops *ops) 2478 { 2479 2480 if (op != O_SYNC) /* XXX lack of O_DSYNC */ 2481 return (EINVAL); 2482 return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops)); 2483 } 2484 2485 int 2486 sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap) 2487 { 2488 2489 return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops)); 2490 } 2491 2492 /* kqueue attach function */ 2493 static int 2494 filt_aioattach(struct knote *kn) 2495 { 2496 struct kaiocb *job; 2497 2498 job = (struct kaiocb *)(uintptr_t)kn->kn_sdata; 2499 2500 /* 2501 * The job pointer must be validated before using it, so 2502 * registration is restricted to the kernel; the user cannot 2503 * set EV_FLAG1. 2504 */ 2505 if ((kn->kn_flags & EV_FLAG1) == 0) 2506 return (EPERM); 2507 kn->kn_ptr.p_aio = job; 2508 kn->kn_flags &= ~EV_FLAG1; 2509 2510 knlist_add(&job->klist, kn, 0); 2511 2512 return (0); 2513 } 2514 2515 /* kqueue detach function */ 2516 static void 2517 filt_aiodetach(struct knote *kn) 2518 { 2519 struct knlist *knl; 2520 2521 knl = &kn->kn_ptr.p_aio->klist; 2522 knl->kl_lock(knl->kl_lockarg); 2523 if (!knlist_empty(knl)) 2524 knlist_remove(knl, kn, 1); 2525 knl->kl_unlock(knl->kl_lockarg); 2526 } 2527 2528 /* kqueue filter function */ 2529 /*ARGSUSED*/ 2530 static int 2531 filt_aio(struct knote *kn, long hint) 2532 { 2533 struct kaiocb *job = kn->kn_ptr.p_aio; 2534 2535 kn->kn_data = job->uaiocb._aiocb_private.error; 2536 if (!(job->jobflags & KAIOCB_FINISHED)) 2537 return (0); 2538 kn->kn_flags |= EV_EOF; 2539 return (1); 2540 } 2541 2542 /* kqueue attach function */ 2543 static int 2544 filt_lioattach(struct knote *kn) 2545 { 2546 struct aioliojob *lj; 2547 2548 lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata; 2549 2550 /* 2551 * The aioliojob pointer must be validated before using it, so 2552 * registration is restricted to the kernel; the user cannot 2553 * set EV_FLAG1. 2554 */ 2555 if ((kn->kn_flags & EV_FLAG1) == 0) 2556 return (EPERM); 2557 kn->kn_ptr.p_lio = lj; 2558 kn->kn_flags &= ~EV_FLAG1; 2559 2560 knlist_add(&lj->klist, kn, 0); 2561 2562 return (0); 2563 } 2564 2565 /* kqueue detach function */ 2566 static void 2567 filt_liodetach(struct knote *kn) 2568 { 2569 struct knlist *knl; 2570 2571 knl = &kn->kn_ptr.p_lio->klist; 2572 knl->kl_lock(knl->kl_lockarg); 2573 if (!knlist_empty(knl)) 2574 knlist_remove(knl, kn, 1); 2575 knl->kl_unlock(knl->kl_lockarg); 2576 } 2577 2578 /* kqueue filter function */ 2579 /*ARGSUSED*/ 2580 static int 2581 filt_lio(struct knote *kn, long hint) 2582 { 2583 struct aioliojob * lj = kn->kn_ptr.p_lio; 2584 2585 return (lj->lioj_flags & LIOJ_KEVENT_POSTED); 2586 } 2587 2588 #ifdef COMPAT_FREEBSD32 2589 #include <sys/mount.h> 2590 #include <sys/socket.h> 2591 #include <compat/freebsd32/freebsd32.h> 2592 #include <compat/freebsd32/freebsd32_proto.h> 2593 #include <compat/freebsd32/freebsd32_signal.h> 2594 #include <compat/freebsd32/freebsd32_syscall.h> 2595 #include <compat/freebsd32/freebsd32_util.h> 2596 2597 struct __aiocb_private32 { 2598 int32_t status; 2599 int32_t error; 2600 uint32_t kernelinfo; 2601 }; 2602 2603 #ifdef COMPAT_FREEBSD6 2604 typedef struct oaiocb32 { 2605 int aio_fildes; /* File descriptor */ 2606 uint64_t aio_offset __packed; /* File offset for I/O */ 2607 uint32_t aio_buf; /* I/O buffer in process space */ 2608 uint32_t aio_nbytes; /* Number of bytes for I/O */ 2609 struct osigevent32 aio_sigevent; /* Signal to deliver */ 2610 int aio_lio_opcode; /* LIO opcode */ 2611 int aio_reqprio; /* Request priority -- ignored */ 2612 struct __aiocb_private32 _aiocb_private; 2613 } oaiocb32_t; 2614 #endif 2615 2616 typedef struct aiocb32 { 2617 int32_t aio_fildes; /* File descriptor */ 2618 uint64_t aio_offset __packed; /* File offset for I/O */ 2619 uint32_t aio_buf; /* I/O buffer in process space */ 2620 uint32_t aio_nbytes; /* Number of bytes for I/O */ 2621 int __spare__[2]; 2622 uint32_t __spare2__; 2623 int aio_lio_opcode; /* LIO opcode */ 2624 int aio_reqprio; /* Request priority -- ignored */ 2625 struct __aiocb_private32 _aiocb_private; 2626 struct sigevent32 aio_sigevent; /* Signal to deliver */ 2627 } aiocb32_t; 2628 2629 #ifdef COMPAT_FREEBSD6 2630 static int 2631 convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig) 2632 { 2633 2634 /* 2635 * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are 2636 * supported by AIO with the old sigevent structure. 2637 */ 2638 CP(*osig, *nsig, sigev_notify); 2639 switch (nsig->sigev_notify) { 2640 case SIGEV_NONE: 2641 break; 2642 case SIGEV_SIGNAL: 2643 nsig->sigev_signo = osig->__sigev_u.__sigev_signo; 2644 break; 2645 case SIGEV_KEVENT: 2646 nsig->sigev_notify_kqueue = 2647 osig->__sigev_u.__sigev_notify_kqueue; 2648 PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr); 2649 break; 2650 default: 2651 return (EINVAL); 2652 } 2653 return (0); 2654 } 2655 2656 static int 2657 aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob) 2658 { 2659 struct oaiocb32 job32; 2660 int error; 2661 2662 bzero(kjob, sizeof(struct aiocb)); 2663 error = copyin(ujob, &job32, sizeof(job32)); 2664 if (error) 2665 return (error); 2666 2667 CP(job32, *kjob, aio_fildes); 2668 CP(job32, *kjob, aio_offset); 2669 PTRIN_CP(job32, *kjob, aio_buf); 2670 CP(job32, *kjob, aio_nbytes); 2671 CP(job32, *kjob, aio_lio_opcode); 2672 CP(job32, *kjob, aio_reqprio); 2673 CP(job32, *kjob, _aiocb_private.status); 2674 CP(job32, *kjob, _aiocb_private.error); 2675 PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo); 2676 return (convert_old_sigevent32(&job32.aio_sigevent, 2677 &kjob->aio_sigevent)); 2678 } 2679 #endif 2680 2681 static int 2682 aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob) 2683 { 2684 struct aiocb32 job32; 2685 int error; 2686 2687 error = copyin(ujob, &job32, sizeof(job32)); 2688 if (error) 2689 return (error); 2690 CP(job32, *kjob, aio_fildes); 2691 CP(job32, *kjob, aio_offset); 2692 PTRIN_CP(job32, *kjob, aio_buf); 2693 CP(job32, *kjob, aio_nbytes); 2694 CP(job32, *kjob, aio_lio_opcode); 2695 CP(job32, *kjob, aio_reqprio); 2696 CP(job32, *kjob, _aiocb_private.status); 2697 CP(job32, *kjob, _aiocb_private.error); 2698 PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo); 2699 return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent)); 2700 } 2701 2702 static long 2703 aiocb32_fetch_status(struct aiocb *ujob) 2704 { 2705 struct aiocb32 *ujob32; 2706 2707 ujob32 = (struct aiocb32 *)ujob; 2708 return (fuword32(&ujob32->_aiocb_private.status)); 2709 } 2710 2711 static long 2712 aiocb32_fetch_error(struct aiocb *ujob) 2713 { 2714 struct aiocb32 *ujob32; 2715 2716 ujob32 = (struct aiocb32 *)ujob; 2717 return (fuword32(&ujob32->_aiocb_private.error)); 2718 } 2719 2720 static int 2721 aiocb32_store_status(struct aiocb *ujob, long status) 2722 { 2723 struct aiocb32 *ujob32; 2724 2725 ujob32 = (struct aiocb32 *)ujob; 2726 return (suword32(&ujob32->_aiocb_private.status, status)); 2727 } 2728 2729 static int 2730 aiocb32_store_error(struct aiocb *ujob, long error) 2731 { 2732 struct aiocb32 *ujob32; 2733 2734 ujob32 = (struct aiocb32 *)ujob; 2735 return (suword32(&ujob32->_aiocb_private.error, error)); 2736 } 2737 2738 static int 2739 aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref) 2740 { 2741 struct aiocb32 *ujob32; 2742 2743 ujob32 = (struct aiocb32 *)ujob; 2744 return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref)); 2745 } 2746 2747 static int 2748 aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) 2749 { 2750 2751 return (suword32(ujobp, (long)ujob)); 2752 } 2753 2754 static struct aiocb_ops aiocb32_ops = { 2755 .copyin = aiocb32_copyin, 2756 .fetch_status = aiocb32_fetch_status, 2757 .fetch_error = aiocb32_fetch_error, 2758 .store_status = aiocb32_store_status, 2759 .store_error = aiocb32_store_error, 2760 .store_kernelinfo = aiocb32_store_kernelinfo, 2761 .store_aiocb = aiocb32_store_aiocb, 2762 }; 2763 2764 #ifdef COMPAT_FREEBSD6 2765 static struct aiocb_ops aiocb32_ops_osigevent = { 2766 .copyin = aiocb32_copyin_old_sigevent, 2767 .fetch_status = aiocb32_fetch_status, 2768 .fetch_error = aiocb32_fetch_error, 2769 .store_status = aiocb32_store_status, 2770 .store_error = aiocb32_store_error, 2771 .store_kernelinfo = aiocb32_store_kernelinfo, 2772 .store_aiocb = aiocb32_store_aiocb, 2773 }; 2774 #endif 2775 2776 int 2777 freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap) 2778 { 2779 2780 return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); 2781 } 2782 2783 int 2784 freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap) 2785 { 2786 struct timespec32 ts32; 2787 struct timespec ts, *tsp; 2788 struct aiocb **ujoblist; 2789 uint32_t *ujoblist32; 2790 int error, i; 2791 2792 if (uap->nent < 0 || uap->nent > AIO_LISTIO_MAX) 2793 return (EINVAL); 2794 2795 if (uap->timeout) { 2796 /* Get timespec struct. */ 2797 if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0) 2798 return (error); 2799 CP(ts32, ts, tv_sec); 2800 CP(ts32, ts, tv_nsec); 2801 tsp = &ts; 2802 } else 2803 tsp = NULL; 2804 2805 ujoblist = uma_zalloc(aiol_zone, M_WAITOK); 2806 ujoblist32 = (uint32_t *)ujoblist; 2807 error = copyin(uap->aiocbp, ujoblist32, uap->nent * 2808 sizeof(ujoblist32[0])); 2809 if (error == 0) { 2810 for (i = uap->nent; i > 0; i--) 2811 ujoblist[i] = PTRIN(ujoblist32[i]); 2812 2813 error = kern_aio_suspend(td, uap->nent, ujoblist, tsp); 2814 } 2815 uma_zfree(aiol_zone, ujoblist); 2816 return (error); 2817 } 2818 2819 int 2820 freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap) 2821 { 2822 2823 return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); 2824 } 2825 2826 #ifdef COMPAT_FREEBSD6 2827 int 2828 freebsd6_freebsd32_aio_read(struct thread *td, 2829 struct freebsd6_freebsd32_aio_read_args *uap) 2830 { 2831 2832 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2833 &aiocb32_ops_osigevent)); 2834 } 2835 #endif 2836 2837 int 2838 freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap) 2839 { 2840 2841 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, 2842 &aiocb32_ops)); 2843 } 2844 2845 #ifdef COMPAT_FREEBSD6 2846 int 2847 freebsd6_freebsd32_aio_write(struct thread *td, 2848 struct freebsd6_freebsd32_aio_write_args *uap) 2849 { 2850 2851 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 2852 &aiocb32_ops_osigevent)); 2853 } 2854 #endif 2855 2856 int 2857 freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap) 2858 { 2859 2860 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, 2861 &aiocb32_ops)); 2862 } 2863 2864 int 2865 freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap) 2866 { 2867 2868 return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK, 2869 &aiocb32_ops)); 2870 } 2871 2872 int 2873 freebsd32_aio_waitcomplete(struct thread *td, 2874 struct freebsd32_aio_waitcomplete_args *uap) 2875 { 2876 struct timespec32 ts32; 2877 struct timespec ts, *tsp; 2878 int error; 2879 2880 if (uap->timeout) { 2881 /* Get timespec struct. */ 2882 error = copyin(uap->timeout, &ts32, sizeof(ts32)); 2883 if (error) 2884 return (error); 2885 CP(ts32, ts, tv_sec); 2886 CP(ts32, ts, tv_nsec); 2887 tsp = &ts; 2888 } else 2889 tsp = NULL; 2890 2891 return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp, 2892 &aiocb32_ops)); 2893 } 2894 2895 int 2896 freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap) 2897 { 2898 2899 return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp, 2900 &aiocb32_ops)); 2901 } 2902 2903 #ifdef COMPAT_FREEBSD6 2904 int 2905 freebsd6_freebsd32_lio_listio(struct thread *td, 2906 struct freebsd6_freebsd32_lio_listio_args *uap) 2907 { 2908 struct aiocb **acb_list; 2909 struct sigevent *sigp, sig; 2910 struct osigevent32 osig; 2911 uint32_t *acb_list32; 2912 int error, i, nent; 2913 2914 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2915 return (EINVAL); 2916 2917 nent = uap->nent; 2918 if (nent < 0 || nent > AIO_LISTIO_MAX) 2919 return (EINVAL); 2920 2921 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2922 error = copyin(uap->sig, &osig, sizeof(osig)); 2923 if (error) 2924 return (error); 2925 error = convert_old_sigevent32(&osig, &sig); 2926 if (error) 2927 return (error); 2928 sigp = &sig; 2929 } else 2930 sigp = NULL; 2931 2932 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); 2933 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); 2934 if (error) { 2935 free(acb_list32, M_LIO); 2936 return (error); 2937 } 2938 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2939 for (i = 0; i < nent; i++) 2940 acb_list[i] = PTRIN(acb_list32[i]); 2941 free(acb_list32, M_LIO); 2942 2943 error = kern_lio_listio(td, uap->mode, 2944 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 2945 &aiocb32_ops_osigevent); 2946 free(acb_list, M_LIO); 2947 return (error); 2948 } 2949 #endif 2950 2951 int 2952 freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap) 2953 { 2954 struct aiocb **acb_list; 2955 struct sigevent *sigp, sig; 2956 struct sigevent32 sig32; 2957 uint32_t *acb_list32; 2958 int error, i, nent; 2959 2960 if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) 2961 return (EINVAL); 2962 2963 nent = uap->nent; 2964 if (nent < 0 || nent > AIO_LISTIO_MAX) 2965 return (EINVAL); 2966 2967 if (uap->sig && (uap->mode == LIO_NOWAIT)) { 2968 error = copyin(uap->sig, &sig32, sizeof(sig32)); 2969 if (error) 2970 return (error); 2971 error = convert_sigevent32(&sig32, &sig); 2972 if (error) 2973 return (error); 2974 sigp = &sig; 2975 } else 2976 sigp = NULL; 2977 2978 acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); 2979 error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); 2980 if (error) { 2981 free(acb_list32, M_LIO); 2982 return (error); 2983 } 2984 acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); 2985 for (i = 0; i < nent; i++) 2986 acb_list[i] = PTRIN(acb_list32[i]); 2987 free(acb_list32, M_LIO); 2988 2989 error = kern_lio_listio(td, uap->mode, 2990 (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, 2991 &aiocb32_ops); 2992 free(acb_list, M_LIO); 2993 return (error); 2994 } 2995 2996 #endif 2997