Lines Matching refs:info

139 static int  kcov_alloc(struct kcov_info *info, size_t entries);
140 static void kcov_free(struct kcov_info *info);
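
The prototypes above and the field accesses throughout the listing touch every member of the per-descriptor state. A hedged reconstruction of struct kcov_info, inferred only from the fields referenced in this listing (the real declaration may order or type them differently):

struct kcov_info {
	struct thread	*thread;	/* thread being traced, NULL when detached */
	vm_object_t	bufobj;		/* OBJT_PHYS object backing the buffer */
	vm_offset_t	kvaddr;		/* kernel mapping of the buffer, 0 if unset */
	size_t		entries;	/* number of 64-bit slots visible to userspace */
	size_t		bufsize;	/* entries * KCOV_ELEMENT_SIZE, page-rounded */
	int		mode;		/* KCOV_MODE_TRACE_PC, KCOV_MODE_TRACE_CMP, or -1 */
	int		state;		/* OPEN -> READY -> RUNNING -> DYING, via atomic_*_int */
};
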
166 struct kcov_info *info; in get_kinfo() local
182 info = td->td_kcov_info; in get_kinfo()
183 if (info == NULL || in get_kinfo()
184 atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING) in get_kinfo()
187 return (info); in get_kinfo()
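
A minimal sketch of the lookup performed by the lines above: the trace hooks only get a kcov_info back when the current thread enabled tracing and the state has been atomically published as running, so untraced threads pay almost nothing. (The real function also bails out in contexts where recording is unsafe; those checks are omitted here.)

static struct kcov_info *
get_kinfo(struct thread *td)
{
	struct kcov_info *info;

	/* Only the thread that issued KIOENABLE has td_kcov_info set. */
	info = td->td_kcov_info;
	if (info == NULL ||
	    atomic_load_acq_int(&info->state) != KCOV_STATE_RUNNING)
		return (NULL);
	return (info);
}
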
194 struct kcov_info *info; in trace_pc() local
198 info = get_kinfo(td); in trace_pc()
199 if (info == NULL) in trace_pc()
205 if (info->mode != KCOV_MODE_TRACE_PC) in trace_pc()
208 KASSERT(info->kvaddr != 0, ("%s: NULL buf while running", __func__)); in trace_pc()
210 buf = (uint64_t *)info->kvaddr; in trace_pc()
214 if (index + 2 > info->entries) in trace_pc()
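
The bounds check at line 214 matches the usual kcov buffer layout: slot 0 holds a running record count and each later 64-bit slot holds one program counter. A hedged sketch of the recording step under that assumption, with `ret' being the instrumented caller's PC handed in by the __sanitizer_cov_trace_pc() entry point:

	buf = (uint64_t *)info->kvaddr;

	/* Slot 0 is the number of PCs recorded so far. */
	index = buf[0];
	/* Need room for this PC as well as the counter slot itself. */
	if (index + 2 > info->entries)
		return;
	buf[index + 1] = ret;
	buf[0] = index + 1;
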
225 struct kcov_info *info; in trace_cmp() local
229 info = get_kinfo(td); in trace_cmp()
230 if (info == NULL) in trace_cmp()
236 if (info->mode != KCOV_MODE_TRACE_CMP) in trace_cmp()
239 KASSERT(info->kvaddr != 0, ("%s: NULL buf while running", __func__)); in trace_cmp()
241 buf = (uint64_t *)info->kvaddr; in trace_cmp()
247 if (index * 4 + 4 + 1 > info->entries) in trace_cmp()
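
The check at line 247 implies that comparison tracing packs four 64-bit words per record after the counter slot: a type descriptor, both operands, and the PC of the comparison. A hedged sketch of that store sequence using the same bound:

	buf = (uint64_t *)info->kvaddr;

	/* Slot 0 counts records; each record is four 64-bit words. */
	index = buf[0];
	if (index * 4 + 4 + 1 > info->entries)
		return;
	buf[index * 4 + 1] = type;	/* comparison size/const descriptor */
	buf[index * 4 + 2] = arg1;	/* first operand */
	buf[index * 4 + 3] = arg2;	/* second operand */
	buf[index * 4 + 4] = ret;	/* PC of the comparison */
	buf[0] = index + 1;
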
270 struct kcov_info *info = arg; in kcov_mmap_cleanup() local
282 atomic_store_int(&info->state, KCOV_STATE_DYING); in kcov_mmap_cleanup()
284 thread = info->thread; in kcov_mmap_cleanup()
297 kcov_free(info); in kcov_mmap_cleanup()
303 struct kcov_info *info; in kcov_open() local
306 info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK); in kcov_open()
307 info->state = KCOV_STATE_OPEN; in kcov_open()
308 info->thread = NULL; in kcov_open()
309 info->mode = -1; in kcov_open()
311 if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0) in kcov_open()
312 kcov_mmap_cleanup(info); in kcov_open()
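
These lines cover the whole open path: allocate a zeroed kcov_info, mark it open with no mode and no owning thread, and attach it to the descriptor with a cleanup callback so the state is torn down even if userspace never reaches close(2). A condensed sketch built from the lines above:

static int
kcov_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct kcov_info *info;
	int error;

	info = malloc(sizeof(struct kcov_info), M_KCOV_INFO, M_ZERO | M_WAITOK);
	info->state = KCOV_STATE_OPEN;
	info->thread = NULL;
	info->mode = -1;

	/* On failure, reuse the cleanup path to free the state. */
	if ((error = devfs_set_cdevpriv(info, kcov_mmap_cleanup)) != 0)
		kcov_mmap_cleanup(info);
	return (error);
}
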
320 struct kcov_info *info; in kcov_close() local
323 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_close()
326 KASSERT(info != NULL, ("kcov_close with no kcov_info structure")); in kcov_close()
329 if (info->state == KCOV_STATE_RUNNING) in kcov_close()
339 struct kcov_info *info; in kcov_mmap_single() local
346 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_mmap_single()
349 if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries) in kcov_mmap_single()
352 vm_object_reference(info->bufobj); in kcov_mmap_single()
354 *object = info->bufobj; in kcov_mmap_single()
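
mmap(2) on the device does not allocate anything; it hands userspace the same physical-memory object that backs the kernel's buffer. A hedged sketch of the body around the lines above: reject the request unless a buffer exists and the mapping size matches the configured entry count, then return a referenced object.

	if ((error = devfs_get_cdevpriv((void **)&info)) != 0)
		return (error);

	/* A buffer must exist and the map size must match it exactly. */
	if (info->kvaddr == 0 || size / KCOV_ELEMENT_SIZE != info->entries)
		return (EINVAL);

	/* Give the mapping its own reference to the backing object. */
	vm_object_reference(info->bufobj);
	*object = info->bufobj;
	return (0);
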
359 kcov_alloc(struct kcov_info *info, size_t entries) in kcov_alloc() argument
364 KASSERT(info->kvaddr == 0, ("kcov_alloc: Already have a buffer")); in kcov_alloc()
365 KASSERT(info->state == KCOV_STATE_OPEN, in kcov_alloc()
366 ("kcov_alloc: Not in open state (%x)", info->state)); in kcov_alloc()
372 info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE); in kcov_alloc()
373 pages = info->bufsize / PAGE_SIZE; in kcov_alloc()
375 if ((info->kvaddr = kva_alloc(info->bufsize)) == 0) in kcov_alloc()
378 info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize, in kcov_alloc()
381 VM_OBJECT_WLOCK(info->bufobj); in kcov_alloc()
383 m = vm_page_grab(info->bufobj, n, in kcov_alloc()
387 pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1); in kcov_alloc()
389 VM_OBJECT_WUNLOCK(info->bufobj); in kcov_alloc()
391 info->entries = entries; in kcov_alloc()
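
The allocation path visible above is: round the requested size up to whole pages, reserve kernel virtual address space, back it with an OBJT_PHYS object, and wire each page into both the object and the KVA range so the trace hooks never fault. A hedged sketch of that sequence (error handling trimmed and the vm_page_grab() flags abbreviated; the real call site also marks each page valid):

	info->bufsize = roundup2(entries * KCOV_ELEMENT_SIZE, PAGE_SIZE);
	pages = info->bufsize / PAGE_SIZE;

	if ((info->kvaddr = kva_alloc(info->bufsize)) == 0)
		return (ENOMEM);

	info->bufobj = vm_pager_allocate(OBJT_PHYS, 0, info->bufsize,
	    PROT_READ | PROT_WRITE, 0, curthread->td_ucred);

	VM_OBJECT_WLOCK(info->bufobj);
	for (n = 0; n < pages; n++) {
		/* Allocate a zeroed, wired page at index n of the object. */
		m = vm_page_grab(info->bufobj, n,
		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		/* Map it into the reserved KVA, one page at a time. */
		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
	}
	VM_OBJECT_WUNLOCK(info->bufobj);

	info->entries = entries;
	return (0);
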
397 kcov_free(struct kcov_info *info) in kcov_free() argument
402 if (info->kvaddr != 0) { in kcov_free()
403 pmap_qremove(info->kvaddr, info->bufsize / PAGE_SIZE); in kcov_free()
404 kva_free(info->kvaddr, info->bufsize); in kcov_free()
406 if (info->bufobj != NULL) { in kcov_free()
407 VM_OBJECT_WLOCK(info->bufobj); in kcov_free()
408 m = vm_page_lookup(info->bufobj, 0); in kcov_free()
409 for (i = 0; i < info->bufsize / PAGE_SIZE; i++) { in kcov_free()
413 VM_OBJECT_WUNLOCK(info->bufobj); in kcov_free()
414 vm_object_deallocate(info->bufobj); in kcov_free()
416 free(info, M_KCOV_INFO); in kcov_free()
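
Teardown undoes kcov_alloc() in reverse: drop the kernel mapping and its KVA reservation, walk the object's pages to release the wiring taken at allocation time, drop the object reference, and finally free the descriptor state. A hedged sketch of the page walk between the lock and unlock lines above (the exact unwire primitive varies between FreeBSD versions):

	if (info->bufobj != NULL) {
		VM_OBJECT_WLOCK(info->bufobj);
		m = vm_page_lookup(info->bufobj, 0);
		for (i = 0; i < info->bufsize / PAGE_SIZE; i++) {
			/* Release the wiring taken by vm_page_grab(). */
			vm_page_unwire_noq(m);
			m = vm_page_next(m);
		}
		VM_OBJECT_WUNLOCK(info->bufobj);
		vm_object_deallocate(info->bufobj);
	}
	free(info, M_KCOV_INFO);
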
423 struct kcov_info *info; in kcov_ioctl() local
426 if ((error = devfs_get_cdevpriv((void **)&info)) != 0) in kcov_ioctl()
434 if (info->state != KCOV_STATE_OPEN) { in kcov_ioctl()
437 error = kcov_alloc(info, *(u_int *)data); in kcov_ioctl()
439 info->state = KCOV_STATE_READY; in kcov_ioctl()
446 if (info->state != KCOV_STATE_READY) { in kcov_ioctl()
469 KASSERT(info->thread == NULL, in kcov_ioctl()
471 info->thread = td; in kcov_ioctl()
472 info->mode = mode; in kcov_ioctl()
477 atomic_store_rel_int(&info->state, KCOV_STATE_RUNNING); in kcov_ioctl()
478 td->td_kcov_info = info; in kcov_ioctl()
482 if (info->state != KCOV_STATE_RUNNING || in kcov_ioctl()
483 info != td->td_kcov_info) { in kcov_ioctl()
495 atomic_store_int(&info->state, KCOV_STATE_READY); in kcov_ioctl()
501 info->mode = -1; in kcov_ioctl()
502 info->thread = NULL; in kcov_ioctl()
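
The ioctl sequence above (size the buffer while OPEN, move READY to RUNNING for the calling thread on enable, and back to READY on disable) corresponds to the usual kcov(4) workflow from userspace. A hedged example consumer, assuming the KIOSETBUFSIZE and KIOENABLE ioctls take their arguments by value and that each buffer slot is one 64-bit word; the exact argument convention should be confirmed against <sys/kcov.h>:

#include <sys/ioctl.h>
#include <sys/kcov.h>
#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define	ENTRIES		(1 << 16)
#define	ENTRY_SIZE	sizeof(uint64_t)	/* KCOV_ELEMENT_SIZE in the kernel */

int
main(void)
{
	uint64_t *buf, i, count;
	int fd;

	fd = open("/dev/kcov", O_RDWR);
	if (fd == -1)
		err(1, "open");

	/* Size the buffer while the descriptor is still in the OPEN state. */
	if (ioctl(fd, KIOSETBUFSIZE, ENTRIES) == -1)
		err(1, "KIOSETBUFSIZE");

	/* Map the same object kcov_mmap_single() hands out. */
	buf = mmap(NULL, ENTRIES * ENTRY_SIZE, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		err(1, "mmap");

	/* READY -> RUNNING: attach this thread and start PC tracing. */
	if (ioctl(fd, KIOENABLE, KCOV_MODE_TRACE_PC) == -1)
		err(1, "KIOENABLE");

	buf[0] = 0;		/* reset the record counter */
	(void)getpid();		/* something to trace */

	if (ioctl(fd, KIODISABLE) == -1)	/* RUNNING -> READY */
		err(1, "KIODISABLE");

	count = buf[0];
	for (i = 0; i < count; i++)
		printf("0x%jx\n", (uintmax_t)buf[i + 1]);

	close(fd);
	return (0);
}
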
516 struct kcov_info *info; in kcov_thread_dtor() local
518 info = td->td_kcov_info; in kcov_thread_dtor()
519 if (info == NULL) in kcov_thread_dtor()
530 if (info->state != KCOV_STATE_DYING) { in kcov_thread_dtor()
535 atomic_store_int(&info->state, KCOV_STATE_READY); in kcov_thread_dtor()
538 info->thread = NULL; in kcov_thread_dtor()
552 kcov_free(info); in kcov_thread_dtor()
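
The thread destructor closes the remaining lifetime gap: if the device is still open when the traced thread exits, the state falls back to READY and the buffer survives for the next KIOENABLE; only when kcov_mmap_cleanup() has already marked the descriptor DYING does the exiting thread perform the final free. A hedged outline of that decision (locking elided):

	info = td->td_kcov_info;
	if (info == NULL)
		return;

	if (info->state != KCOV_STATE_DYING) {
		/* Device still open: detach the thread, keep the buffer. */
		atomic_store_int(&info->state, KCOV_STATE_READY);
		info->thread = NULL;
		return;
	}

	/* Device already torn down: the exiting thread frees everything. */
	kcov_free(info);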