/dragonfly/sys/dev/netif/oce/

oce_queue.c:
    139   TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);  in oce_queue_init_all()
    183   if (wq) {  in oce_queue_release_all()
    223   if (!wq)  in oce_wq_init()
    230   wq->cfg.nbufs = 2 * wq->cfg.q_len;  in oce_wq_init()
    231   wq->cfg.nhdl = 2 * wq->cfg.q_len;  in oce_wq_init()
    248   rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);  in oce_wq_init()
    267   return wq;  in oce_wq_init()
    297   bus_dmamap_unload(wq->tag, wq->pckts[i].map);  in oce_wq_free()
    298   bus_dmamap_destroy(wq->tag, wq->pckts[i].map);  in oce_wq_free()
    344   wq->wq_free = wq->cfg.q_len;  in oce_wq_create()
    [all …]
oce_if.c:
    521   wq = sc->wq[queue_index];
    523   LOCK(&wq->tx_lock);
    816   struct oce_wq *wq = sc->wq[wq_index];  in oce_tx()  (local)
    855   pd = &wq->pckts[wq->pkt_desc_head];  in oce_tx()
    946   bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,  in oce_tx()
    983   pd = &wq->pckts[wq->pkt_desc_tail];  in oce_tx_complete()
    995   if (wq->ring->num_used < (wq->ring->num_items / 2)) {  in oce_tx_complete()
    1177  if (wq->ring->cidx >= wq->ring->num_items)  in oce_wq_handler()
    1178  wq->ring->cidx -= wq->ring->num_items;  in oce_wq_handler()
    1207  br = wq->br;
    [all …]
oce_sysctl.c:
    728   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts, 0,  in oce_add_stats_sysctls_be3()
    731   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_bytes, 0,  in oce_add_stats_sysctls_be3()
    734   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0,  in oce_add_stats_sysctls_be3()
    737   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_stops, 0,  in oce_add_stats_sysctls_be3()
    740   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0,  in oce_add_stats_sysctls_be3()
    743   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_compl, 0,  in oce_add_stats_sysctls_be3()
    747   &sc->wq[i]->tx_stats.ipv6_ext_hdr_tx_drop, 0,  in oce_add_stats_sysctls_be3()
    931   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_pkts, 0,  in oce_add_stats_sysctls_xe201()
    937   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_reqs, 0,  in oce_add_stats_sysctls_xe201()
    943   CTLFLAG_RD, &sc->wq[i]->tx_stats.tx_wrbs, 0,  in oce_add_stats_sysctls_xe201()
    [all …]
oce_if.h:
    209   #define for_all_wq_queues(sc, wq, i) \  (argument)
    210   for (i = 0, wq = sc->wq[0]; i < sc->nwqs; i++, wq = sc->wq[i])
    849   struct oce_wq *wq[OCE_MAX_WQ]; /* TX work queues */  (member)
    973   int oce_start_wq(struct oce_wq *wq);
    983   void oce_drain_wq_cq(struct oce_wq *wq);
    1053  int oce_mbox_create_wq(struct oce_wq *wq);
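The for_all_wq_queues() iterator and the wq[OCE_MAX_WQ] member above are how the driver walks its per-interface TX work queues. A minimal sketch of how a caller might use the macro (the oce_dump_wqs() helper is hypothetical; POCE_SOFTC, wq_id, and tx_stats.tx_pkts are taken from the surrounding hits):

    /*
     * Sketch only: iterate every TX work queue of an oce softc with the
     * for_all_wq_queues() macro from oce_if.h.  oce_dump_wqs() is a
     * hypothetical helper, not part of the driver.
     */
    static void
    oce_dump_wqs(POCE_SOFTC sc)
    {
        struct oce_wq *wq;
        int i;

        for_all_wq_queues(sc, wq, i) {
            kprintf("wq[%d]: id %u, %lu packets sent\n",
                i, (unsigned)wq->wq_id,
                (unsigned long)wq->tx_stats.tx_pkts);
        }
    }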
oce_mbox.c:
    1665  oce_mbox_create_wq(struct oce_wq *wq)  in oce_mbox_create_wq()  (argument)
    1669  POCE_SOFTC sc = wq->parent;  in oce_mbox_create_wq()
    1690  num_pages = oce_page_list(wq->ring, &fwcmd->params.req.pages[0]);  in oce_mbox_create_wq()
    1692  fwcmd->params.req.nic_wq_type = wq->cfg.wq_type;  in oce_mbox_create_wq()
    1694  fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;  in oce_mbox_create_wq()
    1695  fwcmd->params.req.cq_id = wq->cq->cq_id;  in oce_mbox_create_wq()
    1709  wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);  in oce_mbox_create_wq()
    1711  wq->db_offset = LE_32(fwcmd->params.rsp.db_offset);  in oce_mbox_create_wq()
    1713  wq->db_offset = PD_TXULP_DB;  in oce_mbox_create_wq()
/dragonfly/sys/dev/drm/

linux_workqueue.c:
    126   if (wq->is_draining)  in queue_work()
    129   if (wq->num_workers > 1)  in queue_work()
    202   wq = kmalloc(sizeof(*wq), M_DRM, M_WAITOK | M_ZERO);  in _create_workqueue_common()
    210   wq->num_workers = 1;  in _create_workqueue_common()
    214   wq->workers = kmalloc(sizeof(struct workqueue_worker) * wq->num_workers,  in _create_workqueue_common()
    233   kfree(wq);  in _create_workqueue_common()
    240   return wq;  in _create_workqueue_common()
    246   drain_workqueue(wq);  in destroy_workqueue()
    250   kfree(wq->wq_threads);  in destroy_workqueue()
    251   kfree(wq);  in destroy_workqueue()
    [all …]
linux_wait.c:
    65    __wait_event_prefix(wait_queue_head_t *wq, int flags)  in __wait_event_prefix()  (argument)
    67    lockmgr(&wq->lock, LK_EXCLUSIVE);  in __wait_event_prefix()
    73    lockmgr(&wq->lock, LK_RELEASE);  in __wait_event_prefix()
/dragonfly/sys/dev/drm/include/linux/

wait.h:
    92    void __wait_event_prefix(wait_queue_head_t *wq, int flags);
    122   prepare_to_wait(&wq, &tmp_wq, state); \
    125   __wait_event_prefix(&wq, flags); \
    167   finish_wait(&wq, &tmp_wq); \
    171   #define wait_event(wq, condition) \  (argument)
    172   __wait_event_common(wq, condition, 0, 0, false)
    174   #define wait_event_timeout(wq, condition, timeout) \  (argument)
    175   __wait_event_common(wq, condition, timeout, 0, false)
    177   #define wait_event_interruptible(wq, condition) \  (argument)
    246   __remove_wait_queue(head, wq);  in remove_wait_queue()
    [all …]
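These are the Linux-compat wait-queue macros that the DRM code in this tree builds on. A minimal sketch of the usual consumer pairing, a sleeper and a waker (my_wq, done, and mark_done() are illustrative names; init_waitqueue_head() and wake_up() are assumed to come from the same compat header):

    /*
     * Sketch only: sleeping on a condition with the compat wait-queue API.
     * init_waitqueue_head(&my_wq) is assumed to have run at attach time.
     */
    static wait_queue_head_t my_wq;
    static bool done;

    static int
    wait_for_done(void)
    {
        /* Sleeps interruptibly until `done` is true; 0 or a -Exxx error. */
        return wait_event_interruptible(my_wq, done);
    }

    static void
    mark_done(void)
    {
        done = true;
        wake_up(&my_wq);    /* wakes the sleeper in wait_for_done() */
    }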
workqueue.h:
    114   int queue_work(struct workqueue_struct *wq, struct work_struct *work);
    115   int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    136   mod_delayed_work(struct workqueue_struct *wq,  in mod_delayed_work()  (argument)
    140   queue_delayed_work(wq, dwork, delay);  in mod_delayed_work()
    144   void drain_workqueue(struct workqueue_struct *wq);
    145   void flush_workqueue(struct workqueue_struct *wq);
    161   void destroy_workqueue(struct workqueue_struct *wq);
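Together with the linux_workqueue.c hits above, these prototypes give the usual create/queue/flush/destroy lifecycle. A minimal sketch, assuming the compat layer also provides alloc_ordered_workqueue() (it appears in the i915_drv.c hits further down) and INIT_WORK(); the handler and its names are illustrative:

    /*
     * Sketch only: typical workqueue lifecycle against the prototypes above.
     */
    static struct workqueue_struct *my_wq;
    static struct work_struct my_work;

    static void
    my_work_fn(struct work_struct *work)
    {
        /* Runs in workqueue-thread context. */
    }

    static int
    my_init(void)
    {
        my_wq = alloc_ordered_workqueue("my-wq", 0);
        if (my_wq == NULL)
            return -ENOMEM;
        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);
        return 0;
    }

    static void
    my_fini(void)
    {
        flush_workqueue(my_wq);     /* wait for anything still queued */
        destroy_workqueue(my_wq);   /* drains, then frees (see the hits above) */
    }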
/dragonfly/sys/dev/drm/i915/

i915_sw_fence.c:
    236   list_del(&wq->entry);  in i915_sw_fence_wake()
    240   kfree(wq);  in i915_sw_fence_wake()
    247   wait_queue_entry_t *wq;  in __i915_sw_fence_check_if_after()  (local)
    268   wait_queue_entry_t *wq;  in __i915_sw_fence_clear_checked_bit()  (local)
    317   if (!wq) {  in __i915_sw_fence_await_sw_fence()
    318   wq = kmalloc(sizeof(*wq), M_DRM, gfp);  in __i915_sw_fence_await_sw_fence()
    319   if (!wq) {  in __i915_sw_fence_await_sw_fence()
    330   INIT_LIST_HEAD(&wq->entry);  in __i915_sw_fence_await_sw_fence()
    331   wq->flags = pending;  in __i915_sw_fence_await_sw_fence()
    333   wq->private = fence;  in __i915_sw_fence_await_sw_fence()
    [all …]
i915_gem_userptr.c:
    53    struct workqueue_struct *wq;  (member)
    146   queue_work(mn->wq, &mo->work);  in i915_gem_userptr_mn_invalidate_range_start()
    156   flush_workqueue(mn->wq);  in i915_gem_userptr_mn_invalidate_range_start()
    175   mn->wq = alloc_workqueue("i915-userptr-release",  in i915_mmu_notifier_create()
    178   if (mn->wq == NULL) {  in i915_mmu_notifier_create()
    237   destroy_workqueue(mn->wq);  in i915_mmu_notifier_find()
    283   destroy_workqueue(mn->wq);  in i915_mmu_notifier_free()
i915_sw_fence.h:
    68    wait_queue_entry_t *wq);
intel_sprite.c:
    96    wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);  in intel_pipe_update_start()  (local)
    128   prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);  in intel_pipe_update_start()
    147   finish_wait(wq, &wait);  in intel_pipe_update_start()
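intel_pipe_update_start() open-codes its wait with prepare_to_wait()/finish_wait() instead of a wait_event macro, so it can re-check state and do work between sleeps. A minimal sketch of that shape (condition_met(), the timeout handling, DEFINE_WAIT(), and schedule_timeout() are assumptions about the same compat layer, not code from this file):

    /*
     * Sketch only: an open-coded wait loop in the style of
     * intel_pipe_update_start().
     */
    static void
    wait_for_condition(wait_queue_head_t *wq, long timeout)
    {
        DEFINE_WAIT(wait);

        for (;;) {
            /* Register on the queue before testing the condition. */
            prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
            if (condition_met() || timeout == 0)
                break;
            timeout = schedule_timeout(timeout);
        }
        finish_wait(wq, &wait);
    }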
i915_gem_request.c:
    262   queue_delayed_work(i915->wq,  in mark_busy()
    296   mod_delayed_work(i915->wq,  in unreserve_engine()
i915_drv.c:
    858   dev_priv->wq = alloc_ordered_workqueue("i915", 0);  in i915_workqueues_init()
    859   if (dev_priv->wq == NULL)  in i915_workqueues_init()
    869   destroy_workqueue(dev_priv->wq);  in i915_workqueues_init()
    888   destroy_workqueue(dev_priv->wq);  in i915_workqueues_cleanup()
/dragonfly/sys/dev/drm/amd/amdgpu/

amdgpu_sa.c:
    56    init_waitqueue_head(&sa_manager->wq);  in amdgpu_sa_bo_manager_init()
    297   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in amdgpu_sa_bo_new()
    309   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in amdgpu_sa_bo_new()
    321   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in amdgpu_sa_bo_new()
    333   sa_manager->wq,  in amdgpu_sa_bo_new()
    340   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in amdgpu_sa_bo_new()
    356   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in amdgpu_sa_bo_free()
    366   wake_up_all_locked(&sa_manager->wq);  in amdgpu_sa_bo_free()
    367   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in amdgpu_sa_bo_free()
    378   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in amdgpu_sa_bo_dump_debug_info()
    [all …]
/dragonfly/sys/dev/drm/radeon/

radeon_sa.c:
    56    init_waitqueue_head(&sa_manager->wq);  in radeon_sa_bo_manager_init()
    333   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in radeon_sa_bo_new()
    345   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in radeon_sa_bo_new()
    355   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in radeon_sa_bo_new()
    363   sa_manager->wq,  in radeon_sa_bo_new()
    370   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in radeon_sa_bo_new()
    386   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in radeon_sa_bo_free()
    394   wake_up_all_locked(&sa_manager->wq);  in radeon_sa_bo_free()
    395   lockmgr(&sa_manager->wq.lock, LK_RELEASE);  in radeon_sa_bo_free()
    405   lockmgr(&sa_manager->wq.lock, LK_EXCLUSIVE);  in radeon_sa_bo_dump_debug_info()
    [all …]
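Both suballocator managers use the same pattern: the lockmgr lock embedded in the wait_queue_head_t doubles as the allocator lock, so the free path can wake waiters without dropping it. A minimal sketch of the wake side as it appears in amdgpu_sa_bo_free()/radeon_sa_bo_free() (struct sa_manager here is a stand-in with only the fields the pattern needs):

    /*
     * Sketch only: waking waiters while already holding the wait queue's
     * own lock, as in the *_sa_bo_free() hits above.
     */
    struct sa_manager {
        wait_queue_head_t   wq;     /* init_waitqueue_head() at init time */
        /* ... allocator bookkeeping, protected by wq.lock ... */
    };

    static void
    sa_free_and_wake(struct sa_manager *sa)
    {
        lockmgr(&sa->wq.lock, LK_EXCLUSIVE);
        /* ... return the buffer to the free list ... */
        wake_up_all_locked(&sa->wq);    /* lock is held, so the _locked variant */
        lockmgr(&sa->wq.lock, LK_RELEASE);
    }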
/dragonfly/sys/dev/virtual/amazon/ena/ena-com/

ena_plat.h:
    225   #define ena_wait_event_t struct { struct cv wq; struct lock lock; }
    228   cv_init(&((waitqueue).wq), "cv"); \
    233   cv_destroy(&((waitqueue).wq)); \
    237   cv_init(&((waitqueue).wq), (waitqueue).wq.cv_desc)
    241   cv_timedwait(&((waitqueue).wq), &((waitqueue).lock), \
    245   #define ENA_WAIT_EVENT_SIGNAL(waitqueue) cv_broadcast(&((waitqueue).wq))
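The ENA platform shim builds its wait-event primitive out of a DragonFly condvar paired with a lockmgr lock. A minimal sketch of that underlying pattern written out directly (the structure, function and wmesg names, and the one-second timeout are illustrative; the cv-plus-lock pairing and the cv_init/cv_timedwait/cv_broadcast calls mirror the macros above):

    /*
     * Sketch only: the condvar-plus-lock pattern wrapped by the
     * ENA_WAIT_EVENT_* macros.
     */
    struct my_wait_event {
        struct cv   wq;
        struct lock lock;
    };

    static void
    my_event_init(struct my_wait_event *we)
    {
        cv_init(&we->wq, "enaev");
        lockinit(&we->lock, "enaevlk", 0, 0);
    }

    static int
    my_event_wait(struct my_wait_event *we)
    {
        int error;

        lockmgr(&we->lock, LK_EXCLUSIVE);
        error = cv_timedwait(&we->wq, &we->lock, hz);   /* 0, or EWOULDBLOCK on timeout */
        lockmgr(&we->lock, LK_RELEASE);
        return error;
    }

    static void
    my_event_signal(struct my_wait_event *we)
    {
        cv_broadcast(&we->wq);  /* what ENA_WAIT_EVENT_SIGNAL() expands to */
    }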
/dragonfly/bin/ed/

POSIX:
    24    iii) `wq' for exiting after a write,
    68    wq
    73    wq
/dragonfly/sys/dev/drm/include/drm/

drm_debugfs_crc.h:
    59    wait_queue_head_t wq;  (member)
drm_flip_work.h:
    87    struct workqueue_struct *wq);
/dragonfly/test/stress/stress2/misc/

cleanup.sh:
    39    mount | grep -wq $mntpoint && umount $mntpoint
/dragonfly/sys/dev/drm/ttm/

ttm_bo.c:
    441   schedule_delayed_work(&bdev->wq,  in ttm_bo_cleanup_refs_or_queue()
    575   container_of(work, struct ttm_bo_device, wq.work);  in ttm_bo_delayed_workqueue()
    578   schedule_delayed_work(&bdev->wq,  in ttm_bo_delayed_workqueue()
    614   return cancel_delayed_work_sync(&bdev->wq);  in ttm_bo_lock_delayed_workqueue()
    621   schedule_delayed_work(&bdev->wq,  in ttm_bo_unlock_delayed_workqueue()
    1527  cancel_delayed_work_sync(&bdev->wq);  in ttm_bo_device_release()
    1569  INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);  in ttm_bo_device_init()
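ttm_bo.c drives its delayed-destroy cleanup with a self-rearming delayed work item: the handler recovers its ttm_bo_device with container_of() and reschedules itself, and teardown stops it with cancel_delayed_work_sync(). A minimal sketch of that shape (struct my_device, the handler name, and the HZ period are illustrative):

    /*
     * Sketch only: a self-rearming delayed work item in the style of
     * ttm_bo_delayed_workqueue().
     */
    struct my_device {
        struct delayed_work wq;
        /* ... */
    };

    static void
    my_delayed_fn(struct work_struct *work)
    {
        struct my_device *dev =
            container_of(work, struct my_device, wq.work);

        /* ... periodic cleanup ... */
        schedule_delayed_work(&dev->wq, HZ);        /* rearm */
    }

    static void
    my_device_init(struct my_device *dev)
    {
        INIT_DELAYED_WORK(&dev->wq, my_delayed_fn);
        schedule_delayed_work(&dev->wq, HZ);
    }

    static void
    my_device_fini(struct my_device *dev)
    {
        cancel_delayed_work_sync(&dev->wq);         /* also waits for a running instance */
    }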
/dragonfly/contrib/tcsh-6/

tw.parse.c:
    599   Char w, wq;  in insert_meta()  (local)
    615   wq = w & QUOTE;  in insert_meta()
    623   wq = QUOTE; /* quotes are always quoted */  in insert_meta()
    625   if (!wq && qu && tricky(w) && !(qu == '\"' && tricky_dq(w))) {  in insert_meta()
    652   else if (wq && qu == '\"' && tricky_dq(w)) {  in insert_meta()
    658   } else if (wq &&  in insert_meta()
/dragonfly/sys/dev/drm/include/drm/ttm/

ttm_bo_driver.h:
    486   struct delayed_work wq;  (member)