
Searched refs:queue (Results 1 – 25 of 255) sorted by relevance


/dragonfly/sys/kern/
subr_taskqueue.c
120 queue = kmalloc(sizeof(*queue), M_TASKQUEUE, mflags | M_ZERO); in taskqueue_create()
121 if (!queue) in taskqueue_create()
134 return queue; in taskqueue_create()
153 TQ_LOCK(queue); in taskqueue_free()
156 taskqueue_terminate(queue->tq_threads, queue); in taskqueue_free()
177 return queue; in taskqueue_find()
234 queue->tq_enqueue(queue->tq_context); in taskqueue_enqueue_locked()
251 TQ_LOCK(queue); in taskqueue_enqueue()
316 *qpp = queue; in taskqueue_enqueue_optq()
332 TQ_LOCK(queue); in taskqueue_timeout_func()
[all …]
subr_gtaskqueue.c
116 if (!queue) { in _gtaskqueue_create()
129 lockinit(&queue->tq_lock, queue->tq_name, 0, 0); in _gtaskqueue_create()
131 return (queue); in _gtaskqueue_create()
151 TQ_LOCK(queue); in gtaskqueue_free()
153 gtaskqueue_terminate(queue->tq_threads, queue); in gtaskqueue_free()
177 TQ_LOCK(queue); in grouptask_block()
195 TQ_LOCK(queue); in grouptask_unblock()
209 TQ_LOCK(queue); in grouptaskqueue_enqueue()
222 queue->tq_enqueue(queue->tq_context); in grouptaskqueue_enqueue()
301 TQ_LOCK(queue); in gtaskqueue_block()
[all …]
/dragonfly/share/man/man3/
Makefile
16 MLINKS+=queue.3 LIST_EMPTY.3
17 MLINKS+=queue.3 LIST_ENTRY.3
18 MLINKS+=queue.3 LIST_FIRST.3
19 MLINKS+=queue.3 LIST_FOREACH.3
23 MLINKS+=queue.3 LIST_HEAD.3
25 MLINKS+=queue.3 LIST_INIT.3
29 MLINKS+=queue.3 LIST_NEXT.3
30 MLINKS+=queue.3 LIST_REMOVE.3
39 MLINKS+=queue.3 SLIST_HEAD.3
41 MLINKS+=queue.3 SLIST_INIT.3
[all …]
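
All of the MLINKS above point at the queue(3) manual page, which documents the list and tail-queue macros from <sys/queue.h>. As a quick reminder of what the LIST_* macros named in this Makefile look like in use, here is a minimal userland sketch (not code from the tree; the structure names are made up for the example):

/*
 * Minimal userland illustration of the queue(3) LIST_* macros that the
 * MLINKS above point at.  Not code from the tree.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int			value;
	LIST_ENTRY(entry)	link;	/* linkage embedded in each element */
};

LIST_HEAD(entryhead, entry);		/* declares struct entryhead */

int
main(void)
{
	struct entryhead head;
	struct entry *e;
	int i;

	LIST_INIT(&head);

	for (i = 0; i < 3; i++) {
		e = malloc(sizeof(*e));
		if (e == NULL)
			abort();
		e->value = i;
		LIST_INSERT_HEAD(&head, e, link);
	}

	LIST_FOREACH(e, &head, link)
		printf("value %d\n", e->value);

	while (!LIST_EMPTY(&head)) {	/* tear down from the head */
		e = LIST_FIRST(&head);
		LIST_REMOVE(e, link);
		free(e);
	}
	return (0);
}
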
/dragonfly/sys/dev/disk/nvme/
nvme.c
136 queue->sc = sc; in nvme_alloc_subqueue()
197 queue->reqary = kmalloc(sizeof(nvme_request_t) * queue->nqe, in nvme_alloc_subqueue()
243 queue->sc = sc; in nvme_alloc_comqueue()
298 bzero(queue, sizeof(*queue)); in nvme_free_subqueue()
309 queue->nqe = 0; in nvme_free_comqueue()
316 bzero(queue, sizeof(*queue)); in nvme_free_comqueue()
446 if ((queue->subq_tail + queue->unsubmitted + 1) % queue->nqe == in nvme_get_request()
520 cmd = &queue->ksubq[queue->subq_tail]; in nvme_submit_request()
522 if (++queue->subq_tail == queue->nqe) in nvme_submit_request()
524 KKASSERT(queue->subq_tail != queue->subq_head); in nvme_submit_request()
[all …]
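
The nvme.c hits show the submission queue being treated as a fixed-size ring: requests are placed at subq_tail, the index wraps back to zero when it reaches nqe, and the queue counts as full one slot before the tail would catch the head (the driver also folds in an "unsubmitted" count for requests reserved but not yet written). A stripped-down sketch of that ring arithmetic, with made-up names and none of the driver's locking, looks like this:

/*
 * Generic ring-index arithmetic in the style of the nvme submission
 * queue above: "nqe" slots, a producer tail, a consumer head, and one
 * slot deliberately left empty so full and empty can be told apart.
 * Illustration only; the field names are invented.
 */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned	head;	/* next slot the consumer will take */
	unsigned	tail;	/* next slot the producer will fill */
	unsigned	nqe;	/* total number of slots in the ring */
};

static int
ring_full(const struct ring *r)
{
	/* Full when advancing the tail once more would land on the head. */
	return ((r->tail + 1) % r->nqe == r->head);
}

static unsigned
ring_push(struct ring *r)
{
	unsigned slot;

	assert(!ring_full(r));
	slot = r->tail;
	if (++r->tail == r->nqe)	/* wrap, as in nvme_submit_request() */
		r->tail = 0;
	return (slot);
}

int
main(void)
{
	struct ring r = { .head = 0, .tail = 0, .nqe = 4 };

	while (!ring_full(&r))
		printf("filled slot %u\n", ring_push(&r));
	return (0);
}
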
/dragonfly/sys/bus/cam/
cam_queue.c
92 if (queue != NULL) { in camq_free()
93 camq_fini(queue); in camq_free()
149 if (queue->entries >= queue->array_size) in camq_insert()
152 queue->entries++; in camq_insert()
153 queue->queue_array[queue->entries] = new_entry; in camq_insert()
156 heap_up(queue->queue_array, queue->entries); in camq_insert()
178 queue->queue_array[index] = queue->queue_array[queue->entries]; in camq_remove()
180 heap_down(queue->queue_array, index, queue->entries - 1); in camq_remove()
182 queue->queue_array[queue->entries] = NULL; in camq_remove()
184 queue->entries--; in camq_remove()
[all …]
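
camq_insert() and camq_remove() above are the two halves of a binary heap kept in a 1-indexed array: a new entry is appended at queue_array[entries] and bubbled up with heap_up(), and a removed slot is refilled with the last entry and pushed back down with heap_down(). The following generic min-heap sketch mirrors that pattern on plain integers; it is illustrative only and is not the CAM implementation, which orders cam_pinfo entries by priority:

/*
 * Generic 1-indexed binary min-heap sift operations, in the spirit of
 * the heap_up()/heap_down() calls shown above.  The function names are
 * reused only to mirror the pattern.
 */
#include <stdio.h>

static void
swap_ints(int *a, int *b)
{
	int t = *a; *a = *b; *b = t;
}

/* Bubble the element at index i toward the root (array is 1-indexed). */
static void
heap_up(int *heap, int i)
{
	while (i > 1 && heap[i] < heap[i / 2]) {
		swap_ints(&heap[i], &heap[i / 2]);
		i /= 2;
	}
}

/* Push the element at index i down until both children are larger. */
static void
heap_down(int *heap, int i, int last)
{
	int child;

	while ((child = 2 * i) <= last) {
		if (child < last && heap[child + 1] < heap[child])
			child++;
		if (heap[i] <= heap[child])
			break;
		swap_ints(&heap[i], &heap[child]);
		i = child;
	}
}

int
main(void)
{
	int heap[8] = { 0, 9, 4, 7, 1, 8 };	/* slot 0 unused */
	int last = 5, i;

	for (i = 2; i <= last; i++)		/* insert-style heapify */
		heap_up(heap, i);

	/* Remove the root the way camq_remove() refills a slot: move the
	 * last entry in and push it back down. */
	heap[1] = heap[last--];
	heap_down(heap, 1, last);

	for (i = 1; i <= last; i++)
		printf("%d ", heap[i]);
	printf("\n");
	return (0);
}
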
cam_queue.h
59 struct camq queue; member
113 u_int32_t camq_resize(struct camq *queue, int new_size);
125 void camq_free(struct camq *queue);
130 void camq_fini(struct camq *queue);
136 void camq_insert(struct camq *queue, cam_pinfo *new_entry);
142 cam_pinfo *camq_remove(struct camq *queue, int index);
152 void camq_change_priority(struct camq *queue, int index,
183 return (ccbq->queue.entries); in cam_ccbq_pending_ccb_count()
197 camq_insert(&ccbq->queue, &new_ccb->ccb_h.pinfo); in cam_ccbq_insert_ccb()
203 camq_remove(&ccbq->queue, ccb->ccb_h.pinfo.index); in cam_ccbq_remove_ccb()
[all …]
/dragonfly/libexec/dma/
dma.c
200 LIST_FOREACH(tit, &queue->queue, next) { in add_recp()
246 go_background(struct queue *queue) in go_background() argument
262 LIST_FOREACH(it, &queue->queue, next) { in go_background()
378 run_queue(struct queue *queue) in run_queue() argument
382 if (LIST_EMPTY(&queue->queue)) in run_queue()
391 show_queue(struct queue *queue) in show_queue() argument
396 if (LIST_EMPTY(&queue->queue)) { in show_queue()
428 struct queue queue; in main() local
461 bzero(&queue, sizeof(queue)); in main()
462 LIST_INIT(&queue.queue); in main()
[all …]
spool.c
75 newspoolf(struct queue *queue) in newspoolf() argument
157 readqueuef(struct queue *queue, char *queuefn) in readqueuef() argument
219 LIST_INSERT_HEAD(&queue->queue, it, next); in readqueuef()
235 linkspool(struct queue *queue) in linkspool() argument
246 LIST_FOREACH(it, &queue->queue, next) { in linkspool()
265 LIST_FOREACH(it, &queue->queue, next) { in linkspool()
274 LIST_FOREACH(it, &queue->queue, next) { in linkspool()
282 load_queue(struct queue *queue) in load_queue() argument
291 bzero(queue, sizeof(*queue)); in load_queue()
292 LIST_INIT(&queue->queue); in load_queue()
[all …]
mail.c
51 struct queue bounceq; in bounce()
63 LIST_INIT(&bounceq.queue); in bounce()
167 parse_addrs(struct parse_state *ps, char *s, struct queue *queue) in parse_addrs() argument
349 writeline(struct queue *queue, const char *line, ssize_t linelen) in writeline() argument
365 if (fwrite("\n", 1, 1, queue->mailf) != 1) in writeline()
375 readmail(struct queue *queue, int nodot, int recp_from_header) in readmail() argument
393 error = fprintf(queue->mailf, in readmail()
400 queue->sender, in readmail()
401 queue->id, in readmail()
424 username, useruid, queue->sender); in readmail()
[all …]
dma.h
122 struct queue { struct
123 struct queueh queue; member
214 int add_recp(struct queue *, const char *, int);
215 void run_queue(struct queue *);
218 int newspoolf(struct queue *);
219 int linkspool(struct queue *);
220 int load_queue(struct queue *);
223 void dropspool(struct queue *, struct qitem *);
232 int readmail(struct queue *, int, int);
/dragonfly/sys/dev/drm/include/drm/
spsc_queue.h
50 queue->head = NULL; in spsc_queue_init()
51 atomic_long_set(&queue->tail, (long)&queue->head); in spsc_queue_init()
52 atomic_set(&queue->job_count, 0); in spsc_queue_init()
57 return queue->head; in spsc_queue_peek()
62 return atomic_read(&queue->job_count); in spsc_queue_count()
75 atomic_inc(&queue->job_count); in spsc_queue_push()
85 return tail == &queue->head; in spsc_queue_push()
96 node = READ_ONCE(queue->head); in spsc_queue_pop()
102 WRITE_ONCE(queue->head, next); in spsc_queue_pop()
107 if (atomic_long_cmpxchg(&queue->tail, in spsc_queue_pop()
[all …]
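
spsc_queue.h implements a lockless single-producer, single-consumer queue: elements embed a struct spsc_node, one thread pushes with spsc_queue_push(), and one thread pops with spsc_queue_pop(). A kernel-context sketch of that embedding is below; it is an illustration only (the my_job type and helper functions are invented) and is not buildable outside the kernel tree:

/*
 * Kernel-context sketch of using the drm spsc queue shown above.
 * Exactly one thread may push and one thread may pop concurrently.
 * Illustration only; not code from the tree.
 */
#include <linux/kernel.h>	/* container_of() */
#include <drm/spsc_queue.h>

struct my_job {
	int		id;
	struct spsc_node node;		/* linkage used by the queue */
};

static struct spsc_queue my_queue;	/* spsc_queue_init(&my_queue) first */

/* Producer side: hand a job to the consumer. */
static void
my_job_submit(struct my_job *job)
{
	spsc_queue_push(&my_queue, &job->node);
}

/* Consumer side: pop one job, or return NULL if the queue is empty. */
static struct my_job *
my_job_fetch(void)
{
	struct spsc_node *node;

	node = spsc_queue_pop(&my_queue);
	if (node == NULL)
		return (NULL);
	return (container_of(node, struct my_job, node));
}
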
/dragonfly/share/examples/pf/
faq-example3
17 # std_ext - the standard queue. also the default queue for
25 queue std_ext bandwidth 500Kb cbq(default borrow)
43 # std_int - the standard queue. also the default queue for outgoing
50 queue std_int bandwidth 250Kb cbq(default borrow)
51 queue it_int bandwidth 500Kb cbq(borrow)
53 queue www_int bandwidth 99Mb cbq(red borrow)
70 queue internal_dmz bandwidth 99Mb cbq(borrow)
93 flags S/SA keep state queue www_ext_http
101 pass in on dc0 from $it_net to any queue it_int
102 pass in on dc0 from $boss to any queue boss_int
[all …]
faq-example2
13 # ACK queue.
21 # to this queue.
26 queue std_out priq(default)
27 queue ssh_im_out priority 4 priq(red)
28 queue dns_out priority 5
29 queue tcp_ack_out priority 6
40 # to this queue.
70 keep state queue dns_out
84 queue dns_in
86 queue(std_in, ssh_im_in)
[all …]
queue2
3 # advanced queue example.
12 queue std bandwidth 10% cbq(default)
14 queue developers bandwidth 75% cbq(borrow)
15 queue employees bandwidth 15%
18 queue ssh_interactive bandwidth 25% priority 7
19 queue ssh_bulk bandwidth 75% priority 0
21 block return out on $ext_if inet all queue std
23 keep state queue developers
25 keep state queue employees
27 keep state queue(ssh_bulk, ssh_interactive)
[all …]
queue1
7 queue { deflt, http, ssh, mail, rsets }
8 queue deflt bandwidth 10% priority 0 cbq(default ecn)
9 queue http bandwidth 1.5Mb priority 3 { http_vhosts, http_cust1 }
10 queue http_vhosts bandwidth 40% cbq(borrow red)
11 queue http_cust1 bandwidth 0.5Mb
12 queue mail bandwidth 10% priority 1
13 queue ssh bandwidth 100Kb priority 7 cbq(borrow)
14 queue rsets bandwidth 7500b priority 0 cbq(red)
16 block return in on $ext_if inet all queue rsets
17 pass in on $ext_if inet proto tcp from any to any port 80 keep state queue http
[all …]
queue4
11 # whenever there is no backlogged sibling queue but when a queue gets
12 # backlogged, it is guaranteed that the queue gets its linkshare.
14 altq on dc0 bandwidth 16Mb hfsc queue { eng law art }
15 queue eng bandwidth 10Mb { cs ee ie }
16 queue cs hfsc( default linkshare 50% )
17 queue ee hfsc( linkshare 30% )
18 queue ie hfsc( linkshare 20% )
19 queue law bandwidth 3Mb
20 queue art bandwidth 3Mb
queue3
7 altq on $ext_if priq bandwidth 10Mb queue { pri-low pri-med pri-high }
8 queue pri-low priority 0
9 queue pri-med priority 1 priq(default)
10 queue pri-high priority 2
13 queue(pri-med, pri-high)
14 pass out on $ext_if proto tcp from any to any port 80 keep state queue pri-med
15 pass in on $ext_if proto tcp from any to any port 80 keep state queue pri-low
ackpri
4 # Use a simple priority queue to prioritize empty (no payload) TCP ACKs,
12 # priority queue below, download drops only to 48 kB/s.
20 # value. If it's set too high, the priority queue is not effective, and
24 altq on $ext_if priq bandwidth 100Kb queue { q_pri, q_def }
25 queue q_pri priority 7
26 queue q_def priority 1 priq(default)
29 keep state queue (q_def, q_pri)
32 keep state queue (q_def, q_pri)
/dragonfly/contrib/gcc-8.0/libstdc++-v3/include/bits/
stl_queue.h
96 class queue in _GLIBCXX_VISIBILITY()
111 operator==(const queue<_Tp1, _Seq1>&, const queue<_Tp1, _Seq1>&); in _GLIBCXX_VISIBILITY()
115 operator<(const queue<_Tp1, _Seq1>&, const queue<_Tp1, _Seq1>&); in _GLIBCXX_VISIBILITY()
152 queue() in _GLIBCXX_VISIBILITY()
177 queue(const queue& __q, const _Alloc& __a) in _GLIBCXX_VISIBILITY()
181 queue(queue&& __q, const _Alloc& __a) in _GLIBCXX_VISIBILITY()
330 operator==(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
348 operator<(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
354 operator!=(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
360 operator>(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
[all …]
/dragonfly/sys/sys/
taskqueue.h
79 int taskqueue_enqueue(struct taskqueue *queue, struct task *task);
80 int taskqueue_enqueue_optq(struct taskqueue *queue,
82 int taskqueue_enqueue_timeout(struct taskqueue *queue,
84 int taskqueue_cancel(struct taskqueue *queue, struct task *task,
87 int taskqueue_cancel_timeout(struct taskqueue *queue,
89 void taskqueue_drain(struct taskqueue *queue, struct task *task);
91 void taskqueue_drain_timeout(struct taskqueue *queue,
94 void taskqueue_free(struct taskqueue *queue);
95 void taskqueue_block(struct taskqueue *queue);
96 void taskqueue_unblock(struct taskqueue *queue);
[all …]
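
taskqueue.h declares the deferred-work interface implemented by subr_taskqueue.c above: a struct task names a callback, taskqueue_enqueue() hands it to a queue, and the queue's worker runs it later. The following kernel-context sketch shows the usual lifecycle, assuming a queue that was already created and had its worker threads started (those steps are omitted); the softc and function names are invented for the example:

/*
 * Kernel-context sketch of the taskqueue(9) calls declared above.
 * Illustration only: "my_softc", "my_task_fn" and the pre-created
 * queue in sc->tq are assumptions, not code from the tree.
 */
#include <sys/param.h>
#include <sys/taskqueue.h>

struct my_softc {
	struct taskqueue *tq;		/* created with taskqueue_create() */
	struct task	  refresh_task;
};

/* Task callback: runs in the queue's worker context, not the caller's. */
static void
my_task_fn(void *context, int pending)
{
	struct my_softc *sc = context;

	/* "pending" counts enqueues coalesced into this single run. */
	(void)sc;
	(void)pending;
}

static void
my_attach(struct my_softc *sc)
{
	TASK_INIT(&sc->refresh_task, 0, my_task_fn, sc);
	taskqueue_enqueue(sc->tq, &sc->refresh_task);	/* schedule it */
}

static void
my_detach(struct my_softc *sc)
{
	/* Wait for any queued instance to finish, then tear down. */
	taskqueue_drain(sc->tq, &sc->refresh_task);
	taskqueue_free(sc->tq);
}
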
/dragonfly/contrib/gcc-4.7/libstdc++-v3/include/bits/
stl_queue.h
92 class queue in _GLIBCXX_VISIBILITY()
103 operator==(const queue<_Tp1, _Seq1>&, const queue<_Tp1, _Seq1>&); in _GLIBCXX_VISIBILITY()
107 operator<(const queue<_Tp1, _Seq1>&, const queue<_Tp1, _Seq1>&); in _GLIBCXX_VISIBILITY()
245 swap(queue& __q) in _GLIBCXX_VISIBILITY()
267 operator==(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
285 operator<(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
291 operator!=(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
297 operator>(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
303 operator<=(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
309 operator>=(const queue<_Tp, _Seq>& __x, const queue<_Tp, _Seq>& __y) in _GLIBCXX_VISIBILITY()
[all …]
/dragonfly/sys/dev/drm/ttm/
ttm_lock.c
49 init_waitqueue_head(&lock->queue); in ttm_lock_init()
61 wake_up_all(&lock->queue); in ttm_read_unlock()
90 ret = wait_event_interruptible(lock->queue, in ttm_read_lock()
156 wake_up_all(&lock->queue); in ttm_write_unlock()
188 ret = wait_event_interruptible(lock->queue, in ttm_write_lock()
194 wake_up_all(&lock->queue); in ttm_write_lock()
215 wake_up_all(&lock->queue); in __ttm_vt_unlock()
256 ret = wait_event_interruptible(lock->queue, in ttm_vt_lock()
262 wake_up_all(&lock->queue); in ttm_vt_lock()
268 wait_event(lock->queue, __ttm_vt_lock(lock)); in ttm_vt_lock()
[all …]
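
ttm_lock.c uses the Linux-style waitqueue pattern: threads sleep on lock->queue with wait_event_interruptible() until their wake condition holds, and whoever changes the lock state calls wake_up_all() so the sleepers re-check it. A generic kernel-context sketch of that pattern follows; the names are invented, and the spinlock the real code holds around the state change is omitted for brevity:

/*
 * Kernel-context sketch of the waitqueue pattern above: sleep until a
 * condition becomes true, wake everyone whenever it may have changed.
 * "my_gate" and its fields are made up for the illustration.
 */
#include <linux/wait.h>

struct my_gate {
	wait_queue_head_t queue;	/* who is waiting */
	int		  open;		/* the condition they wait for */
};

static void
my_gate_init(struct my_gate *g)
{
	init_waitqueue_head(&g->queue);
	g->open = 0;
}

/* Block (interruptibly) until the gate is open; returns 0 on success. */
static int
my_gate_wait(struct my_gate *g)
{
	return (wait_event_interruptible(g->queue, g->open));
}

/* Open the gate and wake every sleeper so they re-check the condition. */
static void
my_gate_open(struct my_gate *g)
{
	g->open = 1;
	wake_up_all(&g->queue);
}
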
/dragonfly/sys/vm/
vm_page.c
251 if (m->queue) { in vm_add_new_page()
916 u_short queue; in _vm_page_queue_spin_lock() local
918 queue = m->queue; in _vm_page_queue_spin_lock()
921 KKASSERT(queue == m->queue); in _vm_page_queue_spin_lock()
929 u_short queue; in _vm_page_queue_spin_unlock() local
931 queue = m->queue; in _vm_page_queue_spin_unlock()
1024 u_short queue; in _vm_page_rem_queue_spinlocked() local
1029 queue = m->queue; in _vm_page_rem_queue_spinlocked()
1073 return queue; in _vm_page_rem_queue_spinlocked()
1117 m->queue = queue; in _vm_page_add_queue_spinlocked()
[all …]
vm_contig.c
148 struct vpgqueues *pq = &vm_page_queues[queue]; in vm_contig_pg_clean()
156 marker.queue = queue; in vm_contig_pg_clean()
159 vm_page_queues_spin_lock(queue); in vm_contig_pg_clean()
161 vm_page_queues_spin_unlock(queue); in vm_contig_pg_clean()
176 KKASSERT(m->queue == queue); in vm_contig_pg_clean()
194 if (m->queue - m->pc != queue) { in vm_contig_pg_clean()
234 vm_page_queues_spin_lock(queue); in vm_contig_pg_clean()
236 vm_page_queues_spin_unlock(queue); in vm_contig_pg_clean()
306 pqtype = m->queue - m->pc; in vm_contig_pg_alloc()
374 pqtype = m->queue - m->pc; in vm_contig_pg_alloc()
[all …]
vm_swapcache.c
202 page_marker[q].queue = PQ_INACTIVE + q; in vm_swapcached_thread()
397 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
400 KKASSERT(m->queue == marker->queue); in vm_swapcache_writing()
431 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
438 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
444 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
452 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
518 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
534 vm_page_queues_spin_lock(marker->queue); in vm_swapcache_writing()
614 if (m->queue - m->pc == PQ_CACHE) { in vm_swapcached_flush()
[all …]
