
Searched refs:rq (Results 1 – 25 of 81) sorted by relevance


/dragonfly/contrib/lvm2/dist/daemons/cmirrord/
functions.c
554 if (strlen(rq->data) > rq->data_size) { in clog_ctr()
560 rq->data, (int)strlen(rq->data)); in clog_ctr()
617 struct log_c *lc = get_log(rq->uuid, rq->luid); in clog_dtr()
653 struct log_c *lc = get_log(rq->uuid, rq->luid); in clog_presuspend()
851 lc = get_pending_log(rq->uuid, rq->luid); in local_resume()
886 r = create_cluster_cpg(rq->uuid, rq->luid); in local_resume()
1350 lc = get_pending_log(rq->uuid, rq->luid); in clog_get_sync_count()
1412 lc = get_pending_log(rq->uuid, rq->luid); in clog_status_info()
1467 lc = get_pending_log(rq->uuid, rq->luid); in clog_status_table()
1635 if (rq->u_rq.error && rq->u_rq.data_size) { in do_request()
[all …]
cluster.c
322 memcpy(orig_rq, rq, sizeof(*rq) + rq->u_rq.data_size); in handle_cluster_response()
577 if (!rq) { in export_checkpoint()
581 memset(rq, 0, sizeof(*rq)); in export_checkpoint()
594 free(rq); in export_checkpoint()
860 free(rq); in resend_requests()
932 free(rq); in flush_startup_list()
972 memcpy(tmp_rq, rq, sizeof(*rq) + rq->u_rq.data_size); in cpg_message_callback()
1078 memcpy(tmp_rq, rq, sizeof(*rq) + rq->u_rq.data_size); in cpg_message_callback()
1129 SHORT_UUID(rq->u_rq.uuid), rq->originator); in cpg_message_callback()
1147 rq->u_rq.seq, SHORT_UUID(rq->u_rq.uuid), in cpg_message_callback()
[all …]
local.c
97 *rq = NULL; in kernel_recv()
164 *rq = (void *)u_rq - in kernel_recv()
169 memset(*rq, 0, (void *)u_rq - (void *)(*rq)); in kernel_recv()
178 *rq = NULL; in kernel_recv()
226 struct clog_request *rq; in do_local_work() local
229 r = kernel_recv(&rq); in do_local_work()
233 if (!rq) in do_local_work()
236 u_rq = &rq->u_rq; in do_local_work()
250 r = do_request(rq, 0); in do_local_work()
284 r = cluster_send(rq); in do_local_work()
[all …]
functions.h
23 int local_resume(struct dm_ulog_request *rq);
26 int do_request(struct clog_request *rq, int server);
32 int log_get_state(struct dm_ulog_request *rq);
/dragonfly/sys/dev/netif/oce/
oce_queue.c
176 if (rq) { in oce_queue_release_all()
419 if (!rq) in oce_rq_init()
448 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map); in oce_rq_init()
461 return rq; in oce_rq_init()
488 bus_dmamap_unload(rq->tag, rq->pckts[i].map); in oce_rq_free()
489 bus_dmamap_destroy(rq->tag, rq->pckts[i].map); in oce_rq_free()
527 rq->cq = cq; in oce_rq_create()
961 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len); in oce_start_rq()
964 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE); in oce_start_rq()
1133 cq = rq->cq; in oce_drain_rq_cq()
[all …]
oce_if.c
1276 if (rq->packets_out == rq->packets_in) { in oce_rx()
1283 pd = &rq->pckts[rq->packets_out]; in oce_rx()
1288 rq->pending--; in oce_rx()
1290 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len; in oce_rx()
1402 if (rq->packets_out == rq->packets_in) { in oce_discard_rx_comp()
1409 pd = &rq->pckts[rq->packets_out]; in oce_discard_rx_comp()
1414 rq->pending--; in oce_discard_rx_comp()
1535 pd = &rq->pckts[rq->packets_in]; in oce_alloc_rx_bufs()
1565 rq->pending++; in oce_alloc_rx_bufs()
1966 struct oce_rq *rq; in oce_if_deactivate() local
[all …]
oce_if.h
211 #define for_all_rq_queues(sc, rq, i) \ argument
212 for (i = 0, rq = sc->rq[0]; i < sc->nrqs; i++, rq = sc->rq[i])
213 #define for_all_rss_queues(sc, rq, i) \ argument
214 for (i = 0, rq = sc->rq[i + 1]; i < (sc->nrqs - 1); \
215 i++, rq = sc->rq[i + 1])
850 struct oce_rq *rq[OCE_MAX_RQ]; /* RX work queues */ member
955 int oce_clear_rx_buf(struct oce_rq *rq);
972 int oce_start_rq(struct oce_rq *rq);
982 void oce_drain_rq_cq(struct oce_rq *rq);
992 void oce_free_posted_rxbuf(struct oce_rq *rq);
[all …]
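The for_all_rq_queues() macro listed above walks an adapter's RX queue array with a paired index and cursor. Below is a minimal sketch of the pattern; the oce_softc and oce_rq definitions here are simplified stand-ins, not the driver's real structures:

#include <stdio.h>

#define OCE_MAX_RQ 8

/* Stand-ins; the real structs in oce_if.h carry much more state. */
struct oce_rq {
	int rq_id;
};

struct oce_softc {
	struct oce_rq *rq[OCE_MAX_RQ];	/* RX work queues */
	int nrqs;
};

/* Same shape as the oce_if.h macro: index and cursor advance together. */
#define for_all_rq_queues(sc, rq, i) \
	for (i = 0, rq = sc->rq[0]; i < sc->nrqs; i++, rq = sc->rq[i])

int
main(void)
{
	struct oce_rq q0 = { 0 }, q1 = { 1 };
	struct oce_softc softc = { .rq = { &q0, &q1 }, .nrqs = 2 };
	struct oce_softc *sc = &softc;
	struct oce_rq *rq;
	int i;

	for_all_rq_queues(sc, rq, i)
		printf("rx queue %d\n", rq->rq_id);
	return 0;
}

The comma operator keeps rq and i in lock-step, so loop bodies such as oce_queue_release_all() never have to index sc->rq themselves.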
oce_sysctl.c
596 CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts, 0, in oce_add_stats_sysctls_be3()
599 CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_bytes, 0, in oce_add_stats_sysctls_be3()
602 CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_frags, 0, in oce_add_stats_sysctls_be3()
606 &sc->rq[i]->rx_stats.rx_mcast_pkts, 0, in oce_add_stats_sysctls_be3()
610 &sc->rq[i]->rx_stats.rx_ucast_pkts, 0, in oce_add_stats_sysctls_be3()
613 CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0, in oce_add_stats_sysctls_be3()
811 CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_pkts, 0, in oce_add_stats_sysctls_xe201()
821 &sc->rq[i]->rx_stats.rx_mcast_pkts, 0, in oce_add_stats_sysctls_xe201()
825 &sc->rq[i]->rx_stats.rx_ucast_pkts, 0, in oce_add_stats_sysctls_xe201()
975 sc->rq[i]->rx_stats.rx_mcast_pkts; in oce_refresh_queue_stats()
[all …]
/dragonfly/sys/dev/raid/vinum/
vinumrequest.c
262 rq->volplex.plexno, in vinumstart()
273 freerq(rq); in vinumstart()
300 freerq(rq); in vinumstart()
332 sd = &SD[rq->sdno]; in launch_requests()
345 rq->sdno, in launch_requests()
346 rq, in launch_requests()
361 rq, in launch_requests()
370 logrq(loginfo_user_bpl, (union rqinfou) rq->bio, rq->bio); in launch_requests()
464 bre(struct request *rq, in bre() argument
1051 rqg->rq = rq; /* point back to the parent request */ in allocrqg()
[all …]
.gdbinit.vinum
2 define rq
3 rqq rq
5 document rq
9 set $rq = (struct request *) $arg0
11 output/x *$rq
13 bpp $rq->bp
14 set $rqg = $rq->rqg
58 output/x rq->prq[0].rqe[0].sdno
84 output/x rq->prq[0].rqe[1].sdno
119 rq->prq[1].rqe[1].b.b_bcount,
[all …]
vinumdaemon.c
49 void recover_io(struct request *rq);
103 struct request *rq = request->info.rq; in vinum_daemon() local
107 rq, in vinum_daemon()
108 (rq->bio->bio_buf->b_cmd == BUF_CMD_READ) ? "Read" : "Write", in vinum_daemon()
109 major((cdev_t)rq->bio->bio_driver_info), in vinum_daemon()
110 minor((cdev_t)rq->bio->bio_driver_info), in vinum_daemon()
111 (long long)rq->bio->bio_offset, in vinum_daemon()
112 rq->bio->bio_buf->b_bcount); in vinum_daemon()
114 recover_io(request->info.rq); /* the failed request */ in vinum_daemon()
198 recover_io(struct request *rq) in recover_io() argument
[all …]
vinuminterrupt.c
68 struct request *rq; in complete_rqe() local
79 rq = rqg->rq; /* and the complete request */ in complete_rqe()
80 ubio = rq->bio; /* user buffer */ in complete_rqe()
100 sd->lasterror = rq->error; in complete_rqe()
135 DRIVE[rqe->driveno].lasterror = rq->error; in complete_rqe()
205 rq->active--; /* one less */ in complete_rqe()
220 if (rq->isplex) { /* plex operation, */ in complete_rqe()
222 ubio->bio_buf->b_error = rq->error; in complete_rqe()
224 di.rq = rq; in complete_rqe()
240 freerq(struct request *rq) in freerq() argument
[all …]
request.h
133 struct request *rq; /* pointer to the request */ member
261 struct request *rq; /* for daemonrq_ioerror */ member
285 void freerq(struct request *rq);
/dragonfly/sys/dev/drm/scheduler/
gpu_scheduler.c
78 rq->sched = sched; in drm_sched_rq_init()
94 spin_lock(&rq->lock); in drm_sched_rq_add_entity()
96 spin_unlock(&rq->lock); in drm_sched_rq_add_entity()
112 spin_lock(&rq->lock); in drm_sched_rq_remove_entity()
116 spin_unlock(&rq->lock); in drm_sched_rq_remove_entity()
131 spin_lock(&rq->lock); in drm_sched_rq_select_entity()
156 spin_unlock(&rq->lock); in drm_sched_rq_select_entity()
403 if (entity->rq == rq) in drm_sched_entity_set_rq()
406 BUG_ON(!rq); in drm_sched_entity_set_rq()
410 entity->rq = rq; in drm_sched_entity_set_rq()
[all …]
/dragonfly/sbin/vinum/
list.c
895 rq.devminor = (rq.devminor & 0xff) in vinum_info()
906 rq.bio, in vinum_info()
919 rq.bio, in vinum_info()
930 rq.bio, in vinum_info()
945 rq.bio, in vinum_info()
960 rq.bio, in vinum_info()
975 rq.bio, in vinum_info()
991 rq.bio, in vinum_info()
1002 rq.bio, in vinum_info()
1011 rq.bio, in vinum_info()
[all …]
/dragonfly/sys/dev/drm/i915/
i915_guc_submission.c
424 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); in guc_wq_item_append()
508 if (rq && count == 0) { in i915_guc_submit()
538 &rq->fence.flags)) in nested_enable_signaling()
558 nested_enable_signaling(rq); in port_assign()
585 &rq->priotree.link); in i915_guc_dequeue()
599 last = rq; in i915_guc_dequeue()
628 rq = port_request(&port[0]); in i915_guc_irq_handler()
629 while (rq && i915_gem_request_completed(rq)) { in i915_guc_irq_handler()
631 i915_gem_request_put(rq); in i915_guc_irq_handler()
635 rq = port_request(&port[0]); in i915_guc_irq_handler()
[all …]
intel_lrc.c
353 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); in unwind_wa_tail()
354 assert_ring_tail_valid(rq->ring, rq->tail); in unwind_wa_tail()
372 unwind_wa_tail(rq); in unwind_incomplete_requests()
377 &rq->priotree, in unwind_incomplete_requests()
400 status, rq); in execlists_context_status_change()
414 struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; in execlists_update_context()
416 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; in execlists_update_context()
419 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); in execlists_update_context()
451 if (rq) { in execlists_submit_ports()
640 &rq->priotree.link); in execlists_dequeue()
[all …]
intel_engine_cs.c
1635 rq->global_seqno, in print_request()
1637 rq->ctx->hw_id, rq->fence.seqno, in print_request()
1638 rq->priotree.priority, in print_request()
1678 if (rq) { in intel_engine_dump()
1682 rq->head, rq->postfix, rq->tail, in intel_engine_dump()
1683 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, in intel_engine_dump()
1684 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); in intel_engine_dump()
1689 rq ? i915_ggtt_offset(rq->ring->vma) : 0); in intel_engine_dump()
1692 rq ? rq->ring->head : 0); in intel_engine_dump()
1695 rq ? rq->ring->tail : 0); in intel_engine_dump()
[all …]
i915_gem_execbuffer.c
830 GEM_BUG_ON(eb->reloc_cache.rq); in eb_destroy()
855 cache->rq = NULL; in reloc_cache_init()
886 cache->rq = NULL; in reloc_gpu_flush()
893 if (cache->rq) in reloc_cache_reset()
1103 if (IS_ERR(rq)) { in __reloc_gpu_alloc()
1104 err = PTR_ERR(rq); in __reloc_gpu_alloc()
1116 err = i915_switch_context(rq); in __reloc_gpu_alloc()
1138 rq->batch = batch; in __reloc_gpu_alloc()
1140 cache->rq = rq; in __reloc_gpu_alloc()
1148 i915_add_request(rq); in __reloc_gpu_alloc()
[all …]
intel_ringbuffer.h
229 #define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) argument
782 struct drm_i915_gem_request *rq) in intel_wait_init() argument
785 wait->request = rq; in intel_wait_init()
808 const struct drm_i915_gem_request *rq) in intel_wait_update_request() argument
810 return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq)); in intel_wait_update_request()
821 const struct drm_i915_gem_request *rq) in intel_wait_check_request() argument
823 return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq)); in intel_wait_check_request()
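The port_pack() macro above stores a small submission count in the low bits of a request pointer. A generic, self-contained sketch of that bit-packing idea; COUNT_BITS and the helper names here are illustrative stand-ins, not the i915 definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_BITS 2				/* stand-in for EXECLIST_COUNT_BITS */
#define COUNT_MASK ((uintptr_t)((1u << COUNT_BITS) - 1))

static void *
ptr_pack(void *ptr, unsigned count)
{
	/* Works only for pointers aligned enough that these bits are zero. */
	assert(((uintptr_t)ptr & COUNT_MASK) == 0);
	assert(count <= COUNT_MASK);
	return (void *)((uintptr_t)ptr | count);
}

static void *
ptr_unpack(void *packed, unsigned *count)
{
	*count = (unsigned)((uintptr_t)packed & COUNT_MASK);
	return (void *)((uintptr_t)packed & ~COUNT_MASK);
}

int
main(void)
{
	static long request;			/* stands in for a request object */
	unsigned n;
	void *port = ptr_pack(&request, 2);

	assert(ptr_unpack(port, &n) == (void *)&request && n == 2);
	printf("unpacked count %u\n", n);
	return 0;
}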
/dragonfly/sys/dev/agp/
agp.c
308 int rq, sba, fw, rate, arqsz, cal; in agp_v3_enable() local
314 rq = AGP_MODE_GET_RQ(mode); in agp_v3_enable()
315 if (AGP_MODE_GET_RQ(tstatus) < rq) in agp_v3_enable()
316 rq = AGP_MODE_GET_RQ(tstatus); in agp_v3_enable()
318 rq = AGP_MODE_GET_RQ(mstatus); in agp_v3_enable()
358 command = AGP_MODE_SET_RQ(0, rq); in agp_v3_enable()
377 int rq, sba, fw, rate; in agp_v2_enable() local
383 rq = AGP_MODE_GET_RQ(mode); in agp_v2_enable()
385 rq = AGP_MODE_GET_RQ(tstatus); in agp_v2_enable()
387 rq = AGP_MODE_GET_RQ(mstatus); in agp_v2_enable()
[all …]
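In agp_v3_enable()/agp_v2_enable() above, the request-queue depth (RQ) is clamped to the smallest value supported by the requested mode, the target, and the master. A hedged sketch of that min-of-three negotiation; the AGP_MODE_GET_RQ definition here, reading the RQ field from the register's top byte, is illustrative:

#include <stdio.h>

#define AGP_MODE_GET_RQ(x) (((x) >> 24) & 0xff)	/* RQ field, bits 31:24 */

/* Same pattern as agp_v3_enable(): start from the requested RQ and lower
 * it to whatever the target and master status registers advertise. */
static int
negotiate_rq(unsigned mode, unsigned tstatus, unsigned mstatus)
{
	int rq = AGP_MODE_GET_RQ(mode);

	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);
	return rq;
}

int
main(void)
{
	/* Requested RQ=32, target supports 16, master supports 24. */
	printf("negotiated RQ %d\n",
	    negotiate_rq(32u << 24, 16u << 24, 24u << 24));
	return 0;
}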
/dragonfly/sys/vfs/smbfs/
smbfs_smb.c
96 struct smb_rq rq, *rqp = &rq; in smbfs_smb_lockandx() local
189 struct smb_rq rq, *rqp = &rq; in smbfs_smb_statfs() local
225 struct smb_rq rq, *rqp = &rq; in smbfs_smb_setfsize() local
256 struct smb_rq rq, *rqp = &rq; in smbfs_smb_setpattr() local
405 struct smb_rq rq, *rqp = &rq; in smbfs_smb_setftime() local
493 struct smb_rq rq, *rqp = &rq; in smbfs_smb_open() local
546 struct smb_rq rq, *rqp = &rq; in smbfs_smb_close() local
574 struct smb_rq rq, *rqp = &rq; in smbfs_smb_create() local
619 struct smb_rq rq, *rqp = &rq; in smbfs_smb_delete() local
646 struct smb_rq rq, *rqp = &rq; in smbfs_smb_rename() local
[all …]
/dragonfly/sys/sys/
usched_dfly.h
72 TAILQ_HEAD(rq, lwp);
111 struct rq queues[NQS];
112 struct rq rtqueues[NQS];
113 struct rq idqueues[NQS];
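TAILQ_HEAD(rq, lwp) declares struct rq as the head type for the scheduler's per-priority run queues (the normal, real-time, and idle arrays of NQS entries each). A minimal sketch of the <sys/queue.h> pattern; the struct lwp body, its linkage field name, and the NQS value here are stand-ins, not the kernel's definitions:

#include <sys/queue.h>
#include <stdio.h>

#define NQS 32				/* number of priority queues; illustrative */

struct lwp {				/* stripped-down stand-in */
	int lwp_tid;
	TAILQ_ENTRY(lwp) rq_link;	/* illustrative linkage field name */
};

TAILQ_HEAD(rq, lwp);			/* same declaration as the header above */

int
main(void)
{
	struct rq queues[NQS];
	struct lwp a = { .lwp_tid = 1 }, b = { .lwp_tid = 2 };
	struct lwp *lp;
	int i;

	for (i = 0; i < NQS; i++)
		TAILQ_INIT(&queues[i]);

	/* Enqueue two lwps at priority 0, then walk that queue in order. */
	TAILQ_INSERT_TAIL(&queues[0], &a, rq_link);
	TAILQ_INSERT_TAIL(&queues[0], &b, rq_link);
	TAILQ_FOREACH(lp, &queues[0], rq_link)
		printf("lwp tid %d\n", lp->lwp_tid);
	return 0;
}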
/dragonfly/contrib/nvi2/ex/
ex_global.c
157 TAILQ_INIT(ecp->rq); in ex_g_setup()
226 if ((rp = TAILQ_LAST(ecp->rq, _rh)) != NULL && in ex_g_setup()
237 TAILQ_INSERT_TAIL(ecp->rq, rp, q); in ex_g_setup()
265 TAILQ_FOREACH_MUTABLE(rp, ecp->rq, q, nrp) { in ex_g_insdel()
293 TAILQ_REMOVE(ecp->rq, rp, q); in ex_g_insdel()
301 TAILQ_INSERT_AFTER(ecp->rq, rp, nrp, q); in ex_g_insdel()
/dragonfly/include/rpc/
clnt.h
200 #define CLNT_CONTROL(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in)) argument
201 #define clnt_control(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in)) argument
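CLNT_CONTROL()/clnt_control() dispatch through the client handle's cl_ops operations vector. A sketch of typical caller-side use with the standard Sun RPC API; the host, program number, and version here are placeholders:

#include <rpc/rpc.h>
#include <stdio.h>

#define EXAMPLE_PROG 0x20000001		/* placeholder program number */
#define EXAMPLE_VERS 1			/* placeholder version */

int
main(void)
{
	CLIENT *cl;
	struct timeval tv = { 5, 0 };	/* 5 second call timeout */

	cl = clnt_create("localhost", EXAMPLE_PROG, EXAMPLE_VERS, "udp");
	if (cl == NULL) {
		clnt_pcreateerror("clnt_create");
		return 1;
	}

	/* clnt_control() expands to the cl_control op, as in clnt.h above. */
	if (!clnt_control(cl, CLSET_TIMEOUT, (char *)&tv))
		fprintf(stderr, "CLSET_TIMEOUT not supported\n");

	clnt_destroy(cl);
	return 0;
}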
