
Searched refs:rq (Results 1 – 25 of 156) sorted by relevance


/openbsd/sys/dev/pci/drm/i915/
i915_request.c
428 rq->ring->head = rq->postfix; in i915_request_retire()
449 rq->engine->remove_active_request(rq); in i915_request_retire()
606 if (rq->infix == rq->postfix) in __i915_request_skip()
609 RQ_TRACE(rq, "error: %d\n", rq->fence.error); in __i915_request_skip()
617 rq->infix = rq->postfix; in __i915_request_skip()
646 rq = i915_request_get(rq); in i915_request_mark_eio()
1078 rq->head = rq->ring->emit; in __i915_request_create()
1080 ret = rq->engine->request_alloc(rq); in __i915_request_create()
1893 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
2316 rq->fence.context, rq->fence.seqno, in i915_request_show()
[all …]
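
A note on the pattern: the i915_request.c hits trace a request's lifecycle. __i915_request_create() records rq->head from ring->emit, __i915_request_commit() records rq->postfix, and i915_request_retire() advances ring->head past the retired payload. A minimal caller-side sketch, assuming a held intel_context ce and using only the public create/get/add/wait/put calls:

	struct i915_request *rq;
	int err = 0;

	rq = i915_request_create(ce);		/* __i915_request_create(): rq->head = ring->emit */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... emit commands here; they land between rq->head and rq->postfix ... */

	i915_request_get(rq);			/* keep rq alive across the wait */
	i915_request_add(rq);			/* __i915_request_commit() records rq->postfix */

	if (i915_request_wait(rq, 0, HZ) < 0)	/* on retire, ring->head = rq->postfix */
		err = -ETIME;
	i915_request_put(rq);
	return err;
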
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \ argument
418 dma_fence_put(&rq->fence); in i915_request_put()
513 seqno = __hwsp_seqno(rq); in hwsp_seqno()
521 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1); in __i915_request_has_started()
554 if (i915_request_signaled(rq)) in i915_request_started()
579 if (!i915_request_is_active(rq)) in i915_request_is_running()
583 result = __i915_request_has_started(rq) && i915_request_is_active(rq); in i915_request_is_running()
607 return !list_empty(&rq->sched.link); in i915_request_is_ready()
612 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); in __i915_request_is_complete()
619 if (i915_request_signaled(rq)) in i915_request_completed()
[all …]
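
These i915_request.h helpers all reduce to one wrap-safe seqno comparison: a request has started once the HWSP passes seqno - 1, and is complete once it reaches seqno. The underlying idiom, as defined in this header:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	/* signed subtraction tolerates the u32 seqno wrapping around */
	return (s32)(seq1 - seq2) >= 0;
}
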
/openbsd/sys/dev/pci/drm/i915/gt/
gen8_engine_cs.c
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
230 IS_DG2(rq->i915)) { in mtl_dummy_pipe_control()
476 rq->infix = intel_ring_offset(rq, cs); in gen8_emit_init_breadcrumb()
607 GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); in assert_request_valid()
620 rq->wa_tail = intel_ring_offset(rq, cs); in gen8_emit_wa_tail()
653 rq->tail = intel_ring_offset(rq, cs); in gen8_emit_fini_breadcrumb_tail()
654 assert_ring_tail_valid(rq->ring, rq->tail); in gen8_emit_fini_breadcrumb_tail()
661 return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); in emit_xcs_breadcrumb()
666 return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); in gen8_emit_fini_breadcrumb_xcs()
795 rq->tail = intel_ring_offset(rq, cs); in gen12_emit_fini_breadcrumb_tail()
[all …]
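
The gen8 emitters share one flow: reserve dwords with intel_ring_begin(), write the packet, then record where execution must stop. A hedged sketch of an xcs-style seqno breadcrumb built from the helpers in the hits (the standalone begin/advance framing is illustrative; the real fini-breadcrumb path gets its dwords from the engine framework):

	u32 *cs;

	cs = intel_ring_begin(rq, 4);	/* reserve 4 dwords, or ERR_PTR on a full ring */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* write the fence seqno into this request's HWSP slot via the GGTT */
	cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
	intel_ring_advance(rq, cs);	/* asserts we wrote exactly what we reserved */

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
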
gen6_engine_cs.c
167 *cs++ = rq->fence.seqno; in gen6_emit_breadcrumb_rcs()
172 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_rcs()
173 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_rcs()
337 gen7_stall_cs(rq); in gen7_emit_flush_rcs()
364 *cs++ = rq->fence.seqno; in gen7_emit_breadcrumb_rcs()
369 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_rcs()
370 assert_ring_tail_valid(rq->ring, rq->tail); in gen7_emit_breadcrumb_rcs()
386 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_xcs()
387 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_xcs()
418 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_xcs()
[all …]
selftest_execlists.c
266 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
289 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
873 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
2082 intel_context_ban(rq->context, rq); in __cancel_active0()
2141 intel_context_ban(rq[1]->context, rq[1]); in __cancel_active1()
2224 intel_context_ban(rq[2]->context, rq[2]); in __cancel_queued()
2293 intel_context_ban(rq->context, rq); in __cancel_hostile()
2591 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2772 err = rq->engine->emit_bb_start(rq, in create_gang()
3188 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
[all …]
intel_breadcrumbs.c
110 if (rq->context != ce) in check_signal_order()
218 &rq->fence.flags)) in signal_irq_work()
252 rq->engine->sched_engine->retire_inflight_request_prio(rq); in signal_irq_work()
254 spin_lock(&rq->lock); in signal_irq_work()
260 i915_request_put(rq); in signal_irq_work()
341 i915_request_get(rq); in irq_signal_request()
393 i915_request_get(rq); in insert_breadcrumb()
454 i915_request_put(rq); in i915_request_cancel_breadcrumb()
472 &rq->fence.flags)) in intel_context_remove_breadcrumbs()
477 i915_request_put(rq); in intel_context_remove_breadcrumbs()
[all …]
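
insert_breadcrumb() and signal_irq_work() bracket a reference: the signal list owns one i915_request_get() per armed request until the fence fires. A rough sketch of that discipline (the flag mirrors I915_FENCE_FLAG_SIGNAL; treat the locking as simplified, since the real code signals under rq->lock):

	/* arming, as in insert_breadcrumb() */
	i915_request_get(rq);		/* the signal list now owns a reference */
	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);

	/* completion, as in signal_irq_work() */
	if (__i915_request_is_complete(rq) &&
	    test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
		dma_fence_signal(&rq->fence);
		i915_request_put(rq);	/* drop the list's reference */
	}
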
intel_engine_heartbeat.c
25 struct i915_request *rq; in next_heartbeat() local
41 if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER && in next_heartbeat()
76 return rq; in heartbeat_create()
90 idle_pulse(rq->engine, rq); in heartbeat_commit()
101 if (!rq) { in show_heartbeat()
110 rq->fence.seqno, in show_heartbeat()
148 if (rq && i915_request_completed(rq)) { in heartbeat()
223 if (IS_ERR(rq)) in heartbeat()
285 if (IS_ERR(rq)) in __intel_engine_pulse()
395 if (IS_ERR(rq)) { in intel_engine_flush_barriers()
[all …]
selftest_timeline.c
521 return rq; in checked_tl_write()
899 struct i915_request *rq = fetch_and_zero(&w->rq); in check_watcher() local
975 return rq; in wrap_timeline()
980 return rq; in wrap_timeline()
1084 switch_tl_lock(rq, watcher[0].rq); in live_hwsp_read()
1090 switch_tl_lock(watcher[0].rq, rq); in live_hwsp_read()
1098 switch_tl_lock(rq, watcher[1].rq); in live_hwsp_read()
1104 switch_tl_lock(watcher[1].rq, rq); in live_hwsp_read()
1115 rq = wrap_timeline(rq); in live_hwsp_read()
1227 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); in live_hwsp_rollover_kernel()
[all …]
gen2_engine_cs.c
38 intel_ring_advance(rq, cs); in gen2_emit_flush()
79 if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5) in gen4_emit_flush_rcs()
87 cs = intel_ring_begin(rq, i); in gen4_emit_flush_rcs()
124 intel_ring_advance(rq, cs); in gen4_emit_flush_rcs()
139 intel_ring_advance(rq, cs); in gen4_emit_flush_vcs()
147 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); in __gen2_emit_breadcrumb()
155 *cs++ = rq->fence.seqno; in __gen2_emit_breadcrumb()
161 *cs++ = rq->fence.seqno; in __gen2_emit_breadcrumb()
166 rq->tail = intel_ring_offset(rq, cs); in __gen2_emit_breadcrumb()
167 assert_ring_tail_valid(rq->ring, rq->tail); in __gen2_emit_breadcrumb()
[all …]
intel_execlists_submission.c
456 rq->fence.context, rq->fence.seqno); in reset_active()
711 rq->tail = rq->wa_tail; in execlists_update_context()
749 rq->fence.context, rq->fence.seqno, in dump_port()
938 rq ? execlists_update_context(rq) : 0, in execlists_submit_ports()
1164 if (!rq || __i915_request_is_complete(rq)) in needs_timeslice()
1987 rq->head, rq->tail, in process_csb()
2351 cap->rq = active_request(cap->rq->context->timeline, cap->rq); in execlists_capture()
2352 cap->rq = i915_request_get_rcu(cap->rq); in execlists_capture()
3062 rq = active_request(ce->timeline, rq); in execlists_reset_active()
3360 rq->fence.context, rq->fence.seqno, in kick_execlists()
[all …]
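
execlists_update_context() is where wa_tail pays off: resubmitting a request moves the context image's RING_TAIL to wa_tail, so the workaround dwords after the breadcrumb are replayed. A simplified sketch (force-restore handling omitted):

	struct intel_context *ce = rq->context;

	rq->tail = rq->wa_tail;		/* include the post-breadcrumb w/a dwords */
	ce->lrc_reg_state[CTX_RING_TAIL] =
		intel_ring_set_tail(rq->ring, rq->tail);
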
selftest_hangcheck.c
229 err = rq->engine->emit_init_breadcrumb(rq); in hang_create_request()
776 if (rq) { in __igt_reset_engine()
786 if (rq) in __igt_reset_engine()
872 if (!rq) in active_request_put()
1096 if (rq) { in __igt_reset_engines()
1107 if (rq) { in __igt_reset_engines()
1112 rq->fence.seqno, rq->context->guc_id.id); in __igt_reset_engines()
1335 __func__, rq->fence.seqno, hws_seqno(&h, rq)); in igt_reset_wait()
1524 __func__, rq->fence.seqno, hws_seqno(&h, rq)); in __igt_reset_evict_vma()
1847 __func__, rq->fence.seqno, hws_seqno(&h, rq)); in igt_handle_error()
[all …]
selftest_lrc.c
87 if (IS_ERR(rq)) in emit_semaphore_signal()
115 if (IS_ERR(rq)) in context_flush()
124 rq = i915_request_get(rq); in context_flush()
539 if (IS_ERR(rq)) in gpr_make_dirty()
576 return rq; in __gpr_read()
611 return rq; in __gpr_read()
748 return rq; in create_timestamp()
783 return rq; in create_timestamp()
809 err = wait_for_submit(rq->engine, rq, HZ / 2); in __lrc_timestamp()
1127 return rq; in record_registers()
[all …]
intel_migrate.c
754 if (IS_ERR(rq)) { in intel_context_migrate_copy()
765 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_copy()
801 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
814 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
829 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
838 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
843 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
879 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_copy()
1022 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_migrate_clear()
1041 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_migrate_clear()
[all …]
selftest_mocs.c
40 i915_request_get(rq); in request_add_sync()
41 i915_request_add(rq); in request_add_sync()
44 i915_request_put(rq); in request_add_sync()
53 i915_request_get(rq); in request_add_spin()
54 i915_request_add(rq); in request_add_spin()
57 i915_request_put(rq); in request_add_spin()
221 struct i915_request *rq; in check_mocs_engine() local
229 if (IS_ERR(rq)) in check_mocs_engine()
230 return PTR_ERR(rq); in check_mocs_engine()
332 if (IS_ERR(rq)) { in active_engine_reset()
[all …]
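
request_add_sync() here is the submit-and-wait helper these selftests lean on. A hedged reconstruction from the hits (the HZ timeout and -ETIME mapping are assumptions):

static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);			/* hold rq across the wait */
	i915_request_add(rq);			/* submit */
	if (i915_request_wait(rq, 0, HZ) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}
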
selftest_migrate.c
96 if (rq) { in copy()
107 if (rq) { in copy()
175 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_copy_ccs()
194 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_copy_ccs()
203 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_copy_ccs()
325 rq = NULL; in clear()
638 prev = rq; in live_emit_pte_full_ring()
639 } while (rq->ring->space > (rq->reserved_space + in live_emit_pte_full_ring()
863 if (rq) { in __perf_clear_blt()
945 &rq); in __perf_copy_blt()
[all …]
intel_engine_pm.c
101 struct i915_request *rq = to_request(fence); in duration() local
104 ktime_us_delta(rq->fence.timestamp, in duration()
105 rq->duration.emitted)); in duration()
123 GEM_BUG_ON(rq->context->active_count != 1); in __queue_and_release_pm()
142 __i915_request_queue_bh(rq); in __queue_and_release_pm()
153 struct i915_request *rq; in switch_to_kernel_context() local
212 if (IS_ERR(rq)) in switch_to_kernel_context()
218 i915_request_add_active_barriers(rq); in switch_to_kernel_context()
230 BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq)); in switch_to_kernel_context()
231 dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration); in switch_to_kernel_context()
[all …]
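
switch_to_kernel_context() arms a completion callback in storage borrowed from the request's submit queue, with a BUILD_BUG_ON policing the overlay; duration() later reports the emit-to-signal latency. A sketch of the arming (the ktime_get() placement is an assumption):

	rq->duration.emitted = ktime_get();	/* assumed: stamped at emission */
	BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
	dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
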
/openbsd/sys/dev/pci/drm/i915/selftests/
i915_perf.c
225 if (IS_ERR(rq)) { in live_noa_delay()
226 err = PTR_ERR(rq); in live_noa_delay()
231 err = rq->engine->emit_init_breadcrumb(rq); in live_noa_delay()
244 err = rq->engine->emit_bb_start(rq, in live_noa_delay()
258 i915_request_get(rq); in live_noa_delay()
259 i915_request_add(rq); in live_noa_delay()
283 i915_request_put(rq); in live_noa_delay()
320 if (IS_ERR(rq)) { in live_noa_gpr()
321 err = PTR_ERR(rq); in live_noa_gpr()
327 err = rq->engine->emit_init_breadcrumb(rq); in live_noa_gpr()
[all …]
igt_spinner.c
128 struct i915_request *rq = NULL; in igt_spinner_create_request() local
149 if (IS_ERR(rq)) in igt_spinner_create_request()
150 return ERR_CAST(rq); in igt_spinner_create_request()
178 *batch++ = rq->fence.seqno; in igt_spinner_create_request()
182 if (GRAPHICS_VER(rq->i915) >= 8) in igt_spinner_create_request()
184 else if (IS_HASWELL(rq->i915)) in igt_spinner_create_request()
211 i915_request_add(rq); in igt_spinner_create_request()
213 return err ? ERR_PTR(err) : rq; in igt_spinner_create_request()
252 if (i915_request_is_ready(rq)) in igt_wait_for_spinner()
256 rq->fence.seqno), in igt_wait_for_spinner()
[all …]
i915_request.c
410 rq->fence.context, rq->fence.seqno, in __igt_breadcrumbs_smoketest()
426 rq->fence.context, rq->fence.seqno); in __igt_breadcrumbs_smoketest()
1010 return rq->engine->emit_bb_start(rq, in emit_bb_start()
1302 if (!rq) in live_all_engines()
1711 ret = rq->ring->size - rq->reserved_space; in max_batches()
1714 sz = rq->ring->emit - rq->head; in max_batches()
1895 if (IS_ERR(rq)) in switch_to_kernel_sync()
1904 rq = i915_request_get(rq); in switch_to_kernel_sync()
2039 if (IS_ERR(rq)) in measure_semaphore_response()
2787 prev = rq; in s_sync1()
[all …]
/openbsd/sys/dev/pci/drm/i915/gt/uc/
intel_gsc_uc_heci_cmd_submit.c
47 struct i915_request *rq; in intel_gsc_uc_heci_cmd_submit_packet() local
60 if (IS_ERR(rq)) in intel_gsc_uc_heci_cmd_submit_packet()
61 return PTR_ERR(rq); in intel_gsc_uc_heci_cmd_submit_packet()
77 i915_request_get(rq); in intel_gsc_uc_heci_cmd_submit_packet()
82 i915_request_add(rq); in intel_gsc_uc_heci_cmd_submit_packet()
96 i915_request_put(rq); in intel_gsc_uc_heci_cmd_submit_packet()
159 if (IS_ERR(rq)) { in intel_gsc_uc_heci_cmd_submit_nonpriv()
160 err = PTR_ERR(rq); in intel_gsc_uc_heci_cmd_submit_nonpriv()
190 i915_request_get(rq); in intel_gsc_uc_heci_cmd_submit_nonpriv()
195 i915_request_add(rq); in intel_gsc_uc_heci_cmd_submit_nonpriv()
[all …]
selftest_guc_hangcheck.c
17 struct i915_request *rq; in nop_request() local
20 if (IS_ERR(rq)) in nop_request()
21 return rq; in nop_request()
23 i915_request_get(rq); in nop_request()
24 i915_request_add(rq); in nop_request()
26 return rq; in nop_request()
79 if (IS_ERR(rq)) { in intel_hang_guc()
80 ret = PTR_ERR(rq); in intel_hang_guc()
109 i915_request_put(rq); in intel_hang_guc()
128 if (IS_ERR(rq)) { in intel_hang_guc()
[all …]
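
nop_request() is the minimal request builder: create on the engine, take a reference for the caller, submit. A sketch matching the hits, assuming intel_engine_create_kernel_request() as the creation path:

static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = intel_engine_create_kernel_request(engine);	/* assumed creation call */
	if (IS_ERR(rq))
		return rq;

	i915_request_get(rq);	/* caller receives a referenced request */
	i915_request_add(rq);

	return rq;
}
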
selftest_guc.c
15 i915_request_get(rq); in request_add_spin()
16 i915_request_add(rq); in request_add_spin()
30 if (IS_ERR(rq)) in nop_user_request()
31 return rq; in nop_user_request()
43 i915_request_get(rq); in nop_user_request()
44 i915_request_add(rq); in nop_user_request()
46 return rq; in nop_user_request()
89 if (IS_ERR(rq)) { in intel_guc_scrub_ctbs()
95 last[i] = rq; in intel_guc_scrub_ctbs()
206 rq = NULL; in intel_guc_steal_guc_ids()
[all …]
selftest_guc_multi_lrc.c
80 struct i915_request *rq, *child_rq; in multi_lrc_nop_request() local
86 if (IS_ERR(rq)) in multi_lrc_nop_request()
87 return rq; in multi_lrc_nop_request()
89 i915_request_get(rq); in multi_lrc_nop_request()
90 i915_request_add(rq); in multi_lrc_nop_request()
103 return rq; in multi_lrc_nop_request()
106 i915_request_put(rq); in multi_lrc_nop_request()
114 struct i915_request *rq; in __intel_guc_multi_lrc_basic() local
127 if (IS_ERR(rq)) { in __intel_guc_multi_lrc_basic()
128 ret = PTR_ERR(rq); in __intel_guc_multi_lrc_basic()
[all …]
/openbsd/sys/dev/pci/drm/i915/pxp/
intel_pxp_cmd.c
90 trace_i915_request_add(rq); in pxp_request_commit()
91 __i915_request_commit(rq); in pxp_request_commit()
92 __i915_request_queue(rq, &attr); in pxp_request_commit()
99 struct i915_request *rq; in intel_pxp_terminate_session() local
107 rq = i915_request_create(ce); in intel_pxp_terminate_session()
108 if (IS_ERR(rq)) in intel_pxp_terminate_session()
109 return PTR_ERR(rq); in intel_pxp_terminate_session()
126 intel_ring_advance(rq, cs); in intel_pxp_terminate_session()
129 i915_request_get(rq); in intel_pxp_terminate_session()
134 pxp_request_commit(rq); in intel_pxp_terminate_session()
[all …]
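
pxp_request_commit() skips i915_request_add() so it can queue with an explicit priority. A sketch of the sequence in the hits; the priority value is an assumption:

	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };	/* assumed */

	trace_i915_request_add(rq);
	__i915_request_commit(rq);		/* close the packet, record rq->postfix */
	__i915_request_queue(rq, &attr);	/* submit at the chosen priority */
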
/openbsd/sys/dev/pci/drm/scheduler/
sched_main.c
92 struct drm_sched_rq *rq = entity->rq; in drm_sched_rq_remove_fifo_locked() local
136 rq->sched = sched; in drm_sched_rq_init()
153 spin_lock(&rq->lock); in drm_sched_rq_add_entity()
158 spin_unlock(&rq->lock); in drm_sched_rq_add_entity()
175 spin_lock(&rq->lock); in drm_sched_rq_remove_entity()
186 spin_unlock(&rq->lock); in drm_sched_rq_remove_entity()
201 spin_lock(&rq->lock); in drm_sched_rq_select_entity_rr()
228 spin_unlock(&rq->lock); in drm_sched_rq_select_entity_rr()
245 spin_lock(&rq->lock); in drm_sched_rq_select_entity_fifo()
256 spin_unlock(&rq->lock); in drm_sched_rq_select_entity_fifo()
[all …]
