drm/i915/gt: Skip over completed active execlists, again

Now that we are careful to always force-restore contexts upon rewinding
(where necessary), we can restore our optimisation to skip over
completed active execlists when dequeuing.

References: 35f3fd8182 ("drm/i915/execlists: Workaround switching back to a completed context")
References: 8ab3a3812a ("drm/i915/gt: Incrementally check for rewinding")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210120121718.26435-2-chris@chris-wilson.co.uk
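
For readers following the change: the heart of the patch is a skip loop that execlists_dequeue() now runs over the in-flight array before picking what to submit next, guarded so that sentinel requests are never skipped. A minimal standalone sketch of the idea (the struct and helper below are simplified stand-ins, not the driver's real i915_request machinery):

	#include <stdbool.h>
	#include <stddef.h>

	/* Simplified stand-in for struct i915_request. */
	struct request {
		bool complete;	/* has the GPU finished this request? */
		bool sentinel;	/* must still reach the GPU to force a switch */
	};

	/* Mirrors the patch's completed(): a sentinel request is never
	 * reported as complete, so the skip loop cannot step over it. */
	static bool completed(const struct request *rq)
	{
		if (rq->sentinel)
			return false;

		return rq->complete;
	}

	int main(void)
	{
		struct request done = { .complete = true };
		struct request busy = { .complete = false };
		/* Modelled on execlists->active: a NULL-terminated array
		 * of the requests currently in flight on the ELSP ports. */
		struct request *elsp[] = { &done, &busy, NULL };
		struct request *const *active = elsp;
		struct request *last;

		/* The restored optimisation: step past contexts the GPU
		 * has already completed, whose events are still in flight. */
		while ((last = *active) && completed(last))
			active++;

		return last == &busy ? 0 : 1; /* first still-active request */
	}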

@@ -1224,12 +1224,20 @@ static void set_preempt_timeout(struct intel_engine_cs *engine,
 		     active_preempt_timeout(engine, rq));
 }
 
+static bool completed(const struct i915_request *rq)
+{
+	if (i915_request_has_sentinel(rq))
+		return false;
+
+	return __i915_request_is_complete(rq);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
-	struct i915_request *last = *execlists->active;
+	struct i915_request *last, * const *active;
 	struct virtual_engine *ve;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1266,21 +1274,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 *
-	 * In theory we can skip over completed contexts that have not
-	 * yet been processed by events (as those events are in flight):
-	 *
-	 *   while ((last = *active) && i915_request_completed(last))
-	 *     active++;
-	 *
-	 * However, the GPU cannot handle this as it will ultimately
-	 * find itself trying to jump back into a context it has just
-	 * completed and barf.
 	 */
+	active = execlists->active;
+	while ((last = *active) && completed(last))
+		active++;
 
 	if (last) {
-		if (__i915_request_is_complete(last)) {
-			goto check_secondary;
-		} else if (need_preempt(engine, last)) {
+		if (need_preempt(engine, last)) {
 			ENGINE_TRACE(engine,
 				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
 				     last->fence.context,
@@ -1359,9 +1359,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * we hopefully coalesce several updates into a single
 		 * submission.
 		 */
-check_secondary:
-		if (!list_is_last(&last->sched.link,
-				  &engine->active.requests)) {
+		if (active[1]) {
 			/*
 			 * Even if ELSP[1] is occupied and not worthy
 			 * of timeslices, our queue might be.
@@ -1562,7 +1560,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * of ordered contexts.
 	 */
 	if (submit &&
-	    memcmp(execlists->active,
+	    memcmp(active,
 		   execlists->pending,
 		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
@@ -1570,7 +1568,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			execlists_schedule_in(*port, port - execlists->pending);
 
 		WRITE_ONCE(execlists->yield, -1);
-		set_preempt_timeout(engine, *execlists->active);
+		set_preempt_timeout(engine, *active);
 		execlists_submit_ports(engine);
 	} else {
 		ring_set_paused(engine, 0);
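
A closing note on the if (active[1]) test that replaces the old list_is_last() walk: once active points into the NULL-terminated in-flight array, occupancy of the second ELSP port can be read straight from the next slot, as the retained comment ("Even if ELSP[1] is occupied...") already hints. Continuing the same simplified model as the sketch above:

	#include <stdbool.h>
	#include <stddef.h>

	struct request { int id; };	/* stand-in type, as before */

	/* A non-NULL entry behind the head of the in-flight array means
	 * a second context occupies ELSP[1]. */
	static bool second_context_inflight(struct request *const *active)
	{
		return active[0] && active[1];
	}

	int main(void)
	{
		struct request a = { 1 }, b = { 2 };
		struct request *one[] = { &a, NULL };
		struct request *two[] = { &a, &b, NULL };

		/* one context in flight -> false, two -> true */
		return !second_context_inflight(one) &&
		        second_context_inflight(two) ? 0 : 1;
	}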