xref: /linux/drivers/gpu/drm/xe/xe_force_wake.c (revision 69418db6)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_force_wake.h"

#include <drm/drm_util.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

#define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50

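/* Human-readable direction of a forcewake transition, for the error messages below. */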
static const char *str_wake_sleep(bool wake)
{
	return wake ? "wake" : "sleep";
}

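/*
 * Record a domain's control/ack registers and precompute the FORCEWAKE_MT
 * bit and write mask for FORCEWAKE_KERNEL, so a wake or sleep request
 * becomes a single masked MMIO write.
 */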
static void domain_init(struct xe_force_wake_domain *domain,
			enum xe_force_wake_domain_id id,
			struct xe_reg reg, struct xe_reg ack)
{
	domain->id = id;
	domain->reg_ctl = reg;
	domain->reg_ack = ack;
	domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
	domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
}

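/**
 * xe_force_wake_init_gt - Initialize the GT forcewake domain
 * @gt: the GT which owns @fw
 * @fw: the forcewake state to initialize
 *
 * Sets up the lock and the GT domain. Graphics version 12.70 and later
 * use a different ack register than earlier platforms.
 */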
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
{
	struct xe_device *xe = gt_to_xe(gt);

	fw->gt = gt;
	spin_lock_init(&fw->lock);

	/* Assuming gen11+ so assert this assumption is correct */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (xe->info.graphics_verx100 >= 1270) {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT_MTL);
	} else {
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
			    XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT);
	}
}

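/**
 * xe_force_wake_init_engines - Initialize the per-engine forcewake domains
 * @gt: the GT which owns @fw
 * @fw: the forcewake state to initialize
 *
 * Registers a render domain on non-media GTs, one VDBOX/VEBOX domain per
 * present media engine, and a GSC domain when a GSCCS engine is present.
 * Domains for absent engines stay uninitialized and are skipped by
 * for_each_fw_domain_masked().
 */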
void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
	int i, j;

	/* Assuming gen11+ so assert this assumption is correct */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (!xe_gt_is_media_type(gt))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
			    XE_FW_DOMAIN_ID_RENDER,
			    FORCEWAKE_RENDER,
			    FORCEWAKE_ACK_RENDER);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
			    FORCEWAKE_MEDIA_VDBOX(j),
			    FORCEWAKE_ACK_MEDIA_VDBOX(j));
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
			    XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
			    FORCEWAKE_MEDIA_VEBOX(j),
			    FORCEWAKE_ACK_MEDIA_VEBOX(j));
	}

	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
			    XE_FW_DOMAIN_ID_GSC,
			    FORCEWAKE_GSC,
			    FORCEWAKE_ACK_GSC);
}

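/*
 * Issue a wake or sleep request via a masked write to the domain's control
 * register. SR-IOV VFs have no access to the forcewake registers, so the
 * write is skipped there.
 */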
static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}

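/*
 * Wait for the domain's ack register to reflect the requested state, up to
 * XE_FORCE_WAKE_ACK_TIMEOUT_MS. An all-ones readback means the MMIO space
 * itself is unreliable and is reported as -EIO even if the wait succeeded.
 */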
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	u32 value;
	int ret;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return 0;

	ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			     &value, true);
	if (ret)
		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
			  domain->reg_ack.addr, value);
	if (value == ~0) {
		xe_gt_err(gt,
			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			  domain->id, str_wake_sleep(wake));
		ret = -EIO;
	}

	return ret;
}

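/* Direction-specific wrappers used by the get/put paths below. */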
static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, true);
}

static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, true);
}

static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, false);
}

static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, false);
}

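/*
 * Iterate over the domains set in mask__, skipping any that were never
 * initialized (reg_ctl.addr == 0), e.g. domains for absent engines.
 */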
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)

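/**
 * xe_force_wake_get - Take a reference on forcewake domains
 * @fw: the forcewake state for the GT
 * @domains: mask of domains to wake up
 *
 * Takes a reference on each requested domain; domains going from zero to
 * one reference get a wake request, and all freshly woken domains are then
 * waited on for their acks.
 *
 * Return: 0 on success, non-zero if any domain failed to ack the wake.
 */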
int xe_force_wake_get(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, woken = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!domain->ref++) {
			woken |= BIT(domain->id);
			domain_wake(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, woken, fw, tmp) {
		ret |= domain_wake_wait(gt, domain);
	}
	fw->awake_domains |= woken;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret;
}

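/**
 * xe_force_wake_put - Drop a reference on forcewake domains
 * @fw: the forcewake state for the GT
 * @domains: mask of domains to put
 *
 * Drops a reference on each requested domain; domains reaching zero
 * references get a sleep request, and all such domains are then waited on
 * for their acks.
 *
 * Return: 0 on success, non-zero if any domain failed to ack the sleep.
 */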
int xe_force_wake_put(struct xe_force_wake *fw,
		      enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	enum xe_force_wake_domains tmp, sleep = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, domains, fw, tmp) {
		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		ret |= domain_sleep_wait(gt, domain);
	}
	fw->awake_domains &= ~sleep;
	spin_unlock_irqrestore(&fw->lock, flags);

	return ret;
}