// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @i915: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->update_wm)
		i915->display.funcs.wm->update_wm(i915);
}
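
/*
 * Worked example for the normal calculation above (illustrative numbers
 * only, not taken from any platform's actual latency tables): a 1920x1080
 * mode with a 148500 kHz dotclock, 4 bytes per pixel and an assumed 12 us
 * latency drains 148500000 * 4 * 0.000012 = 7128 bytes during the latency
 * window.  Assuming the usual 64 byte FIFO line size, that rounds up to
 * 112 entries, plus the extra 2 entries for clock crossings, so the
 * register would be programmed with 114.  The real per-platform math lives
 * in i9xx_wm.c and skl_watermark.c.
 */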

int intel_compute_pipe_wm(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_pipe_wm)
		return i915->display.funcs.wm->compute_pipe_wm(state, crtc);

	return 0;
}

int intel_compute_intermediate_wm(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!i915->display.funcs.wm->compute_intermediate_wm)
		return 0;

	if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
		return 0;

	return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
}

bool intel_initial_watermarks(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->initial_watermarks) {
		i915->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

void intel_atomic_update_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->atomic_update_watermarks)
		i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

void intel_optimize_watermarks(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->optimize_watermarks)
		i915->display.funcs.wm->optimize_watermarks(state, crtc);
}

int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_global_watermarks)
		return i915->display.funcs.wm->compute_global_watermarks(state);

	return 0;
}

void intel_wm_get_hw_state(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->get_hw_state)
		return i915->display.funcs.wm->get_hw_state(i915);
}

bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

void intel_print_wm_latency(struct drm_i915_private *dev_priv,
			    const char *name, const u16 wm[])
{
	int level;

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}

void intel_wm_init(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		skl_wm_init(i915);
	else
		i9xx_wm_init(i915);
}
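
/*
 * Rough sketch of how the dispatchers above get their targets: the
 * per-platform init called from intel_wm_init() fills i915->display.funcs.wm
 * with a vfunc table whose members match the hooks dereferenced in this
 * file.  The struct and callback names below are illustrative assumptions;
 * the real tables live in skl_watermark.c and i9xx_wm.c.
 *
 *	static const struct intel_wm_funcs example_wm_funcs = {
 *		.compute_pipe_wm = example_compute_pipe_wm,
 *		.compute_intermediate_wm = example_compute_intermediate_wm,
 *		.initial_watermarks = example_initial_watermarks,
 *		.optimize_watermarks = example_optimize_watermarks,
 *		.get_hw_state = example_wm_get_hw_state,
 *	};
 *
 * Hooks left NULL are simply skipped, which is why every wrapper above
 * checks the pointer before calling through it.
 */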

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	int level;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(&dev_priv->drm);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

#ifdef notyet

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
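
/*
 * The write handlers below back the debugfs files registered in
 * intel_wm_debugfs_register().  A write must supply exactly
 * display.wm.num_levels space-separated values, in the raw units described
 * above (0.5us steps for pre-gen9 WM1+).  For example, on a hypothetical
 * device exposed as DRM minor 0 with five WM levels, something like:
 *
 *	echo "2 4 6 8 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * would be accepted, while supplying more or fewer values than
 * display.wm.num_levels is rejected with -EINVAL.
 */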
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 new[8] = { 0 };
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != dev_priv->display.wm.num_levels)
		return -EINVAL;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(&dev_priv->drm);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

#endif /* notyet */

void intel_wm_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_pri_wm_latency_fops);

	debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_spr_wm_latency_fops);

	debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_cur_wm_latency_fops);

	skl_watermark_debugfs_register(i915);
}