xref: /linux/drivers/gpu/drm/gma500/gma_display.c (revision c6fbb759)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright © 2006-2011 Intel Corporation
4  *
5  * Authors:
6  *	Eric Anholt <eric@anholt.net>
7  *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/highmem.h>
12 
13 #include <drm/drm_crtc.h>
14 #include <drm/drm_fourcc.h>
15 #include <drm/drm_framebuffer.h>
16 #include <drm/drm_vblank.h>
17 
18 #include "framebuffer.h"
19 #include "gem.h"
20 #include "gma_display.h"
21 #include "psb_irq.h"
22 #include "psb_intel_drv.h"
23 #include "psb_intel_reg.h"
24 
25 /*
26  * Returns whether any output on the specified pipe is of the specified type
27  */
28 bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
29 {
30 	struct drm_device *dev = crtc->dev;
31 	struct drm_connector_list_iter conn_iter;
32 	struct drm_connector *connector;
33 
34 	drm_connector_list_iter_begin(dev, &conn_iter);
35 	drm_for_each_connector_iter(connector, &conn_iter) {
36 		if (connector->encoder && connector->encoder->crtc == crtc) {
37 			struct gma_encoder *gma_encoder =
38 						gma_attached_encoder(connector);
39 			if (gma_encoder->type == type) {
40 				drm_connector_list_iter_end(&conn_iter);
41 				return true;
42 			}
43 		}
44 	}
45 	drm_connector_list_iter_end(&conn_iter);
46 
47 	return false;
48 }
49 
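/*
 * Note: this is a fixed worst-case delay rather than a true wait on the
 * vblank interrupt; mdelay(20) busy-waits for one full frame at refresh
 * rates down to 50 Hz.
 */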
50 void gma_wait_for_vblank(struct drm_device *dev)
51 {
52 	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
53 	mdelay(20);
54 }
55 
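/*
 * Program the primary plane to scan out the CRTC's framebuffer at panning
 * position (x, y): the new fb is pinned into the GTT, the stride, pixel
 * format and base/offset registers are written, and the old fb (if any) is
 * unpinned. If the display island cannot be powered up, the update is
 * skipped and 0 is returned.
 */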
56 int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
57 		      struct drm_framebuffer *old_fb)
58 {
59 	struct drm_device *dev = crtc->dev;
60 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
61 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
62 	struct drm_framebuffer *fb = crtc->primary->fb;
63 	struct psb_gem_object *pobj;
64 	int pipe = gma_crtc->pipe;
65 	const struct psb_offset *map = &dev_priv->regmap[pipe];
66 	unsigned long start, offset;
67 	u32 dspcntr;
68 	int ret = 0;
69 
70 	if (!gma_power_begin(dev, true))
71 		return 0;
72 
73 	/* no fb bound */
74 	if (!fb) {
75 		dev_err(dev->dev, "No FB bound\n");
76 		goto gma_pipe_cleaner;
77 	}
78 
79 	pobj = to_psb_gem_object(fb->obj[0]);
80 
81 	/* We are displaying this buffer, so make sure it is actually
82 	   loaded into the GTT */
83 	ret = psb_gem_pin(pobj);
84 	if (ret < 0)
85 		goto gma_pipe_set_base_exit;
86 	start = pobj->offset;
87 	offset = y * fb->pitches[0] + x * fb->format->cpp[0];
88 
89 	REG_WRITE(map->stride, fb->pitches[0]);
90 
91 	dspcntr = REG_READ(map->cntr);
92 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
93 
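	/*
	 * Select the plane pixel format from the framebuffer's bytes per
	 * pixel; 24 bpp and 32 bpp both use the 32 bpp no-alpha plane format.
	 */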
94 	switch (fb->format->cpp[0] * 8) {
95 	case 8:
96 		dspcntr |= DISPPLANE_8BPP;
97 		break;
98 	case 16:
99 		if (fb->format->depth == 15)
100 			dspcntr |= DISPPLANE_15_16BPP;
101 		else
102 			dspcntr |= DISPPLANE_16BPP;
103 		break;
104 	case 24:
105 	case 32:
106 		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
107 		break;
108 	default:
109 		dev_err(dev->dev, "Unknown color depth\n");
110 		ret = -EINVAL;
111 		goto gma_pipe_set_base_exit;
112 	}
113 	REG_WRITE(map->cntr, dspcntr);
114 
115 	dev_dbg(dev->dev,
116 		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
117 
118 	/* FIXME: Investigate whether this really is the base for psb and why
119 		  the linear offset is named base for the other chips. map->surf
120 		  should be the base and map->linoff the offset for all chips */
121 	if (IS_PSB(dev)) {
122 		REG_WRITE(map->base, offset + start);
123 		REG_READ(map->base);
124 	} else {
125 		REG_WRITE(map->base, offset);
126 		REG_READ(map->base);
127 		REG_WRITE(map->surf, start);
128 		REG_READ(map->surf);
129 	}
130 
131 gma_pipe_cleaner:
132 	/* If there was a previous display we can now unpin it */
133 	if (old_fb)
134 		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));
135 
136 gma_pipe_set_base_exit:
137 	gma_power_end(dev);
138 	return ret;
139 }
140 
141 /* Loads the palette/gamma unit for the CRTC with the prepared values */
142 void gma_crtc_load_lut(struct drm_crtc *crtc)
143 {
144 	struct drm_device *dev = crtc->dev;
145 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
146 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
147 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
148 	int palreg = map->palette;
149 	u16 *r, *g, *b;
150 	int i;
151 
152 	/* The clocks have to be on to load the palette. */
153 	if (!crtc->enabled)
154 		return;
155 
156 	r = crtc->gamma_store;
157 	g = r + crtc->gamma_size;
158 	b = g + crtc->gamma_size;
159 
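	/*
	 * Each palette entry packs the top 8 bits of the 16-bit gamma ramp
	 * values as R:G:B in bits 23:16, 15:8 and 7:0 of one 32-bit register,
	 * with the per-entry lut_adj offset added to each channel. If the
	 * display island is powered down, the values are only stashed in the
	 * saved register state (presumably to be written back on restore).
	 */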
160 	if (gma_power_begin(dev, false)) {
161 		for (i = 0; i < 256; i++) {
162 			REG_WRITE(palreg + 4 * i,
163 				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
164 				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
165 				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
166 		}
167 		gma_power_end(dev);
168 	} else {
169 		for (i = 0; i < 256; i++) {
170 			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
171 			dev_priv->regs.pipe[0].palette[i] =
172 				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
173 				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
174 				((*b++ >> 8) + gma_crtc->lut_adj[i]);
175 		}
176 
177 	}
178 }
179 
180 static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
181 			      u16 *blue, u32 size,
182 			      struct drm_modeset_acquire_ctx *ctx)
183 {
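	/*
	 * The DRM core copies the new red/green/blue ramps into
	 * crtc->gamma_store before calling this hook (the arguments point
	 * into that same storage), so reloading the LUT from gamma_store is
	 * sufficient here.
	 */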
184 	gma_crtc_load_lut(crtc);
185 
186 	return 0;
187 }
188 
189 /*
190  * Sets the power management mode of the pipe and plane.
191  *
192  * This code should probably grow support for turning the cursor off and back
193  * on appropriately at the same time as we're turning the pipe off/on.
194  */
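/*
 * Note on ordering: the power-up path enables the DPLL first, then the
 * plane, then the pipe; the power-down path reverses this (plane, pipe,
 * then DPLL), with vblank waits in between so each disable takes effect
 * before the clock is stopped.
 */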
195 void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
196 {
197 	struct drm_device *dev = crtc->dev;
198 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
199 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
200 	int pipe = gma_crtc->pipe;
201 	const struct psb_offset *map = &dev_priv->regmap[pipe];
202 	u32 temp;
203 
204 	/* XXX: When our outputs are all unaware of DPMS modes other than off
205 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
206 	 */
207 
208 	if (IS_CDV(dev))
209 		dev_priv->ops->disable_sr(dev);
210 
211 	switch (mode) {
212 	case DRM_MODE_DPMS_ON:
213 	case DRM_MODE_DPMS_STANDBY:
214 	case DRM_MODE_DPMS_SUSPEND:
215 		if (gma_crtc->active)
216 			break;
217 
218 		gma_crtc->active = true;
219 
220 		/* Enable the DPLL */
221 		temp = REG_READ(map->dpll);
222 		if ((temp & DPLL_VCO_ENABLE) == 0) {
223 			REG_WRITE(map->dpll, temp);
224 			REG_READ(map->dpll);
225 			/* Wait for the clocks to stabilize. */
226 			udelay(150);
227 			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
228 			REG_READ(map->dpll);
229 			/* Wait for the clocks to stabilize. */
230 			udelay(150);
231 			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
232 			REG_READ(map->dpll);
233 			/* Wait for the clocks to stabilize. */
234 			udelay(150);
235 		}
236 
237 		/* Enable the plane */
238 		temp = REG_READ(map->cntr);
239 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
240 			REG_WRITE(map->cntr,
241 				  temp | DISPLAY_PLANE_ENABLE);
242 			/* Flush the plane changes */
243 			REG_WRITE(map->base, REG_READ(map->base));
244 		}
245 
246 		udelay(150);
247 
248 		/* Enable the pipe */
249 		temp = REG_READ(map->conf);
250 		if ((temp & PIPEACONF_ENABLE) == 0)
251 			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
252 
253 		temp = REG_READ(map->status);
254 		temp &= ~(0xFFFF);
255 		temp |= PIPE_FIFO_UNDERRUN;
256 		REG_WRITE(map->status, temp);
257 		REG_READ(map->status);
258 
259 		gma_crtc_load_lut(crtc);
260 
261 		/* Give the overlay scaler a chance to enable
262 		 * if it's on this pipe */
263 		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
264 
265 		drm_crtc_vblank_on(crtc);
266 		break;
267 	case DRM_MODE_DPMS_OFF:
268 		if (!gma_crtc->active)
269 			break;
270 
271 		gma_crtc->active = false;
272 
273 		/* Give the overlay scaler a chance to disable
274 		 * if it's on this pipe */
275 		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
276 
277 		/* Disable the VGA plane that we never use */
278 		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
279 
280 		/* Turn off vblank interrupts */
281 		drm_crtc_vblank_off(crtc);
282 
283 		/* Wait for vblank for the disable to take effect */
284 		gma_wait_for_vblank(dev);
285 
286 		/* Disable plane */
287 		temp = REG_READ(map->cntr);
288 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
289 			REG_WRITE(map->cntr,
290 				  temp & ~DISPLAY_PLANE_ENABLE);
291 			/* Flush the plane changes */
292 			REG_WRITE(map->base, REG_READ(map->base));
293 			REG_READ(map->base);
294 		}
295 
296 		/* Disable pipe */
297 		temp = REG_READ(map->conf);
298 		if ((temp & PIPEACONF_ENABLE) != 0) {
299 			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
300 			REG_READ(map->conf);
301 		}
302 
303 		/* Wait for vblank for the disable to take effect. */
304 		gma_wait_for_vblank(dev);
305 
306 		udelay(150);
307 
308 		/* Disable DPLL */
309 		temp = REG_READ(map->dpll);
310 		if ((temp & DPLL_VCO_ENABLE) != 0) {
311 			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
312 			REG_READ(map->dpll);
313 		}
314 
315 		/* Wait for the clocks to turn off. */
316 		udelay(150);
317 		break;
318 	}
319 
320 	if (IS_CDV(dev))
321 		dev_priv->ops->update_wm(dev, crtc);
322 
323 	/* Set FIFO watermarks */
324 	REG_WRITE(DSPARB, 0x3F3E);
325 }
326 
327 static int gma_crtc_cursor_set(struct drm_crtc *crtc,
328 			       struct drm_file *file_priv, uint32_t handle,
329 			       uint32_t width, uint32_t height)
330 {
331 	struct drm_device *dev = crtc->dev;
332 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
333 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
334 	int pipe = gma_crtc->pipe;
335 	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
336 	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
337 	uint32_t temp;
338 	size_t addr = 0;
339 	struct psb_gem_object *pobj;
340 	struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
341 	struct drm_gem_object *obj;
342 	void *tmp_dst;
343 	int ret = 0, i, cursor_pages;
344 
345 	/* If we didn't get a handle then turn the cursor off */
346 	if (!handle) {
347 		temp = CURSOR_MODE_DISABLE;
348 		if (gma_power_begin(dev, false)) {
349 			REG_WRITE(control, temp);
350 			REG_WRITE(base, 0);
351 			gma_power_end(dev);
352 		}
353 
354 		/* Unpin the old GEM object */
355 		if (gma_crtc->cursor_obj) {
356 			pobj = to_psb_gem_object(gma_crtc->cursor_obj);
357 			psb_gem_unpin(pobj);
358 			drm_gem_object_put(gma_crtc->cursor_obj);
359 			gma_crtc->cursor_obj = NULL;
360 		}
361 		return 0;
362 	}
363 
364 	/* Currently we only support 64x64 cursors */
365 	if (width != 64 || height != 64) {
366 		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
367 		return -EINVAL;
368 	}
369 
370 	obj = drm_gem_object_lookup(file_priv, handle);
371 	if (!obj) {
372 		ret = -ENOENT;
373 		goto unlock;
374 	}
375 
376 	if (obj->size < width * height * 4) {
377 		dev_dbg(dev->dev, "Buffer is too small\n");
378 		ret = -ENOMEM;
379 		goto unref_cursor;
380 	}
381 
382 	pobj = to_psb_gem_object(obj);
383 
384 	/* Pin the memory into the GTT */
385 	ret = psb_gem_pin(pobj);
386 	if (ret) {
387 		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
388 		goto unref_cursor;
389 	}
390 
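	/*
	 * Two addressing schemes: chips that need a physical cursor address
	 * copy the cursor image into the preallocated cursor memory behind
	 * cursor_pobj and program the previously set up gma_crtc->cursor_addr,
	 * while the others program the GTT offset of the pinned GEM object
	 * directly.
	 */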
391 	if (dev_priv->ops->cursor_needs_phys) {
392 		if (!cursor_pobj) {
393 			dev_err(dev->dev, "No hardware cursor mem available\n");
394 			ret = -ENOMEM;
395 			goto unref_cursor;
396 		}
397 
398 		cursor_pages = obj->size / PAGE_SIZE;
399 		if (cursor_pages > 4)
400 			cursor_pages = 4; /* Prevent overflow */
401 
402 		/* Copy the cursor to cursor mem */
403 		tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
404 		for (i = 0; i < cursor_pages; i++) {
405 			memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
406 			tmp_dst += PAGE_SIZE;
407 		}
408 
409 		addr = gma_crtc->cursor_addr;
410 	} else {
411 		addr = pobj->offset;
412 		gma_crtc->cursor_addr = addr;
413 	}
414 
415 	temp = 0;
416 	/* set the pipe for the cursor */
417 	temp |= (pipe << 28);
418 	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
419 
420 	if (gma_power_begin(dev, false)) {
421 		REG_WRITE(control, temp);
422 		REG_WRITE(base, addr);
423 		gma_power_end(dev);
424 	}
425 
426 	/* unpin the old bo */
427 	if (gma_crtc->cursor_obj) {
428 		pobj = to_psb_gem_object(gma_crtc->cursor_obj);
429 		psb_gem_unpin(pobj);
430 		drm_gem_object_put(gma_crtc->cursor_obj);
431 	}
432 
433 	gma_crtc->cursor_obj = obj;
434 unlock:
435 	return ret;
436 
437 unref_cursor:
438 	drm_gem_object_put(obj);
439 	return ret;
440 }
441 
442 static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
443 {
444 	struct drm_device *dev = crtc->dev;
445 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
446 	int pipe = gma_crtc->pipe;
447 	uint32_t temp = 0;
448 	uint32_t addr;
449 
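	/*
	 * The position register takes sign/magnitude coordinates: each axis
	 * is the absolute value masked with CURSOR_POS_MASK, with
	 * CURSOR_POS_SIGN set for negative values, shifted into place by
	 * CURSOR_X_SHIFT/CURSOR_Y_SHIFT. The base register is rewritten
	 * afterwards, which presumably latches the new position.
	 */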
450 	if (x < 0) {
451 		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
452 		x = -x;
453 	}
454 	if (y < 0) {
455 		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
456 		y = -y;
457 	}
458 
459 	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
460 	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
461 
462 	addr = gma_crtc->cursor_addr;
463 
464 	if (gma_power_begin(dev, false)) {
465 		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
466 		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
467 		gma_power_end(dev);
468 	}
469 	return 0;
470 }
471 
472 void gma_crtc_prepare(struct drm_crtc *crtc)
473 {
474 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
475 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
476 }
477 
478 void gma_crtc_commit(struct drm_crtc *crtc)
479 {
480 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
481 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
482 }
483 
484 void gma_crtc_disable(struct drm_crtc *crtc)
485 {
486 	struct psb_gem_object *pobj;
487 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
488 
489 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
490 
491 	if (crtc->primary->fb) {
492 		pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
493 		psb_gem_unpin(pobj);
494 	}
495 }
496 
497 void gma_crtc_destroy(struct drm_crtc *crtc)
498 {
499 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
500 
501 	if (gma_crtc->cursor_pobj)
502 		drm_gem_object_put(&gma_crtc->cursor_pobj->base);
503 
504 	kfree(gma_crtc->crtc_state);
505 	drm_crtc_cleanup(crtc);
506 	kfree(gma_crtc);
507 }
508 
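/*
 * Legacy (non-atomic) page flip: the new framebuffer is programmed through
 * the CRTC helper's mode_set_base hook. When an event is requested,
 * page_flip_event is armed under dev->event_lock with a vblank reference
 * held so the interrupt handler can deliver the completion event; on
 * failure the event is disarmed and the previous framebuffer pointer is
 * restored.
 */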
509 int gma_crtc_page_flip(struct drm_crtc *crtc,
510 		       struct drm_framebuffer *fb,
511 		       struct drm_pending_vblank_event *event,
512 		       uint32_t page_flip_flags,
513 		       struct drm_modeset_acquire_ctx *ctx)
514 {
515 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
516 	struct drm_framebuffer *current_fb = crtc->primary->fb;
517 	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
518 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
519 	struct drm_device *dev = crtc->dev;
520 	unsigned long flags;
521 	int ret;
522 
523 	if (!crtc_funcs->mode_set_base)
524 		return -EINVAL;
525 
526 	/* Using mode_set_base requires the new fb to be set already. */
527 	crtc->primary->fb = fb;
528 
529 	if (event) {
530 		spin_lock_irqsave(&dev->event_lock, flags);
531 
532 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
533 
534 		gma_crtc->page_flip_event = event;
535 		spin_unlock_irqrestore(&dev->event_lock, flags);
536 
537 		/* Call this locked if we want an event at vblank interrupt. */
538 		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
539 		if (ret) {
540 			spin_lock_irqsave(&dev->event_lock, flags);
541 			if (gma_crtc->page_flip_event) {
542 				gma_crtc->page_flip_event = NULL;
543 				drm_crtc_vblank_put(crtc);
544 			}
545 			spin_unlock_irqrestore(&dev->event_lock, flags);
546 		}
547 	} else {
548 		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
549 	}
550 
551 	/* Restore previous fb in case of failure. */
552 	if (ret)
553 		crtc->primary->fb = current_fb;
554 
555 	return ret;
556 }
557 
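/*
 * Legacy (non-atomic) CRTC entry points shared across the GMA500 family;
 * configuration changes go through drm_crtc_helper_set_config and the
 * mode_set_base path rather than atomic commit.
 */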
558 const struct drm_crtc_funcs gma_crtc_funcs = {
559 	.cursor_set = gma_crtc_cursor_set,
560 	.cursor_move = gma_crtc_cursor_move,
561 	.gamma_set = gma_crtc_gamma_set,
562 	.set_config = drm_crtc_helper_set_config,
563 	.destroy = gma_crtc_destroy,
564 	.page_flip = gma_crtc_page_flip,
565 	.enable_vblank = gma_crtc_enable_vblank,
566 	.disable_vblank = gma_crtc_disable_vblank,
567 	.get_vblank_counter = gma_crtc_get_vblank_counter,
568 };
569 
570 /*
571  * Save the HW state of the given CRTC
572  */
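/*
 * The saved values (pipe timings, PLL dividers, plane control/stride/base
 * and the 256-entry palette) live in gma_crtc->crtc_state and are written
 * back by gma_crtc_restore(), e.g. across suspend/resume.
 */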
573 void gma_crtc_save(struct drm_crtc *crtc)
574 {
575 	struct drm_device *dev = crtc->dev;
576 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
577 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
578 	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
579 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
580 	uint32_t palette_reg;
581 	int i;
582 
583 	if (!crtc_state) {
584 		dev_err(dev->dev, "No CRTC state found\n");
585 		return;
586 	}
587 
588 	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
589 	crtc_state->savePIPECONF = REG_READ(map->conf);
590 	crtc_state->savePIPESRC = REG_READ(map->src);
591 	crtc_state->saveFP0 = REG_READ(map->fp0);
592 	crtc_state->saveFP1 = REG_READ(map->fp1);
593 	crtc_state->saveDPLL = REG_READ(map->dpll);
594 	crtc_state->saveHTOTAL = REG_READ(map->htotal);
595 	crtc_state->saveHBLANK = REG_READ(map->hblank);
596 	crtc_state->saveHSYNC = REG_READ(map->hsync);
597 	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
598 	crtc_state->saveVBLANK = REG_READ(map->vblank);
599 	crtc_state->saveVSYNC = REG_READ(map->vsync);
600 	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
601 
602 	/* NOTE: DSPSIZE DSPPOS only for psb */
603 	crtc_state->saveDSPSIZE = REG_READ(map->size);
604 	crtc_state->saveDSPPOS = REG_READ(map->pos);
605 
606 	crtc_state->saveDSPBASE = REG_READ(map->base);
607 
608 	palette_reg = map->palette;
609 	for (i = 0; i < 256; ++i)
610 		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
611 }
612 
613 /*
614  * Restore the HW state of the given CRTC
615  */
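/*
 * Restore runs in dependency order: the PLL dividers (FP0/FP1) are written
 * with the VCO forced off, the saved DPLL value is then restored, followed
 * by the timing, plane and pipe registers and finally the palette, with
 * vblank waits so each stage settles.
 */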
616 void gma_crtc_restore(struct drm_crtc *crtc)
617 {
618 	struct drm_device *dev = crtc->dev;
619 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
620 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
621 	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
622 	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
623 	uint32_t palette_reg;
624 	int i;
625 
626 	if (!crtc_state) {
627 		dev_err(dev->dev, "No crtc state\n");
628 		return;
629 	}
630 
631 	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
632 		REG_WRITE(map->dpll,
633 			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
634 		REG_READ(map->dpll);
635 		udelay(150);
636 	}
637 
638 	REG_WRITE(map->fp0, crtc_state->saveFP0);
639 	REG_READ(map->fp0);
640 
641 	REG_WRITE(map->fp1, crtc_state->saveFP1);
642 	REG_READ(map->fp1);
643 
644 	REG_WRITE(map->dpll, crtc_state->saveDPLL);
645 	REG_READ(map->dpll);
646 	udelay(150);
647 
648 	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
649 	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
650 	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
651 	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
652 	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
653 	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
654 	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
655 
656 	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
657 	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
658 
659 	REG_WRITE(map->src, crtc_state->savePIPESRC);
660 	REG_WRITE(map->base, crtc_state->saveDSPBASE);
661 	REG_WRITE(map->conf, crtc_state->savePIPECONF);
662 
663 	gma_wait_for_vblank(dev);
664 
665 	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
666 	REG_WRITE(map->base, crtc_state->saveDSPBASE);
667 
668 	gma_wait_for_vblank(dev);
669 
670 	palette_reg = map->palette;
671 	for (i = 0; i < 256; ++i)
672 		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
673 }
674 
675 void gma_encoder_prepare(struct drm_encoder *encoder)
676 {
677 	const struct drm_encoder_helper_funcs *encoder_funcs =
678 	    encoder->helper_private;
679 	/* LVDS has its own version of prepare, see psb_intel_lvds_prepare */
680 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
681 }
682 
683 void gma_encoder_commit(struct drm_encoder *encoder)
684 {
685 	const struct drm_encoder_helper_funcs *encoder_funcs =
686 	    encoder->helper_private;
687 	/* LVDS has its own version of commit, see psb_intel_lvds_commit */
688 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
689 }
690 
691 void gma_encoder_destroy(struct drm_encoder *encoder)
692 {
693 	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
694 
695 	drm_encoder_cleanup(encoder);
696 	kfree(intel_encoder);
697 }
698 
699 /* Currently there is only a 1:1 mapping of encoders and connectors */
700 struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
701 {
702 	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
703 
704 	return &gma_encoder->base;
705 }
706 
707 void gma_connector_attach_encoder(struct gma_connector *connector,
708 				  struct gma_encoder *encoder)
709 {
710 	connector->encoder = encoder;
711 	drm_connector_attach_encoder(&connector->base,
712 					  &encoder->base);
713 }
714 
715 #define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
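/*
 * Note: GMA_PLL_INVALID() returns straight out of the calling function; the
 * DRM_ERROR() is commented out, presumably because gma_find_best_pll()
 * probes many divider combinations and rejected candidates are the common
 * case.
 */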
716 
717 bool gma_pll_is_valid(struct drm_crtc *crtc,
718 		      const struct gma_limit_t *limit,
719 		      struct gma_clock_t *clock)
720 {
721 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
722 		GMA_PLL_INVALID("p1 out of range");
723 	if (clock->p < limit->p.min || limit->p.max < clock->p)
724 		GMA_PLL_INVALID("p out of range");
725 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
726 		GMA_PLL_INVALID("m2 out of range");
727 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
728 		GMA_PLL_INVALID("m1 out of range");
729 	/* On CDV m1 is always 0 */
730 	if (clock->m1 <= clock->m2 && clock->m1 != 0)
731 		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
732 	if (clock->m < limit->m.min || limit->m.max < clock->m)
733 		GMA_PLL_INVALID("m out of range");
734 	if (clock->n < limit->n.min || limit->n.max < clock->n)
735 		GMA_PLL_INVALID("n out of range");
736 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
737 		GMA_PLL_INVALID("vco out of range");
738 	/* XXX: We may need to be checking "Dot clock"
739 	 * depending on the multiplier, connector, etc.,
740 	 * rather than just a single range.
741 	 */
742 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
743 		GMA_PLL_INVALID("dot out of range");
744 
745 	return true;
746 }
747 
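/*
 * Brute-force PLL search: p2 is chosen up front (from the current LVDS
 * single/dual-channel state, or from the dot clock limit), then every
 * m1/m2/n/p1 combination within the limits is evaluated and the candidate
 * whose computed dot clock is closest to the target is kept. Returns true
 * if a usable candidate was recorded in best_clock.
 */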
748 bool gma_find_best_pll(const struct gma_limit_t *limit,
749 		       struct drm_crtc *crtc, int target, int refclk,
750 		       struct gma_clock_t *best_clock)
751 {
752 	struct drm_device *dev = crtc->dev;
753 	const struct gma_clock_funcs *clock_funcs =
754 						to_gma_crtc(crtc)->clock_funcs;
755 	struct gma_clock_t clock;
756 	int err = target;
757 
758 	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
759 	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
760 		/*
761 		 * For LVDS, if the panel is on, just rely on its current
762 		 * settings for dual-channel.  We haven't figured out how to
763 		 * reliably set up different single/dual channel state, if we
764 		 * even can.
765 		 */
766 		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
767 		    LVDS_CLKB_POWER_UP)
768 			clock.p2 = limit->p2.p2_fast;
769 		else
770 			clock.p2 = limit->p2.p2_slow;
771 	} else {
772 		if (target < limit->p2.dot_limit)
773 			clock.p2 = limit->p2.p2_slow;
774 		else
775 			clock.p2 = limit->p2.p2_fast;
776 	}
777 
778 	memset(best_clock, 0, sizeof(*best_clock));
779 
780 	/* m1 is always 0 on CDV so the outermost loop will run just once */
781 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
782 		for (clock.m2 = limit->m2.min;
783 		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
784 		      clock.m2 <= limit->m2.max; clock.m2++) {
785 			for (clock.n = limit->n.min;
786 			     clock.n <= limit->n.max; clock.n++) {
787 				for (clock.p1 = limit->p1.min;
788 				     clock.p1 <= limit->p1.max;
789 				     clock.p1++) {
790 					int this_err;
791 
792 					clock_funcs->clock(refclk, &clock);
793 
794 					if (!clock_funcs->pll_is_valid(crtc,
795 								limit, &clock))
796 						continue;
797 
798 					this_err = abs(clock.dot - target);
799 					if (this_err < err) {
800 						*best_clock = clock;
801 						err = this_err;
802 					}
803 				}
804 			}
805 		}
806 	}
807 
808 	return err != target;
809 }
810