// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/pfn_t.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "framebuffer.h"
#include "gtt.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

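/*
 * CMAP_TOHW() scales a 16-bit colour component down to a hardware field of
 * _width bits, adding a 0x7fff bias so the result rounds rather than
 * truncates.  Worked example for a 5-bit field:
 *   CMAP_TOHW(0xffff, 5) = ((0xffff << 5) + 0x7fff - 0xffff) >> 16 = 31
 *   CMAP_TOHW(0x0000, 5) = (0x7fff - 0) >> 16 = 0
 */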
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_device *dev = psbfb->base.dev;
	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);

	/*
	 *	We have to poke our nose in here. The core fb code assumes
	 *	panning is part of the hardware that can be invoked before
	 *	the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages; we need to shift the right
		   number of pages */
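		/*
		 * Illustrative example (numbers assumed, not from the
		 * original comment): with an 8192-byte line length each
		 * scanline spans two GTT pages, so panning down by one
		 * line rolls the GTT by two pages.
		 */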
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}
98 
99 static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
100 {
101 	struct vm_area_struct *vma = vmf->vma;
102 	struct psb_framebuffer *psbfb = vma->vm_private_data;
103 	struct drm_device *dev = psbfb->base.dev;
104 	struct drm_psb_private *dev_priv = dev->dev_private;
105 	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
106 	int page_num;
107 	int i;
108 	unsigned long address;
109 	vm_fault_t ret = VM_FAULT_SIGBUS;
110 	unsigned long pfn;
111 	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
112 				  gtt->offset;
113 
114 	page_num = vma_pages(vma);
115 	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
116 
117 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
118 
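	/*
	 * The framebuffer sits in a physically contiguous region of stolen
	 * memory, so a single fault walks the whole VMA and inserts one PTE
	 * per page, starting from the base of the mapping.
	 */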
	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault	= psbfb_vm_fault,
	.open	= psbfb_vm_open,
	.close	= psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	if (!psbfb->addr_space)
		psbfb->addr_space = vma->vm_file->f_mapping;
	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
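	/*
	 * No remapping happens here; psbfb_vm_fault() fills in the PTEs on
	 * first access through the vm_ops installed below.
	 */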
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)psbfb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};

/**
 *	psb_framebuffer_init	-	initialize a framebuffer
 *	@dev: our DRM device
 *	@fb: framebuffer to set up
 *	@mode_cmd: mode description
 *	@gt: backing object
 *
 *	Configure and fill in the boilerplate for our frame buffer. Return
 *	0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
					struct psb_framebuffer *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd,
					struct gtt_range *gt)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_format_info(mode_cmd->pixel_format);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

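	/* The display hardware requires a pitch that is 64 byte aligned */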
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
	fb->base.obj[0] = &gt->gem;
	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 *	psb_framebuffer_create	-	create a framebuffer backed by gt
 *	@dev: our DRM device
 *	@mode_cmd: the description of the requested mode
 *	@gt: the backing object
 *
 *	Create a framebuffer object backed by gt and fill in the required
 *	boilerplate.
 *
 *	TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct gtt_range *gt)
{
	struct psb_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return &fb->base;
}

/**
 *	psbfb_alloc		-	allocate frame buffer memory
 *	@dev: the DRM device
 *	@aligned_size: space needed
 *
 *	Allocate the frame buffer. In the usual case we get a GTT range that
 *	is backed by stolen memory and life is simple. If there isn't enough
 *	stolen memory we fail, as we don't have the virtual mapping space to
 *	really vmap it and the kernel console code can't handle non-linear
 *	framebuffers.
 *
 *	Revisit this if and when the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 *	psbfb_create		-	create a framebuffer
 *	@fbdev: the framebuffer device
 *	@sizes: specification of the layout
 *
 *	Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct psb_fbdev *fbdev,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

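	/*
	 * Retry with progressively weaker pitch alignment: pitch_lines
	 * doubles on each pass, so the requested alignment of
	 * 4096 >> pitch_lines shrinks until an allocation succeeds or we
	 * give up and fall back to the unaccelerated path below.
	 */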
	do {
		/*
		 * Acceleration via the GTT requires the pitch to be
		 * power-of-two aligned. Preferably a page, but less
		 * is OK with some fonts.
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 *	We couldn't get the space we wanted, fall back to the
		 *	display engine requirement instead.  The HW requires
		 *	the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
	if (ret)
		goto out;

	fb = &psbfb->base;
	psbfb->fbdev = info;

	fbdev->psb_fb_helper.fb = fb;

	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Access the stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n",
					psbfb->base.width, psbfb->base.height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 *	psb_user_framebuffer_create	-	create framebuffer
 *	@dev: our DRM device
 *	@filp: client file
 *	@cmd: mode request
 *
 *	Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct gtt_range *r;
	struct drm_gem_object *obj;

	/*
	 *	Find the GEM object and thus the gtt range object that is
	 *	to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	r = container_of(obj, struct gtt_range, gem);
	return psb_framebuffer_create(dev, cmd, r);
}

static int psbfb_probe(struct drm_fb_helper *helper,
				struct drm_fb_helper_surface_size *sizes)
{
	struct psb_fbdev *psb_fbdev =
		container_of(helper, struct psb_fbdev, psb_fb_helper);
	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in stolen memory at 32 bpp then switch to
	   16 bpp to get a console at full resolution. The X mode setting
	   server will allocate its own 32 bpp GEM framebuffer. */
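	/*
	 * Illustrative numbers (not from the original comment): a 2048x2048
	 * mode at 32 bpp needs ALIGN(2048 * 4, 64) * 2048 = 16 MiB of stolen
	 * memory, while the same mode at 16 bpp needs only 8 MiB.
	 */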
	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
			dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(psb_fbdev, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);

	drm_fb_helper_fini(&fbdev->psb_fb_helper);
	drm_framebuffer_unregister_private(&psbfb->base);
	drm_framebuffer_cleanup(&psbfb->base);

	if (psbfb->base.obj[0])
		drm_gem_object_put_unlocked(psbfb->base.obj[0]);
	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct psb_fbdev *fbdev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
	if (!fbdev) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fbdev = fbdev;

	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
				 INTELFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&fbdev->psb_fb_helper);
free:
	kfree(fbdev);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fbdev)
		return;

	psb_fbdev_destroy(dev, dev_priv->fbdev);
	kfree(dev_priv->fbdev);
	dev_priv->fbdev = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

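		/*
		 * Pick the CRTCs this encoder type can drive and the output
		 * types it may be cloned with; both are bitmasks consumed by
		 * the DRM core when validating configurations.
		 */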
		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = (1 << INTEL_OUTPUT_SDVO);
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = (1 << INTEL_OUTPUT_LVDS);
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_MIPI);
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_EDP);
			break;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}

void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}
661