/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
#include <linux/console.h>

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_atomic.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_bo.h"
#include "nouveau_fbcon.h"
#include "nouveau_chan.h"
#include "nouveau_vmm.h"

#include "nouveau_crtc.h"

MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);

MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
static int nouveau_fbcon_bpp;
module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);

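/* Accelerated fbcon entry points.  Each wrapper below takes the hardware
 * path for the detected GPU family while holding client.mutex (trylock
 * only); if the hardware path cannot be used (atomic context, acceleration
 * disabled, lock contended) or it fails, the wrapper falls back to the
 * drm_fb_helper_cfb_*() software implementation.
 */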
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nvif_device *device = &drm->client.device;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&drm->client.mutex)) {
		if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
			ret = nv04_fbcon_fillrect(info, rect);
		else
		if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
			ret = nv50_fbcon_fillrect(info, rect);
		else
			ret = nvc0_fbcon_fillrect(info, rect);
		mutex_unlock(&drm->client.mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	drm_fb_helper_cfb_fillrect(info, rect);
}

static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nvif_device *device = &drm->client.device;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&drm->client.mutex)) {
		if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
			ret = nv04_fbcon_copyarea(info, image);
		else
		if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
			ret = nv50_fbcon_copyarea(info, image);
		else
			ret = nvc0_fbcon_copyarea(info, image);
		mutex_unlock(&drm->client.mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	drm_fb_helper_cfb_copyarea(info, image);
}

static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nvif_device *device = &drm->client.device;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&drm->client.mutex)) {
		if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
			ret = nv04_fbcon_imageblit(info, image);
		else
		if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
			ret = nv50_fbcon_imageblit(info, image);
		else
			ret = nvc0_fbcon_imageblit(info, image);
		mutex_unlock(&drm->client.mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	drm_fb_helper_cfb_imageblit(info, image);
}

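/* fb_sync: wait for outstanding accelerated rendering on the channel to
 * complete before fbcon touches the framebuffer directly.
 */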
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nouveau_channel *chan = drm->channel;
	int ret;

	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&drm->client.mutex))
		return 0;

	ret = nouveau_channel_idle(chan);
	mutex_unlock(&drm->client.mutex);
	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}

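/* Hold a runtime PM reference for as long as userspace has the fbdev node
 * open; -EACCES (runtime PM disabled) is not treated as an error.
 */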
static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	int ret = pm_runtime_get_sync(drm->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put(drm->dev->dev);
		return ret;
	}
	return 0;
}

static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	pm_runtime_put(drm->dev->dev);
	return 0;
}

static const struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = nouveau_fbcon_open,
	.fb_release = nouveau_fbcon_release,
	.fb_fillrect = nouveau_fbcon_fillrect,
	.fb_copyarea = nouveau_fbcon_copyarea,
	.fb_imageblit = nouveau_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
};

static const struct fb_ops nouveau_fbcon_sw_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = nouveau_fbcon_open,
	.fb_release = nouveau_fbcon_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};

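/* Disable acceleration around suspend, saving the fb_info flags so that
 * nouveau_fbcon_accel_restore() can put them back on resume.
 */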
void
nouveau_fbcon_accel_save_disable(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	if (drm->fbcon && drm->fbcon->helper.fbdev) {
		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
	}
}

void
nouveau_fbcon_accel_restore(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	if (drm->fbcon && drm->fbcon->helper.fbdev) {
		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
	}
}

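/* Destroy the per-fbcon acceleration objects once the channel has gone
 * idle; only software rendering remains available afterwards.
 */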
static void
nouveau_fbcon_accel_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	if (fbcon && drm->channel) {
		console_lock();
		if (fbcon->helper.fbdev)
			fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
		console_unlock();
		nouveau_channel_idle(drm->channel);
		nvif_object_dtor(&fbcon->twod);
		nvif_object_dtor(&fbcon->blit);
		nvif_object_dtor(&fbcon->gdi);
		nvif_object_dtor(&fbcon->patt);
		nvif_object_dtor(&fbcon->rop);
		nvif_object_dtor(&fbcon->clip);
		nvif_object_dtor(&fbcon->surf2d);
	}
}

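/* Create the acceleration objects for the detected GPU family and switch
 * the fbdev over to the accelerated fb_ops on success.
 */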
static void
nouveau_fbcon_accel_init(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	struct fb_info *info = fbcon->helper.fbdev;
	int ret;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
		ret = nv04_fbcon_accel_init(info);
	else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		ret = nv50_fbcon_accel_init(info);
	else
		ret = nvc0_fbcon_accel_init(info);

	if (ret == 0)
		info->fbops = &nouveau_fbcon_ops;
}

static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
	struct fb_info *info = fbcon->helper.fbdev;
	struct fb_fillrect rect;

	/* Clear the entire fbcon.  The drm will program every connector
	 * with its preferred mode.  If the sizes differ, one display will
	 * quite likely have garbage around the console.
	 */
	rect.dx = rect.dy = 0;
	rect.width = info->var.xres_virtual;
	rect.height = info->var.yres_virtual;
	rect.color = 0;
	rect.rop = ROP_COPY;
	info->fbops->fb_fillrect(info, &rect);
}

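/* .fb_probe callback: allocate a VRAM buffer object for the console,
 * pin and map it, wrap it in a DRM framebuffer and register the fbdev.
 */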
static int
nouveau_fbcon_create(struct drm_fb_helper *helper,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct nouveau_fbdev *fbcon =
		container_of(helper, struct nouveau_fbdev, helper);
	struct drm_device *dev = fbcon->helper.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	int ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
			      mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(drm, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
	if (ret)
		goto out_unref;

	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret) {
		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(drm, "failed to map fb: %d\n", ret);
		goto out_unpin;
	}

	chan = nouveau_nofbaccel ? NULL : drm->channel;
	if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
		if (ret) {
			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_unlock;
	}

	/* setup helper */
	fbcon->helper.fb = fb;

	if (!chan)
		info->flags = FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.offset;
	info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;

	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
	info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;

	drm_fb_helper_fill_info(info, &fbcon->helper, sizes);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (chan)
		nouveau_fbcon_accel_init(dev);
	nouveau_fbcon_zfill(dev, fbcon);

	/* To allow resizing without swapping buffers */
	NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
		fb->width, fb->height, nvbo->offset, nvbo);

	if (dev_is_pci(dev->dev))
		vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);

	return 0;

out_unlock:
	if (chan)
		nouveau_vma_del(&fbcon->vma);
	nouveau_bo_unmap(nvbo);
out_unpin:
	nouveau_bo_unpin(nvbo);
out_unref:
	nouveau_bo_ref(NULL, &nvbo);
out:
	return ret;
}

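/* Unregister the fbdev and release the framebuffer along with its channel
 * mapping, pin and backing buffer object.
 */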
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
	struct drm_framebuffer *fb = fbcon->helper.fb;
	struct nouveau_bo *nvbo;

	drm_fb_helper_unregister_fbi(&fbcon->helper);
	drm_fb_helper_fini(&fbcon->helper);

	if (fb && fb->obj[0]) {
		nvbo = nouveau_gem_object(fb->obj[0]);
		nouveau_vma_del(&fbcon->vma);
		nouveau_bo_unmap(nvbo);
		nouveau_bo_unpin(nvbo);
		drm_framebuffer_put(fb);
	}

	return 0;
}

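/* Called when an accelerated operation fails: log the lockup and force all
 * further fbcon rendering down the software path.
 */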
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);

	NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}

static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
	.fb_probe = nouveau_fbcon_create,
};

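/* Worker for nouveau_fbcon_set_suspend(): performs the actual fbdev
 * suspend/resume under console_lock; on resume it also replays deferred
 * hotplug events and holds a temporary runtime PM reference.
 */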
static void
nouveau_fbcon_set_suspend_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
	int state = READ_ONCE(drm->fbcon_new_state);

	if (state == FBINFO_STATE_RUNNING)
		pm_runtime_get_sync(drm->dev->dev);

	console_lock();
	if (state == FBINFO_STATE_RUNNING)
		nouveau_fbcon_accel_restore(drm->dev);
	drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
	if (state != FBINFO_STATE_RUNNING)
		nouveau_fbcon_accel_save_disable(drm->dev);
	console_unlock();

	if (state == FBINFO_STATE_RUNNING) {
		nouveau_fbcon_hotplug_resume(drm->fbcon);
		pm_runtime_mark_last_busy(drm->dev->dev);
		pm_runtime_put_sync(drm->dev->dev);
	}
}

void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (!drm->fbcon)
		return;

	drm->fbcon_new_state = state;
	/* Since runtime resume can happen as a result of a sysfs operation,
	 * it's possible we already have the console locked. So handle fbcon
	 * init/deinit from a separate work item.
	 */
	schedule_work(&drm->fbcon_work);
}

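/* Hotplug handler for the fbdev console.  Events that arrive while the GPU
 * is runtime suspending are deferred and replayed from
 * nouveau_fbcon_hotplug_resume().
 */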
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	int ret;

	if (!fbcon)
		return;

	mutex_lock(&fbcon->hotplug_lock);

	ret = pm_runtime_get(dev->dev);
	if (ret == 1 || ret == -EACCES) {
		drm_fb_helper_hotplug_event(&fbcon->helper);

		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	} else if (ret == 0) {
		/* If the GPU was already in the process of suspending before
		 * this event happened, then we can't block here as we'll
		 * deadlock the runtime pmops since they wait for us to
		 * finish. So, just defer this event for when we runtime
		 * resume again. It will be handled by fbcon_work.
		 */
		NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
		fbcon->hotplug_waiting = true;
		pm_runtime_put_noidle(drm->dev->dev);
	} else {
		DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
			 ret);
	}

	mutex_unlock(&fbcon->hotplug_lock);
}

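/* Replay a hotplug event that was deferred while the GPU was runtime
 * suspended.
 */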
void
nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
{
	struct nouveau_drm *drm;

	if (!fbcon)
		return;
	drm = nouveau_drm(fbcon->helper.dev);

	mutex_lock(&fbcon->hotplug_lock);
	if (fbcon->hotplug_waiting) {
		fbcon->hotplug_waiting = false;

		NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
		drm_fb_helper_hotplug_event(&fbcon->helper);
	}
	mutex_unlock(&fbcon->hotplug_lock);
}

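/* Set up fbdev emulation: pick a console depth from the fbcon_bpp parameter
 * (or from the amount of VRAM), then let the fb helper create the initial
 * configuration.
 */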
int
nouveau_fbcon_init(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon;
	int preferred_bpp = nouveau_fbcon_bpp;
	int ret;

	if (!dev->mode_config.num_crtc ||
	    (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return 0;

	fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
	if (!fbcon)
		return -ENOMEM;

	drm->fbcon = fbcon;
	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
	mutex_init(&fbcon->hotplug_lock);

	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbcon->helper);
	if (ret)
		goto free;

	if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
		if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
			preferred_bpp = 8;
		else
		if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
			preferred_bpp = 16;
		else
			preferred_bpp = 32;
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
	if (ret)
		goto fini;

	if (fbcon->helper.fbdev)
		fbcon->helper.fbdev->pixmap.buf_align = 4;
	return 0;

fini:
	drm_fb_helper_fini(&fbcon->helper);
free:
	kfree(fbcon);
	drm->fbcon = NULL;
	return ret;
}

void
nouveau_fbcon_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (!drm->fbcon)
		return;

	nouveau_fbcon_accel_fini(dev);
	nouveau_fbcon_destroy(dev, drm->fbcon);
	kfree(drm->fbcon);
	drm->fbcon = NULL;
}