1 /* 2 * Copyright © 2007 David Airlie 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * David Airlie 25 */ 26 #include <linux/module.h> 27 #include <linux/slab.h> 28 #include <linux/fb.h> 29 #include <linux/pm_runtime.h> 30 31 #include <drm/drmP.h> 32 #include <drm/drm_crtc.h> 33 #include <drm/drm_crtc_helper.h> 34 #include <drm/amdgpu_drm.h> 35 #include "amdgpu.h" 36 #include "cikd.h" 37 38 #include <drm/drm_fb_helper.h> 39 40 #include <linux/vga_switcheroo.h> 41 42 #include "amdgpu_display.h" 43 44 /* object hierarchy - 45 this contains a helper + a amdgpu fb 46 the helper contains a pointer to amdgpu framebuffer baseclass. 
 */

/*
 * Disabled fbdev open/release hooks: they would take/release a runtime-PM
 * reference on the device for the lifetime of an fbdev client.  Not wired
 * into amdgpufb_ops in this port (see the matching #if 0 below).
 */
#if 0
static int
amdgpufb_open(struct fb_info *info, int user)
{
	struct amdgpu_fbdev *rfbdev = info->par;
	struct amdgpu_device *adev = rfbdev->adev;
	int ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0 && ret != -EACCES) {
		/* -EACCES means runtime PM is disabled; treat as success. */
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return ret;
	}
	return 0;
}

static int
amdgpufb_release(struct fb_info *info, int user)
{
	struct amdgpu_fbdev *rfbdev = info->par;
	struct amdgpu_device *adev = rfbdev->adev;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
	return 0;
}
#endif

/*
 * fbdev operations table.  Only the DRM helper defaults are active in this
 * port; the Linux-specific accel/PM callbacks are compiled out.
 */
static struct fb_ops amdgpufb_ops = {
#if 0
	.owner = THIS_MODULE,
#endif
	DRM_FB_HELPER_DEFAULT_OPS,
#if 0
	.fb_open = amdgpufb_open,
	.fb_release = amdgpufb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	#endif
};


/*
 * amdgpu_align_pitch - align a scanout width to the display engine's
 * pitch requirement and convert it to bytes.
 *
 * @adev:  device (unused in the visible body)
 * @width: width in pixels
 * @cpp:   bytes per pixel (1, 2, 3 or 4)
 * @tiled: tiling requested (unused in the visible body)
 *
 * The pixel alignment shrinks as cpp grows (256/128/64 pixels) so the byte
 * pitch alignment stays comparable.  Returns the aligned pitch in BYTES
 * (aligned pixel count * cpp).  For cpp values outside 1..4 the mask stays
 * 0 and the width is returned unaligned (no default case in the switch).
 */
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	/* round up to the next multiple of (pitch_mask + 1) pixels */
	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

/*
 * Tear down the pinned fbcon BO: kunmap + unpin (only if the reserve
 * succeeds), then drop the GEM reference.  A failed reserve skips the
 * unmap/unpin but still drops the reference.
 */
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
	int ret;

	ret = amdgpu_bo_reserve(abo, true);
	if (likely(ret == 0)) {
		amdgpu_bo_kunmap(abo);
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}
	drm_gem_object_put_unlocked(gobj);
}

/*
 * Allocate, pin and CPU-map the backing BO for the fbdev console.
 *
 * On success *gobj_p holds one GEM reference to a pinned, kmapped BO and
 * mode_cmd->pitches[0] has been filled in.  On failure *gobj_p is NULL.
 * Note: any amdgpu_gem_object_create() failure is reported as -ENOMEM and
 * the pr_err prints the requested size, not the error code.
 */
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0, domain;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;

	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
						  fb_tiled);
	domain = amdgpu_display_supported_domains(adev);

	/* height padded to 8 rows, total size padded to a whole page */
	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
				       AMDGPU_GEM_CREATE_VRAM_CLEARED,
				       ttm_bo_type_kernel, NULL, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
		return -ENOMEM;
	}
	abo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(abo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		ret = amdgpu_bo_set_tiling_flags(abo,
						 tiling_flags);
		/* tiling failure is non-fatal: log and keep going */
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}


	ret = amdgpu_bo_pin(abo, domain);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		goto out_unref;
	}

	/* make sure the BO is GPU-addressable before scanout uses it */
	ret = amdgpu_ttm_alloc_gart(&abo->tbo);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		dev_err(adev->dev, "%p bind failed\n", abo);
		goto out_unref;
	}

	ret = amdgpu_bo_kmap(abo, NULL);
	amdgpu_bo_unreserve(abo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;
out_unref:
	/* unwinds pin/kmap as far as they got and drops the GEM ref */
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

/*
 * drm_fb_helper_funcs.fb_probe callback: allocate the fbcon BO, register
 * the DRM framebuffer around it and fill in the fb_info.  The
 * __DragonFly__ branch fills the native fb_info fields (vaddr/paddr/
 * stride); the #else branch is the original Linux fbdev path.
 */
static int amdgpufb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
	struct amdgpu_device *adev = rfbdev->adev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	int ret;
	device_t vga_dev = device_get_parent(adev->dev->bsddev);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* 24bpp packed is not scanout-friendly; promote to 32bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}

	abo = gem_to_amdgpu_bo(gobj);

	/* okay we have an object now allocate the framebuffer */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	info->par = rfbdev;
#if 0
	info->skip_vt_switch = true;
#endif

	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
					      &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
		goto out;
	}

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;

#ifdef __DragonFly__
	info->width = sizes->fb_width;
	info->height = sizes->fb_height;
	info->stride = fb->pitches[0];
	info->depth = sizes->surface_bpp;
	info->is_vga_boot_display = vga_pci_is_boot_display(vga_dev);
	info->fbops = amdgpufb_ops;

	/* physical scanout address = aperture base + offset inside VRAM */
	unsigned long tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
	info->vaddr = (vm_offset_t)amdgpu_bo_kptr(abo);
	info->paddr = adev->gmc.aper_base + tmp;
#else

	strcpy(info->fix.id, "amdgpudrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

	info->fbops = &amdgpufb_ops;

	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
	info->fix.smem_start = adev->gmc.aper_base + tmp;
	info->fix.smem_len = amdgpu_bo_size(abo);
	info->screen_base = amdgpu_bo_kptr(abo);
	info->screen_size = amdgpu_bo_size(abo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = adev->gmc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out;
	}
#endif

	DRM_INFO("fb mappable at 0x%lX\n", info->paddr);
	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO(" pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
	return 0;

out:
	/*
	 * NOTE(review): empty branch kept from the upstream error path; the
	 * pinned BO is NOT torn down when alloc_fbi/framebuffer_init fail
	 * before fb is set — confirm against upstream before "fixing".
	 */
	if (abo) {

	}
	if (fb && ret) {
		drm_gem_object_put_unlocked(gobj);
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		/*
		 * NOTE(review): fb is &rfbdev->rfb.base, embedded in rfbdev
		 * (not separately allocated); this kfree mirrors the upstream
		 * error path but looks suspect — verify.
		 */
		kfree(fb);
	}
	return ret;
}

/*
 * Unregister the fbdev and release the framebuffer + its pinned BO.
 * Returns 0 unconditionally.
 */
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
	int i;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);

	if (rfb->base.obj[0]) {
		/*
		 * Drop one reference per plane.  NOTE(review): the loop always
		 * puts obj[0] rather than obj[i] — presumably balancing
		 * per-plane refs taken at framebuffer init; confirm against
		 * amdgpu_display_framebuffer_init.
		 */
		for (i = 0; i < rfb->base.format->num_planes; i++)
			drm_gem_object_put(rfb->base.obj[0]);
		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
		rfb->base.obj[0] = NULL;
		drm_framebuffer_unregister_private(&rfb->base);
		drm_framebuffer_cleanup(&rfb->base);
	}
	drm_fb_helper_fini(&rfbdev->helper);

	return 0;
}

static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};

/*
 * Create and register the fbdev emulation for this device.  Skipped (with
 * success) on headless hardware or when no connectors exist.  Returns 0 or
 * a negative errno.
 */
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
	struct amdgpu_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't init fbdev on hw without DCE */
	if (!adev->mode_info.mode_config_initialized)
		return 0;

	/* don't init fbdev if there are no connectors */
	if (list_empty(&adev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on low vram cards */
	if (adev->gmc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->adev = adev;
	adev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
			      &amdgpu_fb_helper_funcs);

	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
				 AMDGPUFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_disable_unused_functions(adev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}

/* Tear down the fbdev created by amdgpu_fbdev_init(); no-op if absent. */
void amdgpu_fbdev_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.rfbdev)
		return;

	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
	kfree(adev->mode_info.rfbdev);
	adev->mode_info.rfbdev = NULL;
}

/* Suspend/resume hook for the fbdev console — stubbed out in this port. */
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
#if 0
	if (adev->mode_info.rfbdev)
		drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
						   state);
#endif
}

/*
 * Return the size in bytes of the fbdev scanout BO, or 0 when no fbdev
 * was created.
 */
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
{
	struct amdgpu_bo *robj;
	int size = 0;

	if (!adev->mode_info.rfbdev)
		return 0;

	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
	size += amdgpu_bo_size(robj);
	return size;
}

/* Return true iff @robj is the BO backing the fbdev framebuffer. */
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	if (!adev->mode_info.rfbdev)
		return false;
	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
		return true;
	return false;
}