/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_fb.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"

#include <drm/drm_fb_helper.h>

/* object hierarchy -
   this contains a helper + a radeon fb
   the helper contains a pointer to radeon framebuffer baseclass.
40 */ 41 struct radeon_fbdev { 42 struct drm_fb_helper helper; 43 struct radeon_framebuffer rfb; 44 struct list_head fbdev_list; 45 struct radeon_device *rdev; 46 }; 47 48 #ifdef DUMBBELL_WIP 49 static struct fb_ops radeonfb_ops = { 50 .owner = THIS_MODULE, 51 .fb_check_var = drm_fb_helper_check_var, 52 .fb_set_par = drm_fb_helper_set_par, 53 .fb_fillrect = cfb_fillrect, 54 .fb_copyarea = cfb_copyarea, 55 .fb_imageblit = cfb_imageblit, 56 .fb_pan_display = drm_fb_helper_pan_display, 57 .fb_blank = drm_fb_helper_blank, 58 .fb_setcmap = drm_fb_helper_setcmap, 59 .fb_debug_enter = drm_fb_helper_debug_enter, 60 .fb_debug_leave = drm_fb_helper_debug_leave, 61 }; 62 #endif /* DUMBBELL_WIP */ 63 64 65 int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) 66 { 67 int aligned = width; 68 int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; 69 int pitch_mask = 0; 70 71 switch (bpp / 8) { 72 case 1: 73 pitch_mask = align_large ? 255 : 127; 74 break; 75 case 2: 76 pitch_mask = align_large ? 127 : 31; 77 break; 78 case 3: 79 case 4: 80 pitch_mask = align_large ? 
63 : 15; 81 break; 82 } 83 84 aligned += pitch_mask; 85 aligned &= ~pitch_mask; 86 return aligned; 87 } 88 89 static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) 90 { 91 struct radeon_bo *rbo = gem_to_radeon_bo(gobj); 92 int ret; 93 94 ret = radeon_bo_reserve(rbo, false); 95 if (likely(ret == 0)) { 96 radeon_bo_kunmap(rbo); 97 radeon_bo_unpin(rbo); 98 radeon_bo_unreserve(rbo); 99 } 100 drm_gem_object_unreference_unlocked(gobj); 101 } 102 103 static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, 104 struct drm_mode_fb_cmd2 *mode_cmd, 105 struct drm_gem_object **gobj_p) 106 { 107 struct radeon_device *rdev = rfbdev->rdev; 108 struct drm_gem_object *gobj = NULL; 109 struct radeon_bo *rbo = NULL; 110 bool fb_tiled = false; /* useful for testing */ 111 u32 tiling_flags = 0; 112 int ret; 113 int aligned_size, size; 114 int height = mode_cmd->height; 115 u32 bpp, depth; 116 117 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); 118 119 /* need to align pitch with crtc limits */ 120 mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp, 121 fb_tiled) * ((bpp + 1) / 8); 122 123 if (rdev->family >= CHIP_R600) 124 height = roundup2(mode_cmd->height, 8); 125 size = mode_cmd->pitches[0] * height; 126 aligned_size = roundup2(size, PAGE_SIZE); 127 ret = radeon_gem_object_create(rdev, aligned_size, 0, 128 RADEON_GEM_DOMAIN_VRAM, 129 false, true, 130 &gobj); 131 if (ret) { 132 DRM_ERROR("failed to allocate framebuffer (%d)\n", 133 aligned_size); 134 return -ENOMEM; 135 } 136 rbo = gem_to_radeon_bo(gobj); 137 138 if (fb_tiled) 139 tiling_flags = RADEON_TILING_MACRO; 140 141 #ifdef __BIG_ENDIAN 142 switch (bpp) { 143 case 32: 144 tiling_flags |= RADEON_TILING_SWAP_32BIT; 145 break; 146 case 16: 147 tiling_flags |= RADEON_TILING_SWAP_16BIT; 148 default: 149 break; 150 } 151 #endif 152 153 if (tiling_flags) { 154 ret = radeon_bo_set_tiling_flags(rbo, 155 tiling_flags | RADEON_TILING_SURFACE, 156 mode_cmd->pitches[0]); 157 if 
(ret) 158 dev_err(rdev->dev, "FB failed to set tiling flags\n"); 159 } 160 161 162 ret = radeon_bo_reserve(rbo, false); 163 if (unlikely(ret != 0)) 164 goto out_unref; 165 /* Only 27 bit offset for legacy CRTC */ 166 ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 167 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, 168 NULL); 169 if (ret) { 170 radeon_bo_unreserve(rbo); 171 goto out_unref; 172 } 173 if (fb_tiled) 174 radeon_bo_check_tiling(rbo, 0, 0); 175 ret = radeon_bo_kmap(rbo, NULL); 176 radeon_bo_unreserve(rbo); 177 if (ret) { 178 goto out_unref; 179 } 180 181 *gobj_p = gobj; 182 return 0; 183 out_unref: 184 radeonfb_destroy_pinned_object(gobj); 185 *gobj_p = NULL; 186 return ret; 187 } 188 189 static int radeonfb_create(struct radeon_fbdev *rfbdev, 190 struct drm_fb_helper_surface_size *sizes) 191 { 192 struct radeon_device *rdev = rfbdev->rdev; 193 #ifdef DUMBBELL_WIP 194 struct fb_info *info; 195 #endif /* DUMBBELL_WIP */ 196 struct drm_framebuffer *fb = NULL; 197 struct drm_mode_fb_cmd2 mode_cmd; 198 struct drm_gem_object *gobj = NULL; 199 struct radeon_bo *rbo = NULL; 200 #ifdef DUMBBELL_WIP 201 device_t device = rdev->dev; 202 #endif /* DUMBBELL_WIP */ 203 int ret; 204 #ifdef DUMBBELL_WIP 205 unsigned long tmp; 206 #endif /* DUMBBELL_WIP */ 207 208 mode_cmd.width = sizes->surface_width; 209 mode_cmd.height = sizes->surface_height; 210 211 /* avivo can't scanout real 24bpp */ 212 if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) 213 sizes->surface_bpp = 32; 214 215 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 216 sizes->surface_depth); 217 218 ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); 219 if (ret) { 220 DRM_ERROR("failed to create fbcon object %d\n", ret); 221 return ret; 222 } 223 224 rbo = gem_to_radeon_bo(gobj); 225 226 #ifdef DUMBBELL_WIP 227 /* okay we have an object now allocate the framebuffer */ 228 info = framebuffer_alloc(0, device); 229 if (info == NULL) { 230 ret = -ENOMEM; 231 goto out_unref; 
232 } 233 234 info->par = rfbdev; 235 #endif /* DUMBBELL_WIP */ 236 237 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 238 if (ret) { 239 DRM_ERROR("failed to initalise framebuffer %d\n", ret); 240 goto out_unref; 241 } 242 243 fb = &rfbdev->rfb.base; 244 245 /* setup helper */ 246 rfbdev->helper.fb = fb; 247 #ifdef DUMBBELL_WIP 248 rfbdev->helper.fbdev = info; 249 250 memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); 251 252 strcpy(info->fix.id, "radeondrmfb"); 253 254 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 255 256 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 257 info->fbops = &radeonfb_ops; 258 259 tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; 260 info->fix.smem_start = rdev->mc.aper_base + tmp; 261 info->fix.smem_len = radeon_bo_size(rbo); 262 info->screen_base = rbo->kptr; 263 info->screen_size = radeon_bo_size(rbo); 264 265 drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); 266 267 /* setup aperture base/size for vesafb takeover */ 268 info->apertures = alloc_apertures(1); 269 if (!info->apertures) { 270 ret = -ENOMEM; 271 goto out_unref; 272 } 273 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 274 info->apertures->ranges[0].size = rdev->mc.aper_size; 275 276 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 277 278 if (info->screen_base == NULL) { 279 ret = -ENOSPC; 280 goto out_unref; 281 } 282 283 ret = fb_alloc_cmap(&info->cmap, 256, 0); 284 if (ret) { 285 ret = -ENOMEM; 286 goto out_unref; 287 } 288 289 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); 290 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); 291 DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); 292 DRM_INFO("fb depth is %d\n", fb->depth); 293 DRM_INFO(" pitch is %d\n", fb->pitches[0]); 294 295 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); 296 #endif /* DUMBBELL_WIP */ 297 return 0; 298 299 out_unref: 300 if 
(rbo) { 301 302 } 303 if (fb && ret) { 304 drm_gem_object_unreference(gobj); 305 drm_framebuffer_cleanup(fb); 306 drm_free(fb, M_DRM); /* XXX malloc'd in radeon_user_framebuffer_create? */ 307 } 308 return ret; 309 } 310 311 static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, 312 struct drm_fb_helper_surface_size *sizes) 313 { 314 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; 315 int new_fb = 0; 316 int ret; 317 318 if (!helper->fb) { 319 ret = radeonfb_create(rfbdev, sizes); 320 if (ret) 321 return ret; 322 new_fb = 1; 323 } 324 return new_fb; 325 } 326 327 void radeon_fb_output_poll_changed(struct radeon_device *rdev) 328 { 329 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); 330 } 331 332 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) 333 { 334 #ifdef DUMBBELL_WIP 335 struct fb_info *info; 336 #endif /* DUMBBELL_WIP */ 337 struct radeon_framebuffer *rfb = &rfbdev->rfb; 338 339 #ifdef DUMBBELL_WIP 340 if (rfbdev->helper.fbdev) { 341 info = rfbdev->helper.fbdev; 342 343 unregister_framebuffer(info); 344 if (info->cmap.len) 345 fb_dealloc_cmap(&info->cmap); 346 framebuffer_release(info); 347 } 348 #endif /* DUMBBELL_WIP */ 349 350 if (rfb->obj) { 351 DRM_UNLOCK(dev); /* Work around lock recursion. 
dumbbell@ */ 352 radeonfb_destroy_pinned_object(rfb->obj); 353 DRM_LOCK(dev); 354 rfb->obj = NULL; 355 } 356 drm_fb_helper_fini(&rfbdev->helper); 357 drm_framebuffer_cleanup(&rfb->base); 358 359 return 0; 360 } 361 362 static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 363 .gamma_set = radeon_crtc_fb_gamma_set, 364 .gamma_get = radeon_crtc_fb_gamma_get, 365 .fb_probe = radeon_fb_find_or_create_single, 366 }; 367 368 int radeon_fbdev_init(struct radeon_device *rdev) 369 { 370 struct radeon_fbdev *rfbdev; 371 int bpp_sel = 32; 372 int ret; 373 374 /* select 8 bpp console on RN50 or 16MB cards */ 375 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) 376 bpp_sel = 8; 377 378 rfbdev = kmalloc(sizeof(struct radeon_fbdev), M_DRM, 379 M_WAITOK | M_ZERO); 380 if (!rfbdev) 381 return -ENOMEM; 382 383 rfbdev->rdev = rdev; 384 rdev->mode_info.rfbdev = rfbdev; 385 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 386 387 ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, 388 rdev->num_crtc, 389 RADEONFB_CONN_LIMIT); 390 if (ret) { 391 drm_free(rfbdev, M_DRM); 392 return ret; 393 } 394 395 drm_fb_helper_single_add_all_connectors(&rfbdev->helper); 396 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); 397 return 0; 398 } 399 400 void radeon_fbdev_fini(struct radeon_device *rdev) 401 { 402 if (!rdev->mode_info.rfbdev) 403 return; 404 405 radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); 406 drm_free(rdev->mode_info.rfbdev, M_DRM); 407 rdev->mode_info.rfbdev = NULL; 408 } 409 410 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) 411 { 412 #ifdef DUMBBELL_WIP 413 fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); 414 #endif /* DUMBBELL_WIP */ 415 } 416 417 int radeon_fbdev_total_size(struct radeon_device *rdev) 418 { 419 struct radeon_bo *robj; 420 int size = 0; 421 422 robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); 423 size += radeon_bo_size(robj); 424 return size; 425 } 426 427 bool 
radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) 428 { 429 if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) 430 return true; 431 return false; 432 } 433