/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

#include <sys/conf.h>
#include <sys/mman.h>
#include <vm/vm_map.h>

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
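
/*
 * Illustrative note on the truncated comparison above (the addresses are
 * hypothetical): a register BAR living at physical 0x2c0000000 on a
 * 64-bit bus and a userspace request for offset 0xc0000000 are treated
 * as the same resource, since only the low 32 bits survive the legacy
 * ABI round trip.
 */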

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap(). Otherwise we run into cache aliasing problems
		 * on some platforms. On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
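
/*
 * Worked example of the SHMLBA-preserving insert above, on a hypothetical
 * platform with 4 KiB pages and SHMLBA == 16 KiB (so SHMLBA >> PAGE_SHIFT
 * is 4 and bits == 3). For an SHM map at kernel virtual address 0xd0035000:
 *
 *	user_token >> PAGE_SHIFT = 0xd0035
 *	low bits kept (mask 0x7) = 0x5
 *	add = (DRM_MAP_HASH_OFFSET >> PAGE_SHIFT) | 0x5
 *
 * With shift == 3, drm_ht_just_insert_please() only varies bits above the
 * preserved ones, so the handle it hands back agrees with the original
 * address modulo SHMLBA and mmap() can pick an alias-free virtual address.
 */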

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), M_DRM, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t) LINUX_PAGE_MASK)) || (map->size & (~LINUX_PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
#if 0
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). In that case this loop
		 * gets skipped, so we double-check that dev->agp->memory is
		 * actually set, as well as the range being invalid, before
		 * returning -EPERM.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
#endif
		return -EINVAL;	/* AGP hardware is no longer supported */
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -ENOMEM;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		list_del(&list->head);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
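
#if 0
/*
 * Illustrative, non-built sketch of how a legacy driver might use
 * drm_legacy_addmap() to expose its register BAR; my_driver_map_regs()
 * and the BAR parameters are hypothetical.
 */
static int my_driver_map_regs(struct drm_device *dev,
			      resource_size_t bar_base, unsigned int bar_len)
{
	struct drm_local_map *regs;
	int ret;

	/* Map the MMIO registers, write-combined where supported. */
	ret = drm_legacy_addmap(dev, bar_base, bar_len,
				_DRM_REGISTERS, _DRM_WRITE_COMBINING, &regs);
	if (ret)
		return ret;

	/* regs->handle now holds the ioremap()ed kernel address. */
	return 0;
}
#endif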

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * was set long ago, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it. Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = r_list->map->mtrr;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * sees if it's being used, and frees any associated resources (such as
 * MTRRs) if it's not in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty
	 * and we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				M_DRM, GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
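
/*
 * Worked example of the size/order math shared by the addbufs variants,
 * for a hypothetical request of 20000 bytes with _DRM_PAGE_ALIGN set on a
 * platform with 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	order      = order_base_2(20000) = 15  (rounded up to 32768)
 *	size       = 1 << 15             = 32768
 *	alignment  = PAGE_ALIGN(32768)   = 32768
 *	page_order = 15 - 12             = 3
 *	total      = PAGE_SIZE << 3      = 32768
 *
 * so in the PCI path each drm_pci_alloc() segment holds exactly one
 * buffer. A sub-page request without _DRM_PAGE_ALIGN (say 1024 bytes,
 * order 10) instead packs four buffers into each 4 KiB segment.
 */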

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper
				 * amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				M_DRM, GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
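
/*
 * Design note on the rollback pattern above: drm_legacy_addbufs_pci()
 * fills temp_pagelist, a copy of dma->pagelist, and only swaps it in once
 * every segment and dev_private allocation has succeeded, so a mid-loop
 * -ENOMEM leaves the device's existing page bookkeeping untouched.
 */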

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (dev->buf_use) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	lockmgr(&dev->buf_lock, LK_RELEASE);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
				M_DRM, GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
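
#if 0
/*
 * Hypothetical user-space sketch (libdrm-style, not part of this file):
 * asking the addbufs ioctl above for 32 page-aligned PCI DMA buffers of
 * 16 KiB each. The fd and error handling are placeholders.
 */
static int example_add_pci_bufs(int fd)
{
	struct drm_buf_desc desc = {
		.count = 32,
		.size  = 16384,
		.flags = _DRM_PAGE_ALIGN,	/* no AGP/SG flag: PCI path */
	};

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0)
		return -1;

	/* desc.count and desc.size now report what was actually allocated. */
	return 0;
}
#endif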

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (atomic_read(&dev->buf_alloc)) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	lockmgr(&dev->buf_lock, LK_RELEASE);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];
			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};
	return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;
	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
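
/*
 * Design note: __drm_legacy_infobufs() takes the per-entry copy-out as a
 * callback, presumably so the native ioctl above and a 32-bit compat
 * wrapper can share the locking and counting logic while each marshals
 * its own user-space layout. copy_one_buf() copies only up to
 * offsetof(struct drm_buf_desc, flags), so the kernel never writes past
 * the fields this ioctl is defined to report.
 */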

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  DRM_CURRENTPID);
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
#ifndef __DragonFly__
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	lockmgr(&dev->buf_lock, LK_EXCLUSIVE);
	if (atomic_read(&dev->buf_alloc)) {
		lockmgr(&dev->buf_lock, LK_RELEASE);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	lockmgr(&dev->buf_lock, LK_RELEASE);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token, NULL);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  0, NULL);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
#else
	return -EINVAL;
#endif
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
#ifndef __DragonFly__
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
#endif
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;
	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
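
#if 0
/*
 * Illustrative, non-built sketch: a legacy driver locating its SAREA
 * during initialization via drm_legacy_getsarea(). my_driver_find_sarea()
 * is hypothetical.
 */
static int my_driver_find_sarea(struct drm_device *dev)
{
	struct drm_local_map *sarea = drm_legacy_getsarea(dev);

	if (!sarea)
		return -EINVAL;	/* no _DRM_SHM map with _DRM_CONTAINS_LOCK yet */

	/* sarea->handle points at the vmalloc_user()ed shared area. */
	return 0;
}
#endif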