/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * drm_pci.c -- legacy DRM PCI support.
 *
 * NOTE(review): this appears to be a BSD (DragonFly-style) port of the Linux
 * drm_pci.c -- it mixes Linux APIs (dma_alloc_coherent, EXPORT_SYMBOL) with
 * BSD kernel APIs (three-argument kmalloc with M_DRM/M_WAITOK/M_NULLOK,
 * device_get_parent, pci_read_config) -- confirm against the surrounding tree.
 */

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * The block is zeroed before it is returned; both the kernel virtual
 * address (dmah->vaddr) and the bus address (dmah->busaddr) are filled
 * in on the returned handle.
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* pci_alloc_consistent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment
	 */
	if (align > size)
		return NULL;

	/* M_NULLOK: may sleep but returns NULL instead of panicking on
	 * allocation failure (BSD-style kmalloc flags). */
	dmah = kmalloc(sizeof(drm_dma_handle_t), M_DRM, M_WAITOK | M_NULLOK);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	/* Zero the block so callers always see cleared DMA memory. */
	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	/* NOTE(review): the loop body below is compiled out (#if 0), so this
	 * loop currently only walks the address range without side effects;
	 * it is kept to mirror the upstream Linux page-reservation code. */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if 0
		SetPageReserved(virt_to_page((void *)addr));
#endif
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 * The handle itself (dmah) is NOT freed here; see drm_pci_free() for the
 * variant that also releases the descriptor.
 */
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem?
		 */
		/* Unreserve */
		/* NOTE(review): as in drm_pci_alloc(), the loop body is
		 * compiled out; the walk is a no-op kept for parity with
		 * the upstream page-unreserve code. */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#if 0
			ClearPageReserved(virt_to_page((void *)addr));
#endif
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 *
 * Releases both the DMA memory and the handle allocated by drm_pci_alloc().
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_legacy_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);

#ifdef CONFIG_PCI

/*
 * Return the PCI domain number for @dev, honoring the historical quirk
 * that pre-1.4 interface versions always reported domain 0 (except on
 * alpha, which was correct from the start).
 */
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

#if 0
	return pci_domain_nr(dev->pdev->bus);
#else
	/* Ported code path: the domain is cached on the drm_device. */
	return dev->pci_domain;
#endif
}

/*
 * drm_pci_set_busid - build the canonical "pci:DDDD:BB:SS.F" unique
 * identifier for @dev and store it on @master.
 *
 * Returns 0 on success or -ENOMEM if the string allocation fails.
 */
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
				   drm_get_pci_domain(dev),
				   dev->pdev->bus->number,
				   PCI_SLOT(dev->pdev->devfn),
				   PCI_FUNC(dev->pdev->devfn));
	if (!master->unique)
		return -ENOMEM;

	master->unique_len = strlen(master->unique);
	return 0;
}
EXPORT_SYMBOL(drm_pci_set_busid);

/*
 * drm_pci_set_unique - validate and install a userspace-supplied busid.
 *
 * Copies the user's unique string, parses it as "PCI:bus:slot:func"
 * (where the domain is encoded in the upper bits of the bus number),
 * and rejects it unless it matches this device's actual location.
 *
 * Returns 0 on success, -ENOMEM/-EFAULT/-EINVAL on failure.
 * NOTE(review): u->unique_len is user-controlled and not bounded here
 * before the kmalloc -- presumably validated by the caller; confirm.
 */
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique = kmalloc(master->unique_len + 1, M_DRM, M_NOWAIT);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	/* NUL-terminate: copy_from_user copied only unique_len bytes. */
	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = ksscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	/* The domain rides in the high bits of the parsed bus number. */
	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}

/*
 * Compare the busid in @p against this device's location; on a match,
 * report the device's IRQ back through @p->irq.
 * Returns 0 on match or -EINVAL if the busid refers to another device.
 */
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	/* busnum packs domain (high bits) and bus (low byte), as above. */
	if ((p->busnum >> 8) != dev->pci_domain ||
	    (p->busnum & 0xff) != dev->pci_bus ||
	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);
	return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device that this DRM instance attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	/* This legacy ioctl is meaningless for KMS drivers. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* UMS was only ever supported on PCI devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}

/**
 * drm_pci_init - Register matching PCI devices with the DRM subsystem
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Initializes a drm_device structures, registering the stubs and initializing
 * the AGP device.
 *
 * NOTE: This function is deprecated. Modern modesetting drm drivers should use
 * pci_register_driver() directly, this function only provides shadow-binding
 * support for old legacy drivers on top of that core pci function.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
#if 0
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;
#endif

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* NOTE(review): the legacy "stealth mode" manual PCI scan below is
	 * compiled out in this port, so non-KMS drivers currently register
	 * nothing here and just return 0. */
#if 0
	/* If not using KMS, fall back to stealth mode manual scanning. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class.  If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
#endif
	return 0;
}

/*
 * drm_pcie_get_speed_cap_mask - probe the PCIe link-speed capabilities of
 * the root port above @dev and report them as a DRM_PCIE_SPEED_* bitmask.
 *
 * Reads the PCIe Link Capabilities (and, for r3.0+ ports, Link
 * Capabilities 2) registers of the parent bridge using the BSD-native
 * pci config accessors.  Returns 0 on success with *mask filled in, or
 * -EINVAL if the device/root port is missing, blacklisted, or not PCIe.
 */
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	device_t root;
	int pos;
	u32 lnkcap = 0, lnkcap2 = 0;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = device_get_parent(dev->dev->bsddev);

	/* VIA and ServerWorks root ports are known-broken for this probe;
	 * bail out rather than report bogus speed caps. */
	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	/* Locate the PCI Express capability on the root port. */
	pos = 0;
	pci_find_extcap(root, PCIY_EXPRESS, &pos);
	if (!pos)
		return -EINVAL;

	lnkcap = pci_read_config(root, pos + PCIER_LINKCAP, 4);
	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	/* Keep only the Supported Link Speeds fields of each register. */
	lnkcap &= PCIEM_LNKCAP_SPEED_MASK;
	lnkcap2 &= 0xfe;

#define PCI_EXP_LNKCAP_SLS_2_5GB PCIEM_LNKCAP_SPEED_2_5
#define PCI_EXP_LNKCAP_SLS_5_0GB PCIEM_LNKCAP_SPEED_5
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {	/* pre-r3.0 */
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);

/* NOTE(review): max-link-width probing is compiled out in this port. */
#if 0
int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
{
	struct pci_dev *root;
	u32 lnkcap;

	*mlw = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);

	/* Maximum Link Width lives in bits 9:4 of LNKCAP. */
	*mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_max_link_width);
#endif

#else

/* Stubs for kernels built without PCI support: every entry point fails
 * (or is a no-op) so callers degrade gracefully. */

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	return -EINVAL;
}
#endif

EXPORT_SYMBOL(drm_pci_init);