/* xref: /freebsd/sys/dev/drm2/drm_os_freebsd.c (revision 685dc743) */

#include <sys/cdefs.h>
#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
    "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define NSEC_PER_USEC	1000L
#define NSEC_PER_SEC	1000000000L

int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	long rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}
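
/*
 * Worked example: a timeval of { .tv_sec = 1, .tv_usec = 500000 } converts
 * to 1 * 1000000000 + 500000 * 1000 = 1500000000 ns, and ns_to_timeval()
 * maps that back to { 1, 500000 }.  Negative inputs round toward minus
 * infinity, so ns_to_timeval(-1) yields { -1, 999999 }.
 */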

/* Copied from OFED. */
unsigned long drm_linux_timer_hz_mask;

static void
drm_linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	drm_linux_timer_hz_mask = 1;
	while (drm_linux_timer_hz_mask < (unsigned long)hz)
		drm_linux_timer_hz_mask *= 2;
	drm_linux_timer_hz_mask--;
}
SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init, NULL);
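
/*
 * Illustrative example: with hz = 1000 (the common default), the loop above
 * walks the mask through 1, 2, 4, ..., 1024 and then subtracts one, leaving
 * drm_linux_timer_hz_mask = 1023 (0x3ff), i.e. one less than the smallest
 * power of two that is >= hz.
 */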

static const drm_pci_id_list_t *
drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return (&idlist[i]);
		}
	}
	return (NULL);
}

/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.
 */
int
drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist)
{
	const drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY ||
	    (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	     pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return (-ENXIO);

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (device_get_desc(kdev) == NULL) {
			DRM_DEBUG("%s desc: %s\n",
			    device_get_nameunit(kdev), id_entry->name);
			device_set_desc(kdev, id_entry->name);
		}
#if !defined(__arm__)
		DRM_OBSOLETE(kdev);
#endif
		return (-BUS_PROBE_GENERIC);
	}

	return (-ENXIO);
}
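
/*
 * Illustrative usage sketch (foo_probe and foo_pci_idlist are hypothetical):
 * since the helper returns Linux-style negated values, a driver's newbus
 * probe method typically forwards the negated result so that a match comes
 * back as BUS_PROBE_GENERIC and a miss as ENXIO.
 *
 *	static int
 *	foo_probe(device_t kdev)
 *	{
 *		return (-drm_probe_helper(kdev, foo_pci_idlist));
 *	}
 */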

/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.
 */
int
drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
	struct drm_device *dev;
	int vendor, device;
	int ret;

	dev = device_get_softc(kdev);

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);
	dev->id_entry = drm_find_description(vendor, device, idlist);

	ret = drm_get_pci_dev(kdev, dev, driver);

	return (ret);
}
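
/*
 * Illustrative usage sketch (foo_attach, foo_pci_idlist and foo_driver are
 * hypothetical): the attach method passes the same ID list plus the driver's
 * struct drm_driver and, as with probe, negates the Linux-style return value
 * for newbus.
 *
 *	static int
 *	foo_attach(device_t kdev)
 *	{
 *		return (-drm_attach_helper(kdev, foo_pci_idlist, &foo_driver));
 *	}
 */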

int
drm_generic_suspend(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting suspend\n");

	dev = device_get_softc(kdev);
	if (dev->driver->suspend) {
		pm_message_t state;

		state.event = PM_EVENT_SUSPEND;
		error = -dev->driver->suspend(dev, state);
		if (error)
			goto out;
	}

	error = bus_generic_suspend(kdev);

out:
	DRM_DEBUG_KMS("Finished suspend: %d\n", error);

	return error;
}

int
drm_generic_resume(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting resume\n");

	dev = device_get_softc(kdev);
	if (dev->driver->resume) {
		error = -dev->driver->resume(dev);
		if (error)
			goto out;
	}

	error = bus_generic_resume(kdev);

out:
	DRM_DEBUG_KMS("Finished resume: %d\n", error);

	return error;
}

int
drm_generic_detach(device_t kdev)
{
	struct drm_device *dev;
	int i;

	dev = device_get_softc(kdev);

	drm_put_dev(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	return (0);
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	     dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);

	return (0);
}
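
/*
 * The string published above uses the Linux-style PCI bus ID format, so a
 * device in domain 0, bus 0, slot 2, function 0 is reported as
 * "pci:0000:00:02.0"; the exact sysctl path depends on the oid passed in
 * as "top".
 */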

static int
drm_device_find_capability(struct drm_device *dev, int cap)
{

	return (pci_find_cap(dev->dev, cap, NULL) == 0);
}

int
drm_pci_device_is_agp(struct drm_device *dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int ret;

		/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
		 * AGP, 2 = fall back to PCI capability
		 */
		ret = (*dev->driver->device_is_agp)(dev);
		if (ret != DRM_MIGHT_BE_AGP)
			return ret;
	}

	return (drm_device_find_capability(dev, PCIY_AGP));
}
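
/*
 * Illustrative sketch of a driver-supplied device_is_agp hook
 * (foo_device_is_agp and foo_chip_is_pci_only are hypothetical): returning
 * DRM_MIGHT_BE_AGP defers the decision to the PCIY_AGP capability lookup
 * above, while 0 and 1 are taken as definitive answers.
 *
 *	static int
 *	foo_device_is_agp(struct drm_device *dev)
 *	{
 *		if (foo_chip_is_pci_only(dev))
 *			return (0);
 *		return (DRM_MIGHT_BE_AGP);
 *	}
 */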

int
drm_pci_device_is_pcie(struct drm_device *dev)
{

	return (drm_device_find_capability(dev, PCIY_EXPRESS));
}

static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			res = false;
			goto out;
		}
	}
out:
	freeenv(hw_vendor);
	freeenv(hw_prod);

	return (res);
}

bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	bool res;

	for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			res = true;
			if (dsi->callback != NULL && dsi->callback(dsi))
				break;
		}
	}
	return (res);
}
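
/*
 * Illustrative usage sketch (foo_quirk_table and foo_apply_quirk are
 * hypothetical; the field names follow the slot/substr/callback members
 * used above).  dmi_check_system() walks the table until it reaches an
 * entry whose first match slot is zero, and runs the callback for every
 * entry whose populated matches all agree with the smbios.planar.* kenv
 * values:
 *
 *	static const struct dmi_system_id foo_quirk_table[] = {
 *		{
 *			.callback = foo_apply_quirk,
 *			.matches = {
 *				{ .slot = DMI_SYS_VENDOR,
 *				  .substr = "Example Inc." },
 *				{ .slot = DMI_PRODUCT_NAME,
 *				  .substr = "Example 1000" },
 *			},
 *		},
 *		{ }
 *	};
 *
 *	(void)dmi_check_system(foo_quirk_table);
 */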

#if __OS_HAS_MTRR
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_UPDATE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}

int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_REMOVE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}
#endif
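
/*
 * Illustrative sketch (aperture_base/aperture_size are placeholders and
 * DRM_MTRR_WC is assumed to be this port's write-combining flag): a driver
 * would typically mark its framebuffer aperture write-combining at load
 * time and drop the range again on unload.  The handle argument of
 * drm_mtrr_del() is unused on FreeBSD.
 *
 *	drm_mtrr_add(aperture_base, aperture_size, DRM_MTRR_WC);
 *	...
 *	drm_mtrr_del(0, aperture_base, aperture_size, DRM_MTRR_WC);
 */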

void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_pages(pages, num_pages);
#else
	DRM_ERROR("drm_clflush_pages not implemented on this architecture");
#endif
}

void
drm_clflush_virt_range(char *addr, unsigned long length)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_force_invalidate_cache_range((vm_offset_t)addr,
	    (vm_offset_t)addr + length);
#else
	DRM_ERROR("drm_clflush_virt_range not implemented on this architecture");
#endif
}

void
hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
    char *linebuf, size_t linebuflen, bool ascii __unused)
{
	int i, j, c;

	i = j = 0;

	while (i < len && j <= linebuflen) {
		c = ((const char *)buf)[i];

		if (i != 0) {
			if (i % rowsize == 0) {
				/* Newline required. */
				sprintf(linebuf + j, "\n");
				++j;
			} else if (i % groupsize == 0) {
				/* Space required. */
				sprintf(linebuf + j, " ");
				++j;
			}
		}

		if (j > linebuflen - 4)
			break;

		sprintf(linebuf + j, "%02X", c);
		j += 2;

		++i;
	}

	if (j <= linebuflen)
		sprintf(linebuf + j, "\n");
}
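
/*
 * Worked example: dumping the four bytes { 0xde, 0xad, 0xbe, 0xef } with
 * rowsize 16 and groupsize 2 into a sufficiently large buffer produces
 * "DEAD BEEF\n": uppercase hex, a space after every two input bytes and a
 * newline after each full row of sixteen.
 */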

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN
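
/*
 * Concretely (assuming the usual ioctl encodings on both sides), FreeBSD
 * keeps IOC_IN in bit 31 and IOC_OUT in bit 30, while Linux uses bit 30 for
 * data written into the kernel and bit 31 for data read back out, so the
 * same bit positions carry opposite meanings and drm_linux_ioctl() below
 * swaps them before dispatching.
 */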

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */

static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};

DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);
499