1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 
5 #include "qat_freebsd.h"
6 #include "adf_cfg.h"
7 #include "adf_common_drv.h"
8 #include "adf_accel_devices.h"
9 #include "icp_qat_uclo.h"
10 #include "icp_qat_fw.h"
11 #include "icp_qat_fw_init_admin.h"
12 #include "adf_cfg_strings.h"
13 #include "adf_uio_control.h"
14 #include "adf_uio_cleanup.h"
15 #include "adf_uio.h"
16 #include "adf_transport_access_macros.h"
17 #include "adf_transport_internal.h"
18 
19 #define ADF_DEV_PROCESSES_NAME "qat_dev_processes"
20 #define ADF_DEV_STATE_NAME "qat_dev_state"
21 
22 #define ADF_STATE_CALLOUT_TIME 10
23 
24 static const char *mtx_name = "state_callout_mtx";
25 
26 static d_open_t adf_processes_open;
27 static void adf_processes_release(void *data);
28 static d_read_t adf_processes_read;
29 static d_write_t adf_processes_write;
30 
31 static d_open_t adf_state_open;
32 static void adf_state_release(void *data);
33 static d_read_t adf_state_read;
34 static int adf_state_kqfilter(struct cdev *dev, struct knote *kn);
35 static int adf_state_kqread_event(struct knote *kn, long hint);
36 static void adf_state_kqread_detach(struct knote *kn);
37 
38 static struct callout callout;
39 static struct mtx mtx;
40 static struct service_hndl adf_state_hndl;
41 
/*
 * Listener-list node: wraps one open /dev/qat_dev_state consumer so the
 * driver can broadcast device-state events to every listener.
 */
struct entry_proc_events {
	/* Private data of the process that opened the state device. */
	struct adf_state_priv_data *proc_events;

	SLIST_ENTRY(entry_proc_events) entries_proc_events;
};
47 
/*
 * Queue node carrying one device-state event (device id + event code)
 * pending delivery to a single listener.
 */
struct entry_state {
	struct adf_state state;

	STAILQ_ENTRY(entry_state) entries_state;
};
53 
/* Head types for the listener list and the per-listener event queue. */
SLIST_HEAD(proc_events_head, entry_proc_events);
STAILQ_HEAD(state_head, entry_state);

/* Global list of all state-event listeners (guarded by "mtx"). */
static struct proc_events_head proc_events_head;
58 
/* Per-open state for /dev/qat_dev_processes. */
struct adf_processes_priv_data {
	/* Reserved per-process section name (valid when read_flag != 0). */
	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
	/* Non-zero once a name has been reserved and may be read back. */
	int read_flag;
	/* Linkage into the global processes_list. */
	struct list_head list;
};
64 
/* Per-open state for a /dev/qat_dev_state listener. */
struct adf_state_priv_data {
	/* Back-pointer to the character device that was opened. */
	struct cdev *cdev;
	/* select/poll and kqueue notification state. */
	struct selinfo rsel;
	/* Queue of pending adf_state events for this listener. */
	struct state_head state_head;
};
70 
/* Character-device switch for /dev/qat_dev_processes (open/read/write). */
static struct cdevsw adf_processes_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_processes_open,
	.d_read = adf_processes_read,
	.d_write = adf_processes_write,
	.d_name = ADF_DEV_PROCESSES_NAME,
};

/* Character-device switch for /dev/qat_dev_state (open/read/kqueue). */
static struct cdevsw adf_state_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_state_open,
	.d_read = adf_state_read,
	.d_kqfilter = adf_state_kqfilter,
	.d_name = ADF_DEV_STATE_NAME,
};

/* kqueue filter ops for EVFILT_READ on the state device. */
static struct filterops adf_state_read_filterops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = adf_state_kqread_detach,
	.f_event = adf_state_kqread_event,
};

static struct cdev *adf_processes_dev;
static struct cdev *adf_state_dev;

/* Processes that completed the name handshake (see the write handler). */
static LINUX_LIST_HEAD(processes_list);

/* Serializes access to processes_list. */
struct sx processes_list_sema;
SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list");
101 
/* Tear down the /dev/qat_dev_processes character device. */
static void
adf_chr_drv_destroy(void)
{
	destroy_dev(adf_processes_dev);
}
107 
108 static int
109 adf_chr_drv_create(void)
110 {
111 
112 	adf_processes_dev = make_dev(&adf_processes_cdevsw,
113 				     0,
114 				     UID_ROOT,
115 				     GID_WHEEL,
116 				     0600,
117 				     ADF_DEV_PROCESSES_NAME);
118 	if (adf_processes_dev == NULL) {
119 		printf("QAT: failed to create device\n");
120 		goto err_cdev_del;
121 	}
122 	return 0;
123 err_cdev_del:
124 	return EFAULT;
125 }
126 
127 static int
128 adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
129 {
130 	int i = 0, devices = 0;
131 	struct adf_accel_dev *accel_dev = NULL;
132 	struct adf_processes_priv_data *prv_data = NULL;
133 	int error = 0;
134 
135 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
136 		accel_dev = adf_devmgr_get_dev_by_id(i);
137 		if (!accel_dev)
138 			continue;
139 		if (!adf_dev_started(accel_dev))
140 			continue;
141 		devices++;
142 	}
143 	if (!devices) {
144 		printf("QAT: No active devices found.\n");
145 		return ENXIO;
146 	}
147 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
148 	if (!prv_data)
149 		return ENOMEM;
150 	INIT_LIST_HEAD(&prv_data->list);
151 	error = devfs_set_cdevpriv(prv_data, adf_processes_release);
152 	if (error) {
153 		free(prv_data, M_QAT);
154 		return error;
155 	}
156 
157 	return 0;
158 }
159 
160 static int
161 adf_get_first_started_dev(void)
162 {
163 	int i = 0;
164 	struct adf_accel_dev *accel_dev = NULL;
165 
166 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
167 		accel_dev = adf_devmgr_get_dev_by_id(i);
168 		if (!accel_dev)
169 			continue;
170 		if (adf_dev_started(accel_dev))
171 			return i;
172 	}
173 
174 	return -1;
175 }
176 
177 static int
178 adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag)
179 {
180 	struct adf_processes_priv_data *prv_data = NULL;
181 	struct adf_processes_priv_data *pdata = NULL;
182 	int dev_num = 0, pr_num = 0;
183 	struct list_head *lpos = NULL;
184 	char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 };
185 	struct adf_accel_dev *accel_dev = NULL;
186 	struct adf_cfg_section *section_ptr = NULL;
187 	bool pr_name_available = 1;
188 	uint32_t num_accel_devs = 0;
189 	int error = 0;
190 	ssize_t count;
191 	int dev_id;
192 
193 	error = devfs_get_cdevpriv((void **)&prv_data);
194 	if (error) {
195 		printf("QAT: invalid file descriptor\n");
196 		return error;
197 	}
198 
199 	if (prv_data->read_flag == 1) {
200 		printf("QAT: can only write once\n");
201 		return EBADF;
202 	}
203 	count = uio->uio_resid;
204 	if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
205 		printf("QAT: wrong size %d\n", (int)count);
206 		return EIO;
207 	}
208 
209 	error = uiomove(usr_name, count, uio);
210 	if (error) {
211 		printf("QAT: can't copy data\n");
212 		return error;
213 	}
214 
215 	/* Lock other processes and try to find out the process name */
216 	if (sx_xlock_sig(&processes_list_sema)) {
217 		printf("QAT: can't aquire process info lock\n");
218 		return EBADF;
219 	}
220 
221 	dev_id = adf_get_first_started_dev();
222 	if (-1 == dev_id) {
223 		pr_err("QAT: could not find started device\n");
224 		sx_xunlock(&processes_list_sema);
225 		return -EIO;
226 	}
227 
228 	accel_dev = adf_devmgr_get_dev_by_id(dev_id);
229 	if (!accel_dev) {
230 		pr_err("QAT: could not find started device\n");
231 		sx_xunlock(&processes_list_sema);
232 		return -EIO;
233 	}
234 
235 	/* If there is nothing there then take the first name and return */
236 	if (list_empty(&processes_list)) {
237 		snprintf(prv_data->name,
238 			 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
239 			 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
240 			 usr_name,
241 			 0);
242 		list_add(&prv_data->list, &processes_list);
243 		sx_xunlock(&processes_list_sema);
244 		prv_data->read_flag = 1;
245 		return 0;
246 	}
247 
248 	/* If there are processes running then search for a first free name */
249 	adf_devmgr_get_num_dev(&num_accel_devs);
250 	for (dev_num = 0; dev_num < num_accel_devs; dev_num++) {
251 		accel_dev = adf_devmgr_get_dev_by_id(dev_num);
252 		if (!accel_dev)
253 			continue;
254 
255 		if (!adf_dev_started(accel_dev))
256 			continue; /* to next device */
257 
258 		for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev);
259 		     pr_num++) {
260 			snprintf(prv_data->name,
261 				 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
262 				 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
263 				 usr_name,
264 				 pr_num);
265 			pr_name_available = 1;
266 			/* Figure out if section exists in the config table */
267 			section_ptr =
268 			    adf_cfg_sec_find(accel_dev, prv_data->name);
269 			if (NULL == section_ptr) {
270 				/* This section name doesn't exist */
271 				pr_name_available = 0;
272 				/* As process_num enumerates from 0, once we get
273 				 * to one which doesn't exist no further ones
274 				 * will exist. On to next device
275 				 */
276 				break;
277 			}
278 			/* Figure out if it's been taken already */
279 			list_for_each(lpos, &processes_list)
280 			{
281 				pdata =
282 				    list_entry(lpos,
283 					       struct adf_processes_priv_data,
284 					       list);
285 				if (!strncmp(
286 					pdata->name,
287 					prv_data->name,
288 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
289 					pr_name_available = 0;
290 					break;
291 				}
292 			}
293 			if (pr_name_available)
294 				break;
295 		}
296 		if (pr_name_available)
297 			break;
298 	}
299 	/*
300 	 * If we have a valid name that is not on
301 	 * the list take it and add to the list
302 	 */
303 	if (pr_name_available) {
304 		list_add(&prv_data->list, &processes_list);
305 		sx_xunlock(&processes_list_sema);
306 		prv_data->read_flag = 1;
307 		return 0;
308 	}
309 	/* If not then the process needs to wait */
310 	sx_xunlock(&processes_list_sema);
311 	explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES);
312 	prv_data->read_flag = 0;
313 	return 1;
314 }
315 
316 static int
317 adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag)
318 {
319 	struct adf_processes_priv_data *prv_data = NULL;
320 	int error = 0;
321 
322 	error = devfs_get_cdevpriv((void **)&prv_data);
323 	if (error) {
324 		printf("QAT: invalid file descriptor\n");
325 		return error;
326 	}
327 
328 	/*
329 	 * If there is a name that the process can use then give it
330 	 * to the proocess.
331 	 */
332 	if (prv_data->read_flag) {
333 		error = uiomove(prv_data->name,
334 				strnlen(prv_data->name,
335 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES),
336 				uio);
337 		if (error) {
338 			printf("QAT: failed to copy data to user\n");
339 			return error;
340 		}
341 		return 0;
342 	}
343 
344 	return EIO;
345 }
346 
347 static void
348 adf_processes_release(void *data)
349 {
350 	struct adf_processes_priv_data *prv_data = NULL;
351 
352 	prv_data = (struct adf_processes_priv_data *)data;
353 	sx_xlock(&processes_list_sema);
354 	list_del(&prv_data->list);
355 	sx_xunlock(&processes_list_sema);
356 	free(prv_data, M_QAT);
357 }
358 
/* Public entry point: create the /dev/qat_dev_processes device node. */
int
adf_processes_dev_register(void)
{
	return adf_chr_drv_create();
}
364 
/* Public entry point: destroy the /dev/qat_dev_processes device node. */
void
adf_processes_dev_unregister(void)
{
	adf_chr_drv_destroy();
}
370 
371 static void
372 adf_state_callout_notify_ev(void *arg)
373 {
374 	int notified = 0;
375 	struct adf_state_priv_data *priv = NULL;
376 	struct entry_proc_events *proc_events = NULL;
377 
378 	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
379 		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
380 			notified = 1;
381 			priv = proc_events->proc_events;
382 			wakeup(priv);
383 			selwakeup(&priv->rsel);
384 			KNOTE_UNLOCKED(&priv->rsel.si_note, 0);
385 		}
386 	}
387 	if (notified)
388 		callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
389 }
390 
/*
 * Queue an event for every registered /dev/qat_dev_state listener and
 * kick the notification callout.  A STOP event is followed by a synthetic
 * SHUTDOWN event so userspace observes the full teardown sequence.
 */
static void
adf_state_set(int dev, enum adf_event event)
{
	struct adf_accel_dev *accel_dev = NULL;
	struct state_head *head = NULL;
	struct entry_proc_events *proc_events = NULL;
	struct entry_state *state = NULL;

	accel_dev = adf_devmgr_get_dev_by_id(dev);
	if (!accel_dev)
		return;
	mtx_lock(&mtx);
	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		state = NULL;
		head = &proc_events->proc_events->state_head;
		/*
		 * M_NOWAIT because "mtx" is held; on allocation failure the
		 * event is silently dropped for this listener.
		 */
		state = malloc(sizeof(struct entry_state),
			       M_QAT,
			       M_NOWAIT | M_ZERO);
		if (!state)
			continue;
		state->state.dev_state = event;
		state->state.dev_id = dev;
		STAILQ_INSERT_TAIL(head, state, entries_state);
		if (event == ADF_EVENT_STOP) {
			/* Append the companion SHUTDOWN event. */
			state = NULL;
			state = malloc(sizeof(struct entry_state),
				       M_QAT,
				       M_NOWAIT | M_ZERO);
			if (!state)
				continue;
			state->state.dev_state = ADF_EVENT_SHUTDOWN;
			state->state.dev_id = dev;
			STAILQ_INSERT_TAIL(head, state, entries_state);
		}
	}
	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
	mtx_unlock(&mtx);
}
429 
/*
 * Service-framework event hook: decide which device lifecycle events are
 * propagated to userspace listeners.  INIT, SHUTDOWN and START are
 * acknowledged without forwarding; RESTARTING, RESTARTED, STOP, ERROR
 * (and the QAT_DBG events when compiled in) are queued via adf_state_set().
 *
 * Returns 0 on success, or a negative value for out-of-range/unknown
 * events (matching the negative-errno style used by this handler).
 */
static int
adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
{
	int ret = 0;

#if defined(QAT_UIO) && defined(QAT_DBG)
	if (event > ADF_EVENT_DBG_SHUTDOWN)
		return -EINVAL;
#else
	if (event > ADF_EVENT_ERROR)
		return -EINVAL;
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */

	switch (event) {
	case ADF_EVENT_INIT:
		return ret;
	case ADF_EVENT_SHUTDOWN:
		return ret;
	case ADF_EVENT_RESTARTING:
		break;
	case ADF_EVENT_RESTARTED:
		break;
	case ADF_EVENT_START:
		return ret;
	case ADF_EVENT_STOP:
		break;
	case ADF_EVENT_ERROR:
		break;
#if defined(QAT_UIO) && defined(QAT_DBG)
	case ADF_EVENT_PROC_CRASH:
		break;
	case ADF_EVENT_MANUAL_DUMP:
		break;
	case ADF_EVENT_SLICE_HANG:
		break;
	case ADF_EVENT_DBG_SHUTDOWN:
		break;
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */
	default:
		return -1;
	}

	/* Forward the event to all listeners. */
	adf_state_set(accel_dev->accel_id, event);

	return 0;
}
476 
477 static int
478 adf_state_kqfilter(struct cdev *dev, struct knote *kn)
479 {
480 	struct adf_state_priv_data *priv;
481 
482 	mtx_lock(&mtx);
483 	priv = dev->si_drv1;
484 	switch (kn->kn_filter) {
485 	case EVFILT_READ:
486 		kn->kn_fop = &adf_state_read_filterops;
487 		kn->kn_hook = priv;
488 		knlist_add(&priv->rsel.si_note, kn, 0);
489 		mtx_unlock(&mtx);
490 		return 0;
491 	default:
492 		mtx_unlock(&mtx);
493 		return -EINVAL;
494 	}
495 }
496 
/*
 * kqueue read-event filter: always report the knote as ready; the read
 * handler itself returns 0 bytes when the queue is empty.
 */
static int
adf_state_kqread_event(struct knote *kn, long hint)
{
	return 1;
}
502 
503 static void
504 adf_state_kqread_detach(struct knote *kn)
505 {
506 	struct adf_state_priv_data *priv = NULL;
507 
508 	mtx_lock(&mtx);
509 	if (!kn) {
510 		mtx_unlock(&mtx);
511 		return;
512 	}
513 	priv = kn->kn_hook;
514 	if (!priv) {
515 		mtx_unlock(&mtx);
516 		return;
517 	}
518 	knlist_remove(&priv->rsel.si_note, kn, 1);
519 	mtx_unlock(&mtx);
520 }
521 
/*
 * Initialize the state-notification subsystem: create /dev/qat_dev_state,
 * set up the listener list, mutex and notification callout, and register
 * the lifecycle-event handler with the ADF service framework.
 */
void
adf_state_init(void)
{
	adf_state_dev = make_dev(&adf_state_cdevsw,
				 0,
				 UID_ROOT,
				 GID_WHEEL,
				 0600,
				 "%s",
				 ADF_DEV_STATE_NAME);
	SLIST_INIT(&proc_events_head);
	mtx_init(&mtx, mtx_name, NULL, MTX_DEF);
	/* The callout runs with "mtx" held (see adf_state_callout_notify_ev). */
	callout_init_mtx(&callout, &mtx, 0);
	explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl));
	adf_state_hndl.event_hld = adf_state_event_handler;
	adf_state_hndl.name = "adf_state_event_handler";
	mtx_lock(&mtx);
	adf_service_register(&adf_state_hndl);
	callout_reset(&callout,
		      ADF_STATE_CALLOUT_TIME,
		      adf_state_callout_notify_ev,
		      NULL);
	mtx_unlock(&mtx);
}
546 
547 void
548 adf_state_destroy(void)
549 {
550 	struct entry_proc_events *proc_events = NULL;
551 
552 	mtx_lock(&mtx);
553 	adf_service_unregister(&adf_state_hndl);
554 	callout_stop(&callout);
555 	while (!SLIST_EMPTY(&proc_events_head)) {
556 		proc_events = SLIST_FIRST(&proc_events_head);
557 		SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events);
558 		free(proc_events, M_QAT);
559 	}
560 	destroy_dev(adf_state_dev);
561 	mtx_unlock(&mtx);
562 	mtx_destroy(&mtx);
563 }
564 
565 static int
566 adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
567 {
568 	struct adf_state_priv_data *prv_data = NULL;
569 	struct entry_proc_events *entry_proc_events = NULL;
570 	int ret = 0;
571 
572 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
573 	if (!prv_data)
574 		return -ENOMEM;
575 	entry_proc_events =
576 	    malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
577 	if (!entry_proc_events) {
578 		free(prv_data, M_QAT);
579 		return -ENOMEM;
580 	}
581 	mtx_lock(&mtx);
582 	prv_data->cdev = dev;
583 	prv_data->cdev->si_drv1 = prv_data;
584 	knlist_init_mtx(&prv_data->rsel.si_note, &mtx);
585 	STAILQ_INIT(&prv_data->state_head);
586 	entry_proc_events->proc_events = prv_data;
587 	SLIST_INSERT_HEAD(&proc_events_head,
588 			  entry_proc_events,
589 			  entries_proc_events);
590 	ret = devfs_set_cdevpriv(prv_data, adf_state_release);
591 	if (ret) {
592 		SLIST_REMOVE(&proc_events_head,
593 			     entry_proc_events,
594 			     entry_proc_events,
595 			     entries_proc_events);
596 		free(entry_proc_events, M_QAT);
597 		free(prv_data, M_QAT);
598 	}
599 	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
600 	mtx_unlock(&mtx);
601 	return ret;
602 }
603 
604 static int
605 adf_state_read(struct cdev *dev, struct uio *uio, int ioflag)
606 {
607 	int ret = 0;
608 	struct adf_state_priv_data *prv_data = NULL;
609 	struct state_head *state_head = NULL;
610 	struct entry_state *entry_state = NULL;
611 	struct adf_state *state = NULL;
612 	struct entry_proc_events *proc_events = NULL;
613 
614 	mtx_lock(&mtx);
615 	ret = devfs_get_cdevpriv((void **)&prv_data);
616 	if (ret) {
617 		mtx_unlock(&mtx);
618 		return 0;
619 	}
620 	state_head = &prv_data->state_head;
621 	if (STAILQ_EMPTY(state_head)) {
622 		mtx_unlock(&mtx);
623 		return 0;
624 	}
625 	entry_state = STAILQ_FIRST(state_head);
626 	state = &entry_state->state;
627 	ret = uiomove(state, sizeof(struct adf_state), uio);
628 	if (!ret && !STAILQ_EMPTY(state_head)) {
629 		STAILQ_REMOVE_HEAD(state_head, entries_state);
630 		free(entry_state, M_QAT);
631 	}
632 	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
633 		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
634 			prv_data = proc_events->proc_events;
635 			wakeup(prv_data);
636 			selwakeup(&prv_data->rsel);
637 			KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0);
638 		}
639 	}
640 	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
641 	mtx_unlock(&mtx);
642 	return ret;
643 }
644 
/*
 * cdevpriv destructor for /dev/qat_dev_state: tear down the kqueue note
 * list, drain selectors, free any still-queued events, unlink this
 * listener from the global list and release its private data.
 */
static void
adf_state_release(void *data)
{
	struct adf_state_priv_data *prv_data = NULL;
	struct entry_state *entry_state = NULL;
	struct entry_proc_events *entry_proc_events = NULL;
	struct entry_proc_events *tmp = NULL;

	mtx_lock(&mtx);
	prv_data = (struct adf_state_priv_data *)data;
	/* islocked == 1: the knlist lock is "mtx", already held here. */
	knlist_delete(&prv_data->rsel.si_note, curthread, 1);
	knlist_destroy(&prv_data->rsel.si_note);
	/* NOTE(review): seldrain() may sleep; confirm it is safe while
	 * holding "mtx" (a non-sleepable MTX_DEF mutex). */
	seldrain(&prv_data->rsel);
	while (!STAILQ_EMPTY(&prv_data->state_head)) {
		entry_state = STAILQ_FIRST(&prv_data->state_head);
		STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state);
		free(entry_state, M_QAT);
	}
	/* Remove every listener entry pointing at this private data. */
	SLIST_FOREACH_SAFE (entry_proc_events,
			    &proc_events_head,
			    entries_proc_events,
			    tmp) {
		if (entry_proc_events->proc_events == prv_data) {
			SLIST_REMOVE(&proc_events_head,
				     entry_proc_events,
				     entry_proc_events,
				     entries_proc_events);
			free(entry_proc_events, M_QAT);
		}
	}
	free(prv_data, M_QAT);
	mtx_unlock(&mtx);
}
678