1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_uio_control.h"
13 #include "adf_uio_cleanup.h"
14 #include "adf_uio.h"
15 #include "adf_transport_access_macros.h"
16 #include "adf_transport_internal.h"
17 
18 #define ADF_DEV_PROCESSES_NAME "qat_dev_processes"
19 #define ADF_DEV_STATE_NAME "qat_dev_state"
20 
21 #define ADF_STATE_CALLOUT_TIME 10
22 
23 static const char *mtx_name = "state_callout_mtx";
24 
25 static d_open_t adf_processes_open;
26 static void adf_processes_release(void *data);
27 static d_read_t adf_processes_read;
28 static d_write_t adf_processes_write;
29 
30 static d_open_t adf_state_open;
31 static void adf_state_release(void *data);
32 static d_read_t adf_state_read;
33 static int adf_state_kqfilter(struct cdev *dev, struct knote *kn);
34 static int adf_state_kqread_event(struct knote *kn, long hint);
35 static void adf_state_kqread_detach(struct knote *kn);
36 
37 static struct callout callout;
38 static struct mtx mtx;
39 static struct service_hndl adf_state_hndl;
40 
/*
 * Links one open instance of /dev/qat_dev_state (its private data) into
 * the global proc_events_head list of event listeners.
 */
struct entry_proc_events {
	struct adf_state_priv_data *proc_events;

	SLIST_ENTRY(entry_proc_events) entries_proc_events;
};
46 
/* One queued device-state event awaiting delivery to a single reader. */
struct entry_state {
	struct adf_state state;

	STAILQ_ENTRY(entry_state) entries_state;
};
52 
53 SLIST_HEAD(proc_events_head, entry_proc_events);
54 STAILQ_HEAD(state_head, entry_state);
55 
56 static struct proc_events_head proc_events_head;
57 
/* Per-open private data for /dev/qat_dev_processes. */
struct adf_processes_priv_data {
	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; /* reserved section name */
	int read_flag; /* non-zero once write() has reserved a name */
	struct list_head list; /* linkage on the global processes_list */
};
63 
/* Per-open private data for /dev/qat_dev_state. */
struct adf_state_priv_data {
	struct cdev *cdev;	    /* back-pointer to the character device */
	struct selinfo rsel;	    /* select/poll/kqueue wait channel */
	struct state_head state_head; /* events queued for this reader */
};
69 
/* Character-device switch for /dev/qat_dev_processes (open/read/write). */
static struct cdevsw adf_processes_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_processes_open,
	.d_read = adf_processes_read,
	.d_write = adf_processes_write,
	.d_name = ADF_DEV_PROCESSES_NAME,
};
77 
/* Character-device switch for /dev/qat_dev_state (open/read/kqueue). */
static struct cdevsw adf_state_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_state_open,
	.d_read = adf_state_read,
	.d_kqfilter = adf_state_kqfilter,
	.d_name = ADF_DEV_STATE_NAME,
};
85 
/* kqueue EVFILT_READ filter ops for /dev/qat_dev_state knotes. */
static struct filterops adf_state_read_filterops = {
	.f_isfd = 1, /* knote is attached to a file descriptor */
	.f_attach = NULL,
	.f_detach = adf_state_kqread_detach,
	.f_event = adf_state_kqread_event,
};
92 
93 static struct cdev *adf_processes_dev;
94 static struct cdev *adf_state_dev;
95 
96 static LINUX_LIST_HEAD(processes_list);
97 
98 struct sx processes_list_sema;
99 SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list");
100 
/*
 * Tear down the /dev/qat_dev_processes node.
 * NOTE(review): destroy_dev() must not be passed NULL; this assumes
 * adf_chr_drv_create() succeeded — confirm callers never unregister
 * after a failed create.
 */
static void
adf_chr_drv_destroy(void)
{
	destroy_dev(adf_processes_dev);
}
106 
107 static int
108 adf_chr_drv_create(void)
109 {
110 
111 	adf_processes_dev = make_dev(&adf_processes_cdevsw,
112 				     0,
113 				     UID_ROOT,
114 				     GID_WHEEL,
115 				     0600,
116 				     ADF_DEV_PROCESSES_NAME);
117 	if (adf_processes_dev == NULL) {
118 		printf("QAT: failed to create device\n");
119 		goto err_cdev_del;
120 	}
121 	return 0;
122 err_cdev_del:
123 	return EFAULT;
124 }
125 
126 static int
127 adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
128 {
129 	int i = 0, devices = 0;
130 	struct adf_accel_dev *accel_dev = NULL;
131 	struct adf_processes_priv_data *prv_data = NULL;
132 	int error = 0;
133 
134 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
135 		accel_dev = adf_devmgr_get_dev_by_id(i);
136 		if (!accel_dev)
137 			continue;
138 		if (!adf_dev_started(accel_dev))
139 			continue;
140 		devices++;
141 	}
142 	if (!devices) {
143 		printf("QAT: No active devices found.\n");
144 		return ENXIO;
145 	}
146 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
147 	if (!prv_data)
148 		return ENOMEM;
149 	INIT_LIST_HEAD(&prv_data->list);
150 	error = devfs_set_cdevpriv(prv_data, adf_processes_release);
151 	if (error) {
152 		free(prv_data, M_QAT);
153 		return error;
154 	}
155 
156 	return 0;
157 }
158 
159 static int
160 adf_get_first_started_dev(void)
161 {
162 	int i = 0;
163 	struct adf_accel_dev *accel_dev = NULL;
164 
165 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
166 		accel_dev = adf_devmgr_get_dev_by_id(i);
167 		if (!accel_dev)
168 			continue;
169 		if (adf_dev_started(accel_dev))
170 			return i;
171 	}
172 
173 	return -1;
174 }
175 
176 static int
177 adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag)
178 {
179 	struct adf_processes_priv_data *prv_data = NULL;
180 	struct adf_processes_priv_data *pdata = NULL;
181 	int dev_num = 0, pr_num = 0;
182 	struct list_head *lpos = NULL;
183 	char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 };
184 	struct adf_accel_dev *accel_dev = NULL;
185 	struct adf_cfg_section *section_ptr = NULL;
186 	bool pr_name_available = 1;
187 	uint32_t num_accel_devs = 0;
188 	int error = 0;
189 	ssize_t count;
190 	int dev_id;
191 
192 	error = devfs_get_cdevpriv((void **)&prv_data);
193 	if (error) {
194 		printf("QAT: invalid file descriptor\n");
195 		return error;
196 	}
197 
198 	if (prv_data->read_flag == 1) {
199 		printf("QAT: can only write once\n");
200 		return EBADF;
201 	}
202 	count = uio->uio_resid;
203 	if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
204 		printf("QAT: wrong size %d\n", (int)count);
205 		return EIO;
206 	}
207 
208 	error = uiomove(usr_name, count, uio);
209 	if (error) {
210 		printf("QAT: can't copy data\n");
211 		return error;
212 	}
213 
214 	/* Lock other processes and try to find out the process name */
215 	if (sx_xlock_sig(&processes_list_sema)) {
216 		printf("QAT: can't aquire process info lock\n");
217 		return EBADF;
218 	}
219 
220 	dev_id = adf_get_first_started_dev();
221 	if (-1 == dev_id) {
222 		pr_err("QAT: could not find started device\n");
223 		sx_xunlock(&processes_list_sema);
224 		return -EIO;
225 	}
226 
227 	accel_dev = adf_devmgr_get_dev_by_id(dev_id);
228 	if (!accel_dev) {
229 		pr_err("QAT: could not find started device\n");
230 		sx_xunlock(&processes_list_sema);
231 		return -EIO;
232 	}
233 
234 	/* If there is nothing there then take the first name and return */
235 	if (list_empty(&processes_list)) {
236 		snprintf(prv_data->name,
237 			 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
238 			 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
239 			 usr_name,
240 			 0);
241 		list_add(&prv_data->list, &processes_list);
242 		sx_xunlock(&processes_list_sema);
243 		prv_data->read_flag = 1;
244 		return 0;
245 	}
246 
247 	/* If there are processes running then search for a first free name */
248 	adf_devmgr_get_num_dev(&num_accel_devs);
249 	for (dev_num = 0; dev_num < num_accel_devs; dev_num++) {
250 		accel_dev = adf_devmgr_get_dev_by_id(dev_num);
251 		if (!accel_dev)
252 			continue;
253 
254 		if (!adf_dev_started(accel_dev))
255 			continue; /* to next device */
256 
257 		for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev);
258 		     pr_num++) {
259 			snprintf(prv_data->name,
260 				 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
261 				 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
262 				 usr_name,
263 				 pr_num);
264 			pr_name_available = 1;
265 			/* Figure out if section exists in the config table */
266 			section_ptr =
267 			    adf_cfg_sec_find(accel_dev, prv_data->name);
268 			if (NULL == section_ptr) {
269 				/* This section name doesn't exist */
270 				pr_name_available = 0;
271 				/* As process_num enumerates from 0, once we get
272 				 * to one which doesn't exist no further ones
273 				 * will exist. On to next device
274 				 */
275 				break;
276 			}
277 			/* Figure out if it's been taken already */
278 			list_for_each(lpos, &processes_list)
279 			{
280 				pdata =
281 				    list_entry(lpos,
282 					       struct adf_processes_priv_data,
283 					       list);
284 				if (!strncmp(
285 					pdata->name,
286 					prv_data->name,
287 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
288 					pr_name_available = 0;
289 					break;
290 				}
291 			}
292 			if (pr_name_available)
293 				break;
294 		}
295 		if (pr_name_available)
296 			break;
297 	}
298 	/*
299 	 * If we have a valid name that is not on
300 	 * the list take it and add to the list
301 	 */
302 	if (pr_name_available) {
303 		list_add(&prv_data->list, &processes_list);
304 		sx_xunlock(&processes_list_sema);
305 		prv_data->read_flag = 1;
306 		return 0;
307 	}
308 	/* If not then the process needs to wait */
309 	sx_xunlock(&processes_list_sema);
310 	explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES);
311 	prv_data->read_flag = 0;
312 	return 1;
313 }
314 
315 static int
316 adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag)
317 {
318 	struct adf_processes_priv_data *prv_data = NULL;
319 	int error = 0;
320 
321 	error = devfs_get_cdevpriv((void **)&prv_data);
322 	if (error) {
323 		printf("QAT: invalid file descriptor\n");
324 		return error;
325 	}
326 
327 	/*
328 	 * If there is a name that the process can use then give it
329 	 * to the proocess.
330 	 */
331 	if (prv_data->read_flag) {
332 		error = uiomove(prv_data->name,
333 				strnlen(prv_data->name,
334 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES),
335 				uio);
336 		if (error) {
337 			printf("QAT: failed to copy data to user\n");
338 			return error;
339 		}
340 		return 0;
341 	}
342 
343 	return EIO;
344 }
345 
/*
 * cdevpriv destructor for /dev/qat_dev_processes: unlink this open's
 * entry from the global processes_list (releasing its reserved section
 * name) and free the private data.
 */
static void
adf_processes_release(void *data)
{
	struct adf_processes_priv_data *prv_data = NULL;

	prv_data = (struct adf_processes_priv_data *)data;
	sx_xlock(&processes_list_sema);
	list_del(&prv_data->list);
	sx_xunlock(&processes_list_sema);
	free(prv_data, M_QAT);
}
357 
/* Public entry point: create the qat_dev_processes device node. */
int
adf_processes_dev_register(void)
{
	return adf_chr_drv_create();
}
363 
/* Public entry point: destroy the qat_dev_processes device node. */
void
adf_processes_dev_unregister(void)
{
	adf_chr_drv_destroy();
}
369 
/*
 * Callout handler (invoked with 'mtx' held, per callout_init_mtx): wake
 * every listener that still has undelivered state events — sleepers via
 * wakeup(), pollers via selwakeup(), and kqueue via KNOTE — and re-arm
 * the callout as long as any queue remained non-empty.
 */
static void
adf_state_callout_notify_ev(void *arg)
{
	int notified = 0;
	struct adf_state_priv_data *priv = NULL;
	struct entry_proc_events *proc_events = NULL;

	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
			notified = 1;
			priv = proc_events->proc_events;
			wakeup(priv);
			selwakeup(&priv->rsel);
			KNOTE_UNLOCKED(&priv->rsel.si_note, 0);
		}
	}
	/* Keep polling only while someone still has pending events. */
	if (notified)
		callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}
389 
/*
 * Queue a device-state event for every registered listener and arm the
 * notification callout.  A STOP event is immediately followed by a
 * synthesized SHUTDOWN event on the same queue.
 *
 * Allocations use M_NOWAIT (we hold 'mtx'); on allocation failure the
 * event is silently dropped for that listener.
 */
static void
adf_state_set(int dev, enum adf_event event)
{
	struct adf_accel_dev *accel_dev = NULL;
	struct state_head *head = NULL;
	struct entry_proc_events *proc_events = NULL;
	struct entry_state *state = NULL;

	accel_dev = adf_devmgr_get_dev_by_id(dev);
	if (!accel_dev)
		return;
	mtx_lock(&mtx);
	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		state = NULL;
		head = &proc_events->proc_events->state_head;
		state = malloc(sizeof(struct entry_state),
			       M_QAT,
			       M_NOWAIT | M_ZERO);
		if (!state)
			continue;
		state->state.dev_state = event;
		state->state.dev_id = dev;
		STAILQ_INSERT_TAIL(head, state, entries_state);
		if (event == ADF_EVENT_STOP) {
			/* STOP implies a following SHUTDOWN for userspace. */
			state = NULL;
			state = malloc(sizeof(struct entry_state),
				       M_QAT,
				       M_NOWAIT | M_ZERO);
			if (!state)
				continue;
			state->state.dev_state = ADF_EVENT_SHUTDOWN;
			state->state.dev_id = dev;
			STAILQ_INSERT_TAIL(head, state, entries_state);
		}
	}
	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
	mtx_unlock(&mtx);
}
428 
429 static int
430 adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
431 {
432 	int ret = 0;
433 
434 #if defined(QAT_UIO) && defined(QAT_DBG)
435 	if (event > ADF_EVENT_DBG_SHUTDOWN)
436 		return -EINVAL;
437 #else
438 	if (event > ADF_EVENT_ERROR)
439 		return -EINVAL;
440 #endif /* defined(QAT_UIO) && defined(QAT_DBG) */
441 
442 	switch (event) {
443 	case ADF_EVENT_INIT:
444 		return ret;
445 	case ADF_EVENT_SHUTDOWN:
446 		return ret;
447 	case ADF_EVENT_RESTARTING:
448 		break;
449 	case ADF_EVENT_RESTARTED:
450 		break;
451 	case ADF_EVENT_START:
452 		return ret;
453 	case ADF_EVENT_STOP:
454 		break;
455 	case ADF_EVENT_ERROR:
456 		break;
457 #if defined(QAT_UIO) && defined(QAT_DBG)
458 	case ADF_EVENT_PROC_CRASH:
459 		break;
460 	case ADF_EVENT_MANUAL_DUMP:
461 		break;
462 	case ADF_EVENT_SLICE_HANG:
463 		break;
464 	case ADF_EVENT_DBG_SHUTDOWN:
465 		break;
466 #endif /* defined(QAT_UIO) && defined(QAT_DBG) */
467 	default:
468 		return -1;
469 	}
470 
471 	adf_state_set(accel_dev->accel_id, event);
472 
473 	return 0;
474 }
475 
476 static int
477 adf_state_kqfilter(struct cdev *dev, struct knote *kn)
478 {
479 	struct adf_state_priv_data *priv;
480 
481 	mtx_lock(&mtx);
482 	priv = dev->si_drv1;
483 	switch (kn->kn_filter) {
484 	case EVFILT_READ:
485 		kn->kn_fop = &adf_state_read_filterops;
486 		kn->kn_hook = priv;
487 		knlist_add(&priv->rsel.si_note, kn, 0);
488 		mtx_unlock(&mtx);
489 		return 0;
490 	default:
491 		mtx_unlock(&mtx);
492 		return -EINVAL;
493 	}
494 }
495 
/*
 * kqueue read-filter event callback: always report the knote as ready.
 * NOTE(review): this does not inspect the event queue, so a kevent
 * wakeup may be followed by a zero-byte read — presumably intentional
 * (the callout re-notifies while events remain); confirm.
 */
static int
adf_state_kqread_event(struct knote *kn, long hint)
{
	return 1;
}
501 
502 static void
503 adf_state_kqread_detach(struct knote *kn)
504 {
505 	struct adf_state_priv_data *priv = NULL;
506 
507 	mtx_lock(&mtx);
508 	if (!kn) {
509 		mtx_unlock(&mtx);
510 		return;
511 	}
512 	priv = kn->kn_hook;
513 	if (!priv) {
514 		mtx_unlock(&mtx);
515 		return;
516 	}
517 	knlist_remove(&priv->rsel.si_note, kn, 1);
518 	mtx_unlock(&mtx);
519 }
520 
/*
 * Module-init path: create /dev/qat_dev_state, set up the listener list,
 * the notification mutex/callout, and register the event handler with
 * the service framework.
 * NOTE(review): the make_dev() result is not checked; a NULL
 * adf_state_dev would later panic destroy_dev() — confirm make_dev
 * cannot fail in this configuration.
 */
void
adf_state_init(void)
{
	adf_state_dev = make_dev(&adf_state_cdevsw,
				 0,
				 UID_ROOT,
				 GID_WHEEL,
				 0600,
				 "%s",
				 ADF_DEV_STATE_NAME);
	SLIST_INIT(&proc_events_head);
	mtx_init(&mtx, mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&callout, &mtx, 0);
	explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl));
	adf_state_hndl.event_hld = adf_state_event_handler;
	adf_state_hndl.name = "adf_state_event_handler";
	mtx_lock(&mtx);
	adf_service_register(&adf_state_hndl);
	/* Arm the first notification tick; handler runs with mtx held. */
	callout_reset(&callout,
		      ADF_STATE_CALLOUT_TIME,
		      adf_state_callout_notify_ev,
		      NULL);
	mtx_unlock(&mtx);
}
545 
546 void
547 adf_state_destroy(void)
548 {
549 	struct entry_proc_events *proc_events = NULL;
550 
551 	mtx_lock(&mtx);
552 	adf_service_unregister(&adf_state_hndl);
553 	callout_stop(&callout);
554 	while (!SLIST_EMPTY(&proc_events_head)) {
555 		proc_events = SLIST_FIRST(&proc_events_head);
556 		SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events);
557 		free(proc_events, M_QAT);
558 	}
559 	destroy_dev(adf_state_dev);
560 	mtx_unlock(&mtx);
561 	mtx_destroy(&mtx);
562 }
563 
564 static int
565 adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
566 {
567 	struct adf_state_priv_data *prv_data = NULL;
568 	struct entry_proc_events *entry_proc_events = NULL;
569 	int ret = 0;
570 
571 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
572 	if (!prv_data)
573 		return -ENOMEM;
574 	entry_proc_events =
575 	    malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
576 	if (!entry_proc_events) {
577 		free(prv_data, M_QAT);
578 		return -ENOMEM;
579 	}
580 	mtx_lock(&mtx);
581 	prv_data->cdev = dev;
582 	prv_data->cdev->si_drv1 = prv_data;
583 	knlist_init_mtx(&prv_data->rsel.si_note, &mtx);
584 	STAILQ_INIT(&prv_data->state_head);
585 	entry_proc_events->proc_events = prv_data;
586 	SLIST_INSERT_HEAD(&proc_events_head,
587 			  entry_proc_events,
588 			  entries_proc_events);
589 	ret = devfs_set_cdevpriv(prv_data, adf_state_release);
590 	if (ret) {
591 		SLIST_REMOVE(&proc_events_head,
592 			     entry_proc_events,
593 			     entry_proc_events,
594 			     entries_proc_events);
595 		free(entry_proc_events, M_QAT);
596 		free(prv_data, M_QAT);
597 	}
598 	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
599 	mtx_unlock(&mtx);
600 	return ret;
601 }
602 
/*
 * read(2) handler for /dev/qat_dev_state: deliver one queued
 * struct adf_state to the caller, dequeue it on successful copy-out,
 * then re-notify every listener that still has pending events and
 * re-arm the callout.  Returns 0 when there is nothing to read.
 * NOTE(review): the entry is dequeued whenever uiomove() returns 0,
 * which presumably assumes the caller always reads at least
 * sizeof(struct adf_state) — confirm short reads cannot drop an event.
 */
static int
adf_state_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int ret = 0;
	struct adf_state_priv_data *prv_data = NULL;
	struct state_head *state_head = NULL;
	struct entry_state *entry_state = NULL;
	struct adf_state *state = NULL;
	struct entry_proc_events *proc_events = NULL;

	mtx_lock(&mtx);
	ret = devfs_get_cdevpriv((void **)&prv_data);
	if (ret) {
		mtx_unlock(&mtx);
		return 0;
	}
	state_head = &prv_data->state_head;
	if (STAILQ_EMPTY(state_head)) {
		mtx_unlock(&mtx);
		return 0;
	}
	/* Copy the oldest pending event out to userspace. */
	entry_state = STAILQ_FIRST(state_head);
	state = &entry_state->state;
	ret = uiomove(state, sizeof(struct adf_state), uio);
	if (!ret && !STAILQ_EMPTY(state_head)) {
		STAILQ_REMOVE_HEAD(state_head, entries_state);
		free(entry_state, M_QAT);
	}
	/* Wake all listeners that still have undelivered events. */
	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
			prv_data = proc_events->proc_events;
			wakeup(prv_data);
			selwakeup(&prv_data->rsel);
			KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0);
		}
	}
	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
	mtx_unlock(&mtx);
	return ret;
}
643 
/*
 * cdevpriv destructor for /dev/qat_dev_state: detach all knotes, drain
 * select/poll waiters, free any undelivered events, unlink this open
 * from the global listener list, and free the private data.
 */
static void
adf_state_release(void *data)
{
	struct adf_state_priv_data *prv_data = NULL;
	struct entry_state *entry_state = NULL;
	struct entry_proc_events *entry_proc_events = NULL;
	struct entry_proc_events *tmp = NULL;

	mtx_lock(&mtx);
	prv_data = (struct adf_state_priv_data *)data;
	/* Knote teardown must precede freeing the selinfo they reference. */
	knlist_delete(&prv_data->rsel.si_note, curthread, 1);
	knlist_destroy(&prv_data->rsel.si_note);
	seldrain(&prv_data->rsel);
	/* Discard events this reader never consumed. */
	while (!STAILQ_EMPTY(&prv_data->state_head)) {
		entry_state = STAILQ_FIRST(&prv_data->state_head);
		STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state);
		free(entry_state, M_QAT);
	}
	/* SAFE variant: entries are freed while iterating. */
	SLIST_FOREACH_SAFE (entry_proc_events,
			    &proc_events_head,
			    entries_proc_events,
			    tmp) {
		if (entry_proc_events->proc_events == prv_data) {
			SLIST_REMOVE(&proc_events_head,
				     entry_proc_events,
				     entry_proc_events,
				     entries_proc_events);
			free(entry_proc_events, M_QAT);
		}
	}
	free(prv_data, M_QAT);
	mtx_unlock(&mtx);
}
677