1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_uio_control.h"
13 #include "adf_uio_cleanup.h"
14 #include "adf_uio.h"
15 #include "adf_transport_access_macros.h"
16 #include "adf_transport_internal.h"
17 
18 #define ADF_DEV_PROCESSES_NAME "qat_dev_processes"
19 #define ADF_DEV_STATE_NAME "qat_dev_state"
20 
21 #define ADF_STATE_CALLOUT_TIME 10
22 
23 static const char *mtx_name = "state_mtx";
24 static const char *mtx_callout_name = "callout_mtx";
25 
26 static d_open_t adf_processes_open;
27 static void adf_processes_release(void *data);
28 static d_read_t adf_processes_read;
29 static d_write_t adf_processes_write;
30 
31 static d_open_t adf_state_open;
32 static void adf_state_release(void *data);
33 static d_read_t adf_state_read;
34 static int adf_state_kqfilter(struct cdev *dev, struct knote *kn);
35 static int adf_state_kqread_event(struct knote *kn, long hint);
36 static void adf_state_kqread_detach(struct knote *kn);
37 
38 static struct callout callout;
39 static struct mtx mtx;
40 static struct mtx callout_mtx;
41 static struct service_hndl adf_state_hndl;
42 
/*
 * List node linking one open /dev/qat_dev_state consumer (its private
 * data) into the global proc_events_head list, so device events can be
 * fanned out to every listener.
 */
struct entry_proc_events {
	struct adf_state_priv_data *proc_events;

	SLIST_ENTRY(entry_proc_events) entries_proc_events;
};
48 
/*
 * Queue node carrying a single adf_state event (device id + event code)
 * queued for one /dev/qat_dev_state reader; consumed by adf_state_read().
 */
struct entry_state {
	struct adf_state state;

	STAILQ_ENTRY(entry_state) entries_state;
};
54 
55 SLIST_HEAD(proc_events_head, entry_proc_events);
56 STAILQ_HEAD(state_head, entry_state);
57 
58 static struct proc_events_head proc_events_head;
59 
/*
 * Per-open private data for /dev/qat_dev_processes.
 * name holds the unique per-process section name reserved by write(2);
 * read_flag is set once a name is reserved and gates read(2).
 * list links this descriptor into the global processes_list.
 */
struct adf_processes_priv_data {
	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
	int read_flag;
	struct list_head list;
};
65 
/*
 * Per-open private data for /dev/qat_dev_state.
 * rsel backs poll/select and the kqueue knlist; state_head is the queue
 * of pending adf_state events for this reader (protected by &mtx).
 */
struct adf_state_priv_data {
	struct cdev *cdev;
	struct selinfo rsel;
	struct state_head state_head;
};
71 
/* Character-device switch for /dev/qat_dev_processes (open/read/write). */
static struct cdevsw adf_processes_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_processes_open,
	.d_read = adf_processes_read,
	.d_write = adf_processes_write,
	.d_name = ADF_DEV_PROCESSES_NAME,
};
79 
/* Character-device switch for /dev/qat_dev_state (open/read/kqueue). */
static struct cdevsw adf_state_cdevsw = {
	.d_version = D_VERSION,
	.d_open = adf_state_open,
	.d_read = adf_state_read,
	.d_kqfilter = adf_state_kqfilter,
	.d_name = ADF_DEV_STATE_NAME,
};
87 
/* EVFILT_READ filter ops attached by adf_state_kqfilter(). */
static struct filterops adf_state_read_filterops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = adf_state_kqread_detach,
	.f_event = adf_state_kqread_event,
};
94 
95 static struct cdev *adf_processes_dev;
96 static struct cdev *adf_state_dev;
97 
98 static LINUX_LIST_HEAD(processes_list);
99 
100 struct sx processes_list_sema;
101 SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list");
102 
/* Remove the /dev/qat_dev_processes device node. */
static void
adf_chr_drv_destroy(void)
{
	destroy_dev(adf_processes_dev);
}
108 
109 static int
110 adf_chr_drv_create(void)
111 {
112 
113 	adf_processes_dev = make_dev(&adf_processes_cdevsw,
114 				     0,
115 				     UID_ROOT,
116 				     GID_WHEEL,
117 				     0600,
118 				     ADF_DEV_PROCESSES_NAME);
119 	if (adf_processes_dev == NULL) {
120 		printf("QAT: failed to create device\n");
121 		goto err_cdev_del;
122 	}
123 	return 0;
124 err_cdev_del:
125 	return EFAULT;
126 }
127 
128 static int
129 adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
130 {
131 	int i = 0, devices = 0;
132 	struct adf_accel_dev *accel_dev = NULL;
133 	struct adf_processes_priv_data *prv_data = NULL;
134 	int error = 0;
135 
136 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
137 		accel_dev = adf_devmgr_get_dev_by_id(i);
138 		if (!accel_dev)
139 			continue;
140 		if (!adf_dev_started(accel_dev))
141 			continue;
142 		devices++;
143 	}
144 	if (!devices) {
145 		printf("QAT: No active devices found.\n");
146 		return ENXIO;
147 	}
148 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
149 	if (!prv_data)
150 		return ENOMEM;
151 	INIT_LIST_HEAD(&prv_data->list);
152 	error = devfs_set_cdevpriv(prv_data, adf_processes_release);
153 	if (error) {
154 		free(prv_data, M_QAT);
155 		return error;
156 	}
157 
158 	return 0;
159 }
160 
161 static int
162 adf_get_first_started_dev(void)
163 {
164 	int i = 0;
165 	struct adf_accel_dev *accel_dev = NULL;
166 
167 	for (i = 0; i < ADF_MAX_DEVICES; i++) {
168 		accel_dev = adf_devmgr_get_dev_by_id(i);
169 		if (!accel_dev)
170 			continue;
171 		if (adf_dev_started(accel_dev))
172 			return i;
173 	}
174 
175 	return -1;
176 }
177 
178 static int
179 adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag)
180 {
181 	struct adf_processes_priv_data *prv_data = NULL;
182 	struct adf_processes_priv_data *pdata = NULL;
183 	int dev_num = 0, pr_num = 0;
184 	struct list_head *lpos = NULL;
185 	char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 };
186 	struct adf_accel_dev *accel_dev = NULL;
187 	struct adf_cfg_section *section_ptr = NULL;
188 	bool pr_name_available = 1;
189 	uint32_t num_accel_devs = 0;
190 	int error = 0;
191 	ssize_t count;
192 	int dev_id;
193 
194 	error = devfs_get_cdevpriv((void **)&prv_data);
195 	if (error) {
196 		printf("QAT: invalid file descriptor\n");
197 		return error;
198 	}
199 
200 	if (prv_data->read_flag == 1) {
201 		printf("QAT: can only write once\n");
202 		return EBADF;
203 	}
204 	count = uio->uio_resid;
205 	if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
206 		printf("QAT: wrong size %d\n", (int)count);
207 		return EIO;
208 	}
209 
210 	error = uiomove(usr_name, count, uio);
211 	if (error) {
212 		printf("QAT: can't copy data\n");
213 		return error;
214 	}
215 
216 	/* Lock other processes and try to find out the process name */
217 	if (sx_xlock_sig(&processes_list_sema)) {
218 		printf("QAT: can't aquire process info lock\n");
219 		return EBADF;
220 	}
221 
222 	dev_id = adf_get_first_started_dev();
223 	if (-1 == dev_id) {
224 		pr_err("QAT: could not find started device\n");
225 		sx_xunlock(&processes_list_sema);
226 		return -EIO;
227 	}
228 
229 	accel_dev = adf_devmgr_get_dev_by_id(dev_id);
230 	if (!accel_dev) {
231 		pr_err("QAT: could not find started device\n");
232 		sx_xunlock(&processes_list_sema);
233 		return -EIO;
234 	}
235 
236 	/* If there is nothing there then take the first name and return */
237 	if (list_empty(&processes_list)) {
238 		snprintf(prv_data->name,
239 			 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
240 			 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
241 			 usr_name,
242 			 0);
243 		list_add(&prv_data->list, &processes_list);
244 		sx_xunlock(&processes_list_sema);
245 		prv_data->read_flag = 1;
246 		return 0;
247 	}
248 
249 	/* If there are processes running then search for a first free name */
250 	adf_devmgr_get_num_dev(&num_accel_devs);
251 	for (dev_num = 0; dev_num < num_accel_devs; dev_num++) {
252 		accel_dev = adf_devmgr_get_dev_by_id(dev_num);
253 		if (!accel_dev)
254 			continue;
255 
256 		if (!adf_dev_started(accel_dev))
257 			continue; /* to next device */
258 
259 		for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev);
260 		     pr_num++) {
261 			snprintf(prv_data->name,
262 				 ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
263 				 "%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
264 				 usr_name,
265 				 pr_num);
266 			pr_name_available = 1;
267 			/* Figure out if section exists in the config table */
268 			section_ptr =
269 			    adf_cfg_sec_find(accel_dev, prv_data->name);
270 			if (NULL == section_ptr) {
271 				/* This section name doesn't exist */
272 				pr_name_available = 0;
273 				/* As process_num enumerates from 0, once we get
274 				 * to one which doesn't exist no further ones
275 				 * will exist. On to next device
276 				 */
277 				break;
278 			}
279 			/* Figure out if it's been taken already */
280 			list_for_each(lpos, &processes_list)
281 			{
282 				pdata =
283 				    list_entry(lpos,
284 					       struct adf_processes_priv_data,
285 					       list);
286 				if (!strncmp(
287 					pdata->name,
288 					prv_data->name,
289 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
290 					pr_name_available = 0;
291 					break;
292 				}
293 			}
294 			if (pr_name_available)
295 				break;
296 		}
297 		if (pr_name_available)
298 			break;
299 	}
300 	/*
301 	 * If we have a valid name that is not on
302 	 * the list take it and add to the list
303 	 */
304 	if (pr_name_available) {
305 		list_add(&prv_data->list, &processes_list);
306 		sx_xunlock(&processes_list_sema);
307 		prv_data->read_flag = 1;
308 		return 0;
309 	}
310 	/* If not then the process needs to wait */
311 	sx_xunlock(&processes_list_sema);
312 	explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES);
313 	prv_data->read_flag = 0;
314 	return 1;
315 }
316 
317 static int
318 adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag)
319 {
320 	struct adf_processes_priv_data *prv_data = NULL;
321 	int error = 0;
322 
323 	error = devfs_get_cdevpriv((void **)&prv_data);
324 	if (error) {
325 		printf("QAT: invalid file descriptor\n");
326 		return error;
327 	}
328 
329 	/*
330 	 * If there is a name that the process can use then give it
331 	 * to the proocess.
332 	 */
333 	if (prv_data->read_flag) {
334 		error = uiomove(prv_data->name,
335 				strnlen(prv_data->name,
336 					ADF_CFG_MAX_SECTION_LEN_IN_BYTES),
337 				uio);
338 		if (error) {
339 			printf("QAT: failed to copy data to user\n");
340 			return error;
341 		}
342 		return 0;
343 	}
344 
345 	return EIO;
346 }
347 
348 static void
349 adf_processes_release(void *data)
350 {
351 	struct adf_processes_priv_data *prv_data = NULL;
352 
353 	prv_data = (struct adf_processes_priv_data *)data;
354 	sx_xlock(&processes_list_sema);
355 	list_del(&prv_data->list);
356 	sx_xunlock(&processes_list_sema);
357 	free(prv_data, M_QAT);
358 }
359 
/* Public entry point: create the qat_dev_processes device node. */
int
adf_processes_dev_register(void)
{
	return adf_chr_drv_create();
}
365 
/* Public entry point: destroy the qat_dev_processes device node. */
void
adf_processes_dev_unregister(void)
{
	adf_chr_drv_destroy();
}
371 
/*
 * Callout handler: wake every /dev/qat_dev_state listener that has
 * pending events -- sleepers (wakeup), poll/select waiters (selwakeup)
 * and kqueue knotes (KNOTE_UNLOCKED).  Re-arms itself only while at
 * least one listener still has something queued.
 *
 * NOTE(review): proc_events_head is traversed here without holding
 * &mtx, while adf_state_open()/adf_state_release() modify it under
 * &mtx -- confirm the callout cannot race with open/close of the
 * state device.
 */
static void
adf_state_callout_notify_ev(void *arg)
{
	int notified = 0;
	struct adf_state_priv_data *priv = NULL;
	struct entry_proc_events *proc_events = NULL;

	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
			notified = 1;
			priv = proc_events->proc_events;
			wakeup(priv);
			selwakeup(&priv->rsel);
			KNOTE_UNLOCKED(&priv->rsel.si_note, 0);
		}
	}
	/* Keep polling while any listener still has undelivered events. */
	if (notified)
		callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}
391 
/*
 * Queue an event for every open /dev/qat_dev_state listener.
 *
 * dev:   accel device id the event refers to; ignored when no such
 *        device is registered.
 * event: event code to deliver.  ADF_EVENT_STOP additionally queues a
 *        synthetic ADF_EVENT_SHUTDOWN entry, so userspace observes the
 *        full stop/shutdown sequence.
 *
 * Allocations use M_NOWAIT because &mtx is held across the loop; on
 * allocation failure the event is silently dropped for that listener.
 * Finally the notify callout is (re)armed to wake the readers.
 */
static void
adf_state_set(int dev, enum adf_event event)
{
	struct adf_accel_dev *accel_dev = NULL;
	struct state_head *head = NULL;
	struct entry_proc_events *proc_events = NULL;
	struct entry_state *state = NULL;

	accel_dev = adf_devmgr_get_dev_by_id(dev);
	if (!accel_dev)
		return;
	mtx_lock(&mtx);
	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		state = NULL;
		head = &proc_events->proc_events->state_head;
		state = malloc(sizeof(struct entry_state),
			       M_QAT,
			       M_NOWAIT | M_ZERO);
		if (!state)
			continue; /* best effort: skip this listener */
		state->state.dev_state = event;
		state->state.dev_id = dev;
		STAILQ_INSERT_TAIL(head, state, entries_state);
		if (event == ADF_EVENT_STOP) {
			/* STOP implies a following SHUTDOWN notification. */
			state = NULL;
			state = malloc(sizeof(struct entry_state),
				       M_QAT,
				       M_NOWAIT | M_ZERO);
			if (!state)
				continue;
			state->state.dev_state = ADF_EVENT_SHUTDOWN;
			state->state.dev_id = dev;
			STAILQ_INSERT_TAIL(head, state, entries_state);
		}
	}
	mtx_unlock(&mtx);
	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}
430 
431 static int
432 adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
433 {
434 	int ret = 0;
435 
436 #if defined(QAT_UIO) && defined(QAT_DBG)
437 	if (event > ADF_EVENT_DBG_SHUTDOWN)
438 		return -EINVAL;
439 #else
440 	if (event > ADF_EVENT_ERROR)
441 		return -EINVAL;
442 #endif /* defined(QAT_UIO) && defined(QAT_DBG) */
443 
444 	switch (event) {
445 	case ADF_EVENT_INIT:
446 		return ret;
447 	case ADF_EVENT_SHUTDOWN:
448 		return ret;
449 	case ADF_EVENT_RESTARTING:
450 		break;
451 	case ADF_EVENT_RESTARTED:
452 		break;
453 	case ADF_EVENT_START:
454 		return ret;
455 	case ADF_EVENT_STOP:
456 		break;
457 	case ADF_EVENT_ERROR:
458 		break;
459 #if defined(QAT_UIO) && defined(QAT_DBG)
460 	case ADF_EVENT_PROC_CRASH:
461 		break;
462 	case ADF_EVENT_MANUAL_DUMP:
463 		break;
464 	case ADF_EVENT_SLICE_HANG:
465 		break;
466 	case ADF_EVENT_DBG_SHUTDOWN:
467 		break;
468 #endif /* defined(QAT_UIO) && defined(QAT_DBG) */
469 	default:
470 		return -1;
471 	}
472 
473 	adf_state_set(accel_dev->accel_id, event);
474 
475 	return 0;
476 }
477 
478 static int
479 adf_state_kqfilter(struct cdev *dev, struct knote *kn)
480 {
481 	struct adf_state_priv_data *priv;
482 
483 	mtx_lock(&mtx);
484 	priv = dev->si_drv1;
485 	switch (kn->kn_filter) {
486 	case EVFILT_READ:
487 		kn->kn_fop = &adf_state_read_filterops;
488 		kn->kn_hook = priv;
489 		knlist_add(&priv->rsel.si_note, kn, 1);
490 		mtx_unlock(&mtx);
491 		return 0;
492 	default:
493 		mtx_unlock(&mtx);
494 		return -EINVAL;
495 	}
496 }
497 
/*
 * kqueue EVFILT_READ event test: unconditionally reports the descriptor
 * as readable.
 *
 * NOTE(review): this fires even when the per-open state queue is empty
 * (a subsequent read then returns 0 bytes) -- confirm whether the test
 * should instead check STAILQ_EMPTY on the queue via kn->kn_hook.
 */
static int
adf_state_kqread_event(struct knote *kn, long hint)
{
	return 1;
}
503 
504 static void
505 adf_state_kqread_detach(struct knote *kn)
506 {
507 	struct adf_state_priv_data *priv = NULL;
508 
509 	mtx_lock(&mtx);
510 	if (!kn) {
511 		mtx_unlock(&mtx);
512 		return;
513 	}
514 	priv = kn->kn_hook;
515 	if (!priv) {
516 		mtx_unlock(&mtx);
517 		return;
518 	}
519 	knlist_remove(&priv->rsel.si_note, kn, 1);
520 	mtx_unlock(&mtx);
521 }
522 
/*
 * Create /dev/qat_dev_state and initialize the machinery behind it:
 * the listener list, the state mutex, the notify callout (driven by its
 * own mutex), and the adf service handle through which device events
 * reach adf_state_event_handler().  Finally arms the callout.
 *
 * NOTE(review): the make_dev() return value is not checked; with these
 * arguments make_dev(9) does not fail, but confirm no *_s error
 * variant is required here.
 */
void
adf_state_init(void)
{
	adf_state_dev = make_dev(&adf_state_cdevsw,
				 0,
				 UID_ROOT,
				 GID_WHEEL,
				 0600,
				 "%s",
				 ADF_DEV_STATE_NAME);
	SLIST_INIT(&proc_events_head);
	/* mtx also backs the per-open knlists (see adf_state_open). */
	mtx_init(&mtx, mtx_name, NULL, MTX_DEF);
	mtx_init(&callout_mtx, mtx_callout_name, NULL, MTX_DEF);
	callout_init_mtx(&callout, &callout_mtx, 0);
	explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl));
	adf_state_hndl.event_hld = adf_state_event_handler;
	adf_state_hndl.name = "adf_state_event_handler";
	adf_service_register(&adf_state_hndl);
	callout_reset(&callout,
		      ADF_STATE_CALLOUT_TIME,
		      adf_state_callout_notify_ev,
		      NULL);
}
546 
/*
 * Tear down everything set up by adf_state_init(): unregister the
 * event handler, stop the notify callout, free any listener-list nodes
 * still present, destroy both mutexes and remove the device node.
 * The per-open private data itself is freed by adf_state_release()
 * when each descriptor closes.
 */
void
adf_state_destroy(void)
{
	struct entry_proc_events *proc_events = NULL;

	adf_service_unregister(&adf_state_hndl);
	/* callout_mtx drives the callout; stop it before destroying. */
	mtx_lock(&callout_mtx);
	callout_stop(&callout);
	mtx_unlock(&callout_mtx);
	mtx_destroy(&callout_mtx);
	mtx_lock(&mtx);
	while (!SLIST_EMPTY(&proc_events_head)) {
		proc_events = SLIST_FIRST(&proc_events_head);
		SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events);
		free(proc_events, M_QAT);
	}
	mtx_unlock(&mtx);
	mtx_destroy(&mtx);
	destroy_dev(adf_state_dev);
}
567 
568 static int
569 adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
570 {
571 	struct adf_state_priv_data *prv_data = NULL;
572 	struct entry_proc_events *entry_proc_events = NULL;
573 	int ret = 0;
574 
575 	prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
576 	if (!prv_data)
577 		return -ENOMEM;
578 	entry_proc_events =
579 	    malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
580 	if (!entry_proc_events) {
581 		free(prv_data, M_QAT);
582 		return -ENOMEM;
583 	}
584 	mtx_lock(&mtx);
585 	prv_data->cdev = dev;
586 	prv_data->cdev->si_drv1 = prv_data;
587 	knlist_init_mtx(&prv_data->rsel.si_note, &mtx);
588 	STAILQ_INIT(&prv_data->state_head);
589 	entry_proc_events->proc_events = prv_data;
590 	SLIST_INSERT_HEAD(&proc_events_head,
591 			  entry_proc_events,
592 			  entries_proc_events);
593 	mtx_unlock(&mtx);
594 	ret = devfs_set_cdevpriv(prv_data, adf_state_release);
595 	if (ret) {
596 		SLIST_REMOVE(&proc_events_head,
597 			     entry_proc_events,
598 			     entry_proc_events,
599 			     entries_proc_events);
600 		free(entry_proc_events, M_QAT);
601 		free(prv_data, M_QAT);
602 	}
603 	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
604 	return ret;
605 }
606 
/*
 * Read handler for /dev/qat_dev_state: copies the oldest queued
 * struct adf_state for this descriptor out to the caller, then
 * dequeues and frees it.  Returns 0 with no data moved when there is
 * no private data or no queued event.  Before returning, re-notifies
 * every listener that still has pending events and re-arms the notify
 * callout.
 *
 * NOTE(review): uiomove() runs with &mtx held; a faulting user buffer
 * could sleep with the mutex held -- verify against WITNESS.  Also, a
 * partial uiomove (user buffer smaller than struct adf_state) still
 * discards the whole entry.
 */
static int
adf_state_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int ret = 0;
	struct adf_state_priv_data *prv_data = NULL;
	struct state_head *state_head = NULL;
	struct entry_state *entry_state = NULL;
	struct adf_state *state = NULL;
	struct entry_proc_events *proc_events = NULL;

	mtx_lock(&mtx);
	ret = devfs_get_cdevpriv((void **)&prv_data);
	if (ret) {
		mtx_unlock(&mtx);
		return 0;
	}
	state_head = &prv_data->state_head;
	if (STAILQ_EMPTY(state_head)) {
		mtx_unlock(&mtx);
		return 0;
	}
	entry_state = STAILQ_FIRST(state_head);
	state = &entry_state->state;
	ret = uiomove(state, sizeof(struct adf_state), uio);
	/* Consume the entry only when the copy-out succeeded. */
	if (!ret && !STAILQ_EMPTY(state_head)) {
		STAILQ_REMOVE_HEAD(state_head, entries_state);
		free(entry_state, M_QAT);
	}
	/* Poke any listener that still has undelivered events. */
	SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
		if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
			prv_data = proc_events->proc_events;
			wakeup(prv_data);
			selwakeup(&prv_data->rsel);
			KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0);
		}
	}
	mtx_unlock(&mtx);
	callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
	return ret;
}
647 
/*
 * cdevpriv destructor for /dev/qat_dev_state: tears down the kqueue
 * knlist and select state, frees every still-queued event, unlinks
 * this listener from proc_events_head and frees the private data.
 * Runs entirely under &mtx, which also backs the knlist.
 *
 * NOTE(review): seldrain() may sleep; confirm calling it with &mtx
 * held is safe here.
 */
static void
adf_state_release(void *data)
{
	struct adf_state_priv_data *prv_data = NULL;
	struct entry_state *entry_state = NULL;
	struct entry_proc_events *entry_proc_events = NULL;
	struct entry_proc_events *tmp = NULL;

	mtx_lock(&mtx);
	prv_data = (struct adf_state_priv_data *)data;
	knlist_delete(&prv_data->rsel.si_note, curthread, 1);
	knlist_destroy(&prv_data->rsel.si_note);
	seldrain(&prv_data->rsel);
	while (!STAILQ_EMPTY(&prv_data->state_head)) {
		entry_state = STAILQ_FIRST(&prv_data->state_head);
		STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state);
		free(entry_state, M_QAT);
	}
	/* SAFE variant: we free the node we are standing on. */
	SLIST_FOREACH_SAFE (entry_proc_events,
			    &proc_events_head,
			    entries_proc_events,
			    tmp) {
		if (entry_proc_events->proc_events == prv_data) {
			SLIST_REMOVE(&proc_events_head,
				     entry_proc_events,
				     entry_proc_events,
				     entries_proc_events);
			free(entry_proc_events, M_QAT);
		}
	}
	free(prv_data, M_QAT);
	mtx_unlock(&mtx);
}
681