/*	$NetBSD: nouveau_nvkm_core_event.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $	*/

/*
 * Copyright 2013-2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_core_event.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $");

#include <core/event.h>
#include <core/notify.h>

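/*
 * Drop one reference per requested type on the given notify index.
 * When the last reference to a type goes away, the implementation's
 * fini() hook is called so it can disable that event source.  The
 * caller must hold event->refs_lock.
 */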
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (--event->refs[index * event->types_nr + type] == 0) {
			if (event->func->fini)
				event->func->fini(event, 1 << type, index);
		}
	}
}

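/*
 * Take one reference per requested type on the given notify index.
 * On the first reference to a type, the implementation's init() hook
 * is called so it can enable that event source.  The caller must hold
 * event->refs_lock.
 */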
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (++event->refs[index * event->types_nr + type] == 1) {
			if (event->func->init)
				event->func->init(event, 1 << type, index);
		}
	}
}

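/*
 * Dispatch an event to every notifier registered on the given index
 * whose type mask overlaps the signalled types, either through the
 * implementation's send() hook or the generic nvkm_notify_send().
 */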
void
nvkm_event_send(struct nvkm_event *event, u32 types, int index,
		void *data, u32 size)
{
	struct nvkm_notify *notify;
	unsigned long flags;

	if (!event->refs || WARN_ON(index >= event->index_nr))
		return;

	spin_lock_irqsave(&event->list_lock, flags);
	list_for_each_entry(notify, &event->list, head) {
		if (notify->index == index && (notify->types & types)) {
			if (event->func->send) {
				event->func->send(data, size, notify);
				continue;
			}
			nvkm_notify_send(notify, data, size);
		}
	}
	spin_unlock_irqrestore(&event->list_lock, flags);
}

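/*
 * Tear down an event: free the per-index/per-type reference counts and
 * destroy the locks set up by nvkm_event_init().
 */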
void
nvkm_event_fini(struct nvkm_event *event)
{
	if (event->refs) {
		kfree(event->refs);
		event->refs = NULL;
	}
	spin_lock_destroy(&event->list_lock);
	spin_lock_destroy(&event->refs_lock);
}

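/*
 * Initialize an event with types_nr event types and index_nr notify
 * indices, allocating one reference counter per (index, type) pair.
 */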
int
nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
		struct nvkm_event *event)
{
	event->refs = kzalloc(array3_size(index_nr, types_nr,
					  sizeof(*event->refs)),
			      GFP_KERNEL);
	if (!event->refs)
		return -ENOMEM;

	event->func = func;
	event->types_nr = types_nr;
	event->index_nr = index_nr;
	spin_lock_init(&event->refs_lock);
	spin_lock_init(&event->list_lock);
	INIT_LIST_HEAD(&event->list);
	return 0;
}