/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

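/* Allocate a blank topology entry with every field marked as unknown
 * (no address, fault ID, engine, runlist, reset or interrupt bit) and
 * append it to the tail of top->device; the caller fills in whatever
 * it discovers while parsing the device topology.
 */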
struct nvkm_top_device *
nvkm_top_device_new(struct nvkm_top *top)
{
	struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info) {
		info->index = NVKM_SUBDEV_NR;
		info->addr = 0;
		info->fault = -1;
		info->engine = -1;
		info->runlist = -1;
		info->reset = -1;
		info->intr = -1;
		list_add_tail(&info->head, &top->device);
	}
	return info;
}

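/* Return the base MMIO address recorded for subdev @index, or 0 if the
 * topology is unavailable or contains no entry for that subdev.
 */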
u32
nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index == index)
				return info->addr;
		}
	}

	return 0;
}

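/* Return the reset bit recorded for subdev @index as a mask (BIT(reset)),
 * or 0 if the subdev has no reset bit in the topology.
 */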
u32
nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index == index && info->reset >= 0)
				return BIT(info->reset);
		}
	}

	return 0;
}

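/* Return the interrupt bit recorded for subdev @devidx as a mask
 * (BIT(intr)), or 0 if the subdev has no interrupt bit in the topology.
 */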
u32
nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index == devidx && info->intr >= 0)
				return BIT(info->intr);
		}
	}

	return 0;
}

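/* Translate a pending interrupt bitfield into a mask of subdev indices.
 *
 * Every topology entry whose interrupt bit is set in @intr contributes
 * BIT_ULL(index) to *@psubdevs; the return value is @intr with all of
 * the recognised bits cleared, i.e. whatever is left for the caller to
 * handle by other means.
 *
 * Illustrative sketch only (not a real call site; "stat" stands for a
 * previously-read pending-interrupt value):
 *
 *	u64 subdevs;
 *	u32 unhandled = nvkm_top_intr(device, stat, &subdevs);
 *	while (subdevs) {
 *		int unit = __ffs64(subdevs);
 *		// ...dispatch to the handler for subdev "unit"...
 *		subdevs &= ~BIT_ULL(unit);
 *	}
 */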
u32
nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;
	u64 subdevs = 0;
	u32 handled = 0;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
				if (intr & BIT(info->intr)) {
					subdevs |= BIT_ULL(info->index);
					handled |= BIT(info->intr);
				}
			}
		}
	}

	*psubdevs = subdevs;
	return intr & ~handled;
}

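/* Return the fault unit ID recorded for subdev @devidx, or -ENOENT if
 * no topology entry for that subdev carries a fault ID.
 */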
int
nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	list_for_each_entry(info, &top->device, head) {
		if (info->index == devidx && info->fault >= 0)
			return info->fault;
	}

	return -ENOENT;
}

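/* Reverse lookup: map fault unit ID @fault back to a subdev index, or
 * NVKM_SUBDEV_NR if no topology entry claims that ID.
 */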
enum nvkm_devidx
nvkm_top_fault(struct nvkm_device *device, int fault)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	list_for_each_entry(info, &top->device, head) {
		if (info->fault == fault)
			return info->index;
	}

	return NVKM_SUBDEV_NR;
}

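/* Return the subdev index of the @index'th topology entry that has both
 * an engine ID and a runlist ID, filling in *@runl and *@engn on the
 * way out.  Returns -ENODEV once @index is past the last such entry.
 */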
enum nvkm_devidx
nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;
	int n = 0;

	list_for_each_entry(info, &top->device, head) {
		if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
			*runl = info->runlist;
			*engn = info->engine;
			return info->index;
		}
	}

	return -ENODEV;
}

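/* One-time init: hand off to the chip-specific code that parses the
 * device topology and populates top->device.
 */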
static int
nvkm_top_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_top *top = nvkm_top(subdev);
	return top->func->oneinit(top);
}

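/* Destructor: free every topology entry before the subdev itself is
 * released.
 */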
static void *
nvkm_top_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_top *top = nvkm_top(subdev);
	struct nvkm_top_device *info, *temp;

	list_for_each_entry_safe(info, temp, &top->device, head) {
		list_del(&info->head);
		kfree(info);
	}

	return top;
}

static const struct nvkm_subdev_func
nvkm_top = {
	.dtor = nvkm_top_dtor,
	.oneinit = nvkm_top_oneinit,
};

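/* Common constructor: allocate the subdev, wire up the common subdev
 * functions, record the chip-specific @func and start with an empty
 * device list.
 */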
int
nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
	      int index, struct nvkm_top **ptop)
{
	struct nvkm_top *top;
	if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
	top->func = func;
	INIT_LIST_HEAD(&top->device);
	return 0;
}