/*	$NetBSD: nouveau_nvkm_subdev_pmu_gt215.c,v 1.4 2021/12/19 10:51:59 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_pmu_gt215.c,v 1.4 2021/12/19 10:51:59 riastradh Exp $");

#include "priv.h"
#include "fuc/gt215.fuc3.h"

#include <subdev/timer.h>

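/*
 * gt215_pmu_send - queue one 4-word packet on the host->PMU command FIFO.
 *
 * The register interpretation noted here is inferred from how this file
 * uses the registers (and from the cmdq/msgq offsets in gt215_pmu_flcn
 * below), not from hardware documentation:
 *
 *   0x10a4a0  host->PMU command queue PUT pointer (written by the host)
 *   0x10a4b0  host->PMU command queue GET pointer (advanced by the PMU)
 *   0x10a1c0  DMEM address/control port, 0x10a1c4 auto-incrementing data port
 */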
int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
               u32 process, u32 message, u32 data0, u32 data1)
{
        struct nvkm_subdev *subdev = &pmu->subdev;
        struct nvkm_device *device = subdev->device;
        u32 addr;

        mutex_lock(&subdev->mutex);
        /* wait for a free slot in the fifo */
        addr = nvkm_rd32(device, 0x10a4a0);
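        /* The FIFO appears to be full while the PMU's GET pointer equals
         * our PUT pointer with the wrap bit (bit 3) flipped; poll for up
         * to 2ms for a slot to open before failing with -EBUSY.
         */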
        if (nvkm_msec(device, 2000,
                u32 tmp = nvkm_rd32(device, 0x10a4b0);
                if (tmp != (addr ^ 8))
                        break;
        ) < 0) {
                mutex_unlock(&subdev->mutex);
                return -EBUSY;
        }

        /* we currently only support a single process at a time waiting
         * on a synchronous reply, take the PMU mutex and tell the
         * receive handler what we're waiting for
         */
        if (reply) {
                pmu->recv.message = message;
                pmu->recv.process = process;
        }

        /* acquire data segment access */
        do {
                nvkm_wr32(device, 0x10a580, 0x00000001);
        } while (nvkm_rd32(device, 0x10a580) != 0x00000001);

        /* write the packet */
        nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
                                    pmu->send.base));
        nvkm_wr32(device, 0x10a1c4, process);
        nvkm_wr32(device, 0x10a1c4, message);
        nvkm_wr32(device, 0x10a1c4, data0);
        nvkm_wr32(device, 0x10a1c4, data1);
        nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

        /* release data segment access */
        nvkm_wr32(device, 0x10a580, 0x00000000);

        /* wait for reply, if requested */
        if (reply) {
                int ret;
                DRM_WAIT_NOINTR_UNTIL(ret, &pmu->recv.wait, &subdev->mutex,
                    (pmu->recv.process == 0));
                KASSERT(ret == 0);
                reply[0] = pmu->recv.data[0];
                reply[1] = pmu->recv.data[1];
        }

        mutex_unlock(&subdev->mutex);
        return 0;
}

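/*
 * gt215_pmu_recv - drain one packet from the PMU->host message FIFO.
 *
 * Called from the work item scheduled by gt215_pmu_intr().  Judging by
 * the code, 0x10a4cc is the host's GET pointer and 0x10a4c8 the PMU's
 * PUT pointer; the packet layout (process, message, data0, data1)
 * mirrors what gt215_pmu_send() writes.
 */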
void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
        struct nvkm_subdev *subdev = &pmu->subdev;
        struct nvkm_device *device = subdev->device;
        u32 process, message, data0, data1;

        /* nothing to do if GET == PUT */
        u32 addr = nvkm_rd32(device, 0x10a4cc);
        if (addr == nvkm_rd32(device, 0x10a4c8))
                return;

        /* acquire data segment access */
        do {
                nvkm_wr32(device, 0x10a580, 0x00000002);
        } while (nvkm_rd32(device, 0x10a580) != 0x00000002);

        /* read the packet */
        nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
                                    pmu->recv.base));
        process = nvkm_rd32(device, 0x10a1c4);
        message = nvkm_rd32(device, 0x10a1c4);
        data0 = nvkm_rd32(device, 0x10a1c4);
        data1 = nvkm_rd32(device, 0x10a1c4);
        nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

        /* release data segment access */
        nvkm_wr32(device, 0x10a580, 0x00000000);

        /* wake process if it's waiting on a synchronous reply */
        if (pmu->recv.process) {
                if (process == pmu->recv.process &&
                    message == pmu->recv.message) {
                        mutex_lock(&subdev->mutex);
                        pmu->recv.data[0] = data0;
                        pmu->recv.data[1] = data1;
                        pmu->recv.process = 0;
                        DRM_WAKEUP_ONE(&pmu->recv.wait, &subdev->mutex);
                        mutex_unlock(&subdev->mutex);
                        return;
                }
        }

        /* right now there are no other expected responses from the engine,
         * so assume that any unexpected message is an error.
         */
        nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
                  (char)((process & 0x000000ff) >>  0),
                  (char)((process & 0x0000ff00) >>  8),
                  (char)((process & 0x00ff0000) >> 16),
                  (char)((process & 0xff000000) >> 24),
                  process, message, data0, data1);
}

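/*
 * gt215_pmu_intr - interrupt handler for the PMU falcon.
 *
 * Interrupt bits handled below, as used by this file:
 *   0x00000020  UAS fault, details reported via 0x10a16c/0x10a168
 *   0x00000040  message queue non-empty; work deferred to gt215_pmu_recv()
 *   0x00000080  what looks like a debug "wr32" request from the firmware,
 *               with address/data in 0x10a7a0/0x10a7a4
 * Anything else is logged and acknowledged as unknown.
 */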
void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
        struct nvkm_subdev *subdev = &pmu->subdev;
        struct nvkm_device *device = subdev->device;
        u32 disp = nvkm_rd32(device, 0x10a01c);
        u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

        if (intr & 0x00000020) {
                u32 stat = nvkm_rd32(device, 0x10a16c);
                if (stat & 0x80000000) {
                        nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
                                   stat & 0x00ffffff,
                                   nvkm_rd32(device, 0x10a168));
                        nvkm_wr32(device, 0x10a16c, 0x00000000);
                        intr &= ~0x00000020;
                }
        }

        if (intr & 0x00000040) {
                schedule_work(&pmu->recv.work);
                nvkm_wr32(device, 0x10a004, 0x00000040);
                intr &= ~0x00000040;
        }

        if (intr & 0x00000080) {
                nvkm_info(subdev, "wr32 %06x %08x\n",
                          nvkm_rd32(device, 0x10a7a0),
                          nvkm_rd32(device, 0x10a7a4));
                nvkm_wr32(device, 0x10a004, 0x00000080);
                intr &= ~0x00000080;
        }

        if (intr) {
                nvkm_error(subdev, "intr %08x\n", intr);
                nvkm_wr32(device, 0x10a004, intr);
        }
}

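/*
 * gt215_pmu_fini - quiesce the PMU on suspend/unload.  0x10a014 appears
 * to be an interrupt-disable register paired with the 0x10a010 enable
 * written at the end of gt215_pmu_init().
 */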
void
gt215_pmu_fini(struct nvkm_pmu *pmu)
{
        nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
}

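/*
 * Judging by gt215_pmu_enabled(), bit 0 of 0x022210 gates the PMU;
 * reset pulses it low then high, and the trailing read is presumably
 * there to flush the posted writes.
 */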
static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
        struct nvkm_device *device = pmu->subdev.device;
        nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
        nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
        nvkm_rd32(device, 0x022210);
}

static bool
gt215_pmu_enabled(struct nvkm_pmu *pmu)
{
        return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}

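/*
 * gt215_pmu_init - load and start the built-in PMU firmware.
 *
 * Sequence, as implemented below: upload the data image through the
 * DMEM port (0x10a1c0/0x10a1c4), upload the code image through the
 * IMEM port (0x10a180/0x10a184, with 0x10a188 rewritten every 64
 * words), start the falcon, then wait for the firmware to publish the
 * base/size of the host->PMU (0x10a4d0) and PMU->host (0x10a4dc)
 * queues before enabling its interrupts.
 */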
int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
        struct nvkm_device *device = pmu->subdev.device;
        int i;

        /* upload data segment */
        nvkm_wr32(device, 0x10a1c0, 0x01000000);
        for (i = 0; i < pmu->func->data.size / 4; i++)
                nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

        /* upload code segment */
        nvkm_wr32(device, 0x10a180, 0x01000000);
        for (i = 0; i < pmu->func->code.size / 4; i++) {
                if ((i & 0x3f) == 0)
                        nvkm_wr32(device, 0x10a188, i >> 6);
                nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
        }

        /* start it running */
        nvkm_wr32(device, 0x10a10c, 0x00000000);
        nvkm_wr32(device, 0x10a104, 0x00000000);
        nvkm_wr32(device, 0x10a100, 0x00000002);

        /* wait for valid host->pmu ring configuration */
        if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x10a4d0))
                        break;
        ) < 0)
                return -EBUSY;
        pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
        pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

        /* wait for valid pmu->host ring configuration */
        if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x10a4dc))
                        break;
        ) < 0)
                return -EBUSY;
        pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
        pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

        nvkm_wr32(device, 0x10a010, 0x000000e0);
        return 0;
}

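/*
 * Falcon method table for the PMU.  The cmdq/msgq entries give the
 * queue pointer offsets relative to the falcon base (0x10a000),
 * matching the absolute addresses used by gt215_pmu_send() and
 * gt215_pmu_recv() above; it is non-static, presumably so that later
 * PMU implementations can reuse it.
 */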
const struct nvkm_falcon_func
gt215_pmu_flcn = {
        .debug = 0xc08,
        .fbif = 0xe00,
        .load_imem = nvkm_falcon_v1_load_imem,
        .load_dmem = nvkm_falcon_v1_load_dmem,
        .read_dmem = nvkm_falcon_v1_read_dmem,
        .bind_context = nvkm_falcon_v1_bind_context,
        .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
        .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
        .set_start_addr = nvkm_falcon_v1_set_start_addr,
        .start = nvkm_falcon_v1_start,
        .enable = nvkm_falcon_v1_enable,
        .disable = nvkm_falcon_v1_disable,
        .cmdq = { 0x4a0, 0x4b0, 4 },
        .msgq = { 0x4c8, 0x4cc, 0 },
};

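/*
 * Per-chipset PMU description: the gt215 firmware image from
 * fuc/gt215.fuc3.h plus the hooks implemented in this file.
 */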
static const struct nvkm_pmu_func
gt215_pmu = {
        .flcn = &gt215_pmu_flcn,
        .code.data = gt215_pmu_code,
        .code.size = sizeof(gt215_pmu_code),
        .data.data = gt215_pmu_data,
        .data.size = sizeof(gt215_pmu_data),
        .enabled = gt215_pmu_enabled,
        .reset = gt215_pmu_reset,
        .init = gt215_pmu_init,
        .fini = gt215_pmu_fini,
        .intr = gt215_pmu_intr,
        .send = gt215_pmu_send,
        .recv = gt215_pmu_recv,
};

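/*
 * Firmware interface list.  The single -1 entry appears to be the
 * fallback that uses the built-in image via gf100_pmu_nofw rather than
 * loading external firmware.
 */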
static const struct nvkm_pmu_fwif
gt215_pmu_fwif[] = {
        { -1, gf100_pmu_nofw, &gt215_pmu },
        {}
};

int
gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
        return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu);
}