// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/soc/apple/rtkit.h>

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "dcp.h"
#include "dcp-internal.h"
#include "iomfb.h"
#include "iomfb_internal.h"
#include "parser.h"
#include "trace.h"

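/* Offset within shared memory of the TX buffer for a given context */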
static int dcp_tx_offset(enum dcp_context_id id)
{
	switch (id) {
	case DCP_CONTEXT_CB:
	case DCP_CONTEXT_CMD:
		return 0x00000;
	case DCP_CONTEXT_OOBCB:
	case DCP_CONTEXT_OOBCMD:
		return 0x08000;
	default:
		return -EINVAL;
	}
}

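/*
 * Offset within shared memory where messages for a given context arrive;
 * command contexts reuse their TX offsets.
 */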
static int dcp_channel_offset(enum dcp_context_id id)
{
	switch (id) {
	case DCP_CONTEXT_ASYNC:
		return 0x40000;
	case DCP_CONTEXT_OOBASYNC:
		return 0x48000;
	case DCP_CONTEXT_CB:
		return 0x60000;
	case DCP_CONTEXT_OOBCB:
		return 0x68000;
	default:
		return dcp_tx_offset(id);
	}
}

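/* Build the message telling the DCP where the shared memory buffer lives */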
static inline u64 dcpep_set_shmem(u64 dart_va)
{
	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_SET_SHMEM) |
	       FIELD_PREP(IOMFB_SHMEM_FLAG, IOMFB_SHMEM_FLAG_VALUE) |
	       FIELD_PREP(IOMFB_SHMEM_DVA, dart_va);
}

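/* Pack a context ID, payload length and shared memory offset into a message */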
static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
{
	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_MSG) |
	       FIELD_PREP(IOMFB_MSG_CONTEXT, id) |
	       FIELD_PREP(IOMFB_MSG_OFFSET, offset) |
	       FIELD_PREP(IOMFB_MSG_LENGTH, length);
}

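/* An ack is an empty message for the context with the ACK bit set */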
static inline u64 dcpep_ack(enum dcp_context_id id)
{
	return dcpep_msg(id, 0, 0) | IOMFB_MSG_ACK;
}

/*
 * A channel is busy if we have sent a message that has yet to be
 * acked. The driver must not send a message to a busy channel.
 */
static bool dcp_channel_busy(struct dcp_channel *ch)
{
	return (ch->depth != 0);
}

/*
 * Get the context ID passed to the DCP for a command we push. The rule is
 * simple: callback contexts are used when replying to the DCP, command
 * contexts are used otherwise. That corresponds to a nonzero/zero call stack
 * depth. This rule frees the caller from tracking the call context manually.
 */
static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
{
	u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;

	if (depth)
		return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
	else
		return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
}

/* Get a channel for a context */
static struct dcp_channel *dcp_get_channel(struct apple_dcp *dcp,
					   enum dcp_context_id context)
{
	switch (context) {
	case DCP_CONTEXT_CB:
		return &dcp->ch_cb;
	case DCP_CONTEXT_CMD:
		return &dcp->ch_cmd;
	case DCP_CONTEXT_OOBCB:
		return &dcp->ch_oobcb;
	case DCP_CONTEXT_OOBCMD:
		return &dcp->ch_oobcmd;
	case DCP_CONTEXT_ASYNC:
		return &dcp->ch_async;
	case DCP_CONTEXT_OOBASYNC:
		return &dcp->ch_oobasync;
	default:
		return NULL;
	}
}

/* Get the start of a packet: after the end of the previous packet */
static u16 dcp_packet_start(struct dcp_channel *ch, u8 depth)
{
	if (depth > 0)
		return ch->end[depth - 1];
	else
		return 0;
}

/* Pushes and pops the depth of the call stack with safety checks */
static u8 dcp_push_depth(u8 *depth)
{
	u8 ret = (*depth)++;

	WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
	return ret;
}

static u8 dcp_pop_depth(u8 *depth)
{
	WARN_ON((*depth) == 0);

	return --(*depth);
}

/* Call a DCP function given by a tag */
void dcp_push(struct apple_dcp *dcp, bool oob, const struct dcp_method_entry *call,
	      u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
	      void *cookie)
{
	enum dcp_context_id context = dcp_call_context(dcp, oob);
	struct dcp_channel *ch = dcp_get_channel(dcp, context);

	struct dcp_packet_header header = {
		.in_len = in_len,
		.out_len = out_len,

		/* Tag is reversed due to endianness of the fourcc */
		.tag[0] = call->tag[3],
		.tag[1] = call->tag[2],
		.tag[2] = call->tag[1],
		.tag[3] = call->tag[0],
	};

	u8 depth = dcp_push_depth(&ch->depth);
	u16 offset = dcp_packet_start(ch, depth);

	void *out = dcp->shmem + dcp_tx_offset(context) + offset;
	void *out_data = out + sizeof(header);
	size_t data_len = sizeof(header) + in_len + out_len;

	memcpy(out, &header, sizeof(header));

	if (in_len > 0)
		memcpy(out_data, data, in_len);

	trace_iomfb_push(dcp, call, context, offset, depth);

	ch->callbacks[depth] = cb;
	ch->cookies[depth] = cookie;
	ch->output[depth] = out + sizeof(header) + in_len;
	ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);

	dcp_send_message(dcp, IOMFB_ENDPOINT,
			 dcpep_msg(context, data_len, offset));
}

/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
int dcp_parse_tag(char tag[4])
{
	u32 d[3];
	int i;

	if (tag[3] != 'D')
		return -EINVAL;

	for (i = 0; i < 3; ++i) {
		d[i] = (u32)(tag[i] - '0');

		if (d[i] > 9)
			return -EINVAL;
	}

	return d[0] + (d[1] * 10) + (d[2] * 100);
}

/* Ack a callback from the DCP */
void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
{
	struct dcp_channel *ch = dcp_get_channel(dcp, context);

	dcp_pop_depth(&ch->depth);
	dcp_send_message(dcp, IOMFB_ENDPOINT,
			 dcpep_ack(context));
}

/*
 * Helper to send a DRM hotplug event. The DCP is accessed from a single
 * (RTKit) thread. To handle hotplug callbacks, we need to call
 * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
 * waits for vblank (a DCP callback). That means we deadlock if we call from
 * the RTKit thread! Instead, move the call to another thread via a workqueue.
 */
void dcp_hotplug(struct work_struct *work)
{
	struct apple_connector *connector;
	struct apple_dcp *dcp;

	connector = container_of(work, struct apple_connector, hotplug_wq);

	dcp = platform_get_drvdata(connector->dcp);
	dev_info(dcp->dev, "%s() connected:%d valid_mode:%d nr_modes:%u\n", __func__,
		 connector->connected, dcp->valid_mode, dcp->nr_modes);

	/*
	 * DCP defers link training until we set a display mode. But we set
	 * display modes from atomic_flush, so userspace needs to trigger a
	 * flush, or the CRTC gets no signal.
	 */
	if (connector->base.state && !dcp->valid_mode && connector->connected)
		drm_connector_set_link_status_property(&connector->base,
						       DRM_MODE_LINK_STATUS_BAD);

	drm_kms_helper_connector_hotplug_event(&connector->base);
}
EXPORT_SYMBOL_GPL(dcp_hotplug);

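/*
 * Handle a callback from the DCP: look up the handler for the packet's tag,
 * push a frame on the channel's call stack and, if the handler returns true,
 * ack the callback immediately.
 */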
static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
			    void *data, u32 length, u16 offset)
{
	struct device *dev = dcp->dev;
	struct dcp_packet_header *hdr = data;
	void *in, *out;
	int tag = dcp_parse_tag(hdr->tag);
	struct dcp_channel *ch = dcp_get_channel(dcp, context);
	u8 depth;

	if (tag < 0 || tag >= IOMFB_MAX_CB || !dcp->cb_handlers || !dcp->cb_handlers[tag]) {
		dev_warn(dev, "received unknown callback %c%c%c%c\n",
			 hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
		return;
	}

	in = data + sizeof(*hdr);
	out = in + hdr->in_len;

	// TODO: verify that in_len and out_len match our prototypes
	// for now just clear the out data to have at least consistent results
	if (hdr->out_len)
		memset(out, 0, hdr->out_len);

	depth = dcp_push_depth(&ch->depth);
	ch->output[depth] = out;
	ch->end[depth] = offset + ALIGN(length, DCP_PACKET_ALIGNMENT);

	if (dcp->cb_handlers[tag](dcp, tag, out, in))
		dcp_ack(dcp, context);
}

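/*
 * Handle an ack from the DCP: pop the channel's call stack and invoke the
 * completion callback registered by dcp_push(), if any, with the out data.
 */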
static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
			     void *data, u32 length)
{
	struct dcp_packet_header *header = data;
	struct dcp_channel *ch = dcp_get_channel(dcp, context);
	void *cookie;
	dcp_callback_t cb;

	if (!ch) {
		dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
		return;
	}

	dcp_pop_depth(&ch->depth);

	cb = ch->callbacks[ch->depth];
	cookie = ch->cookies[ch->depth];

	ch->callbacks[ch->depth] = NULL;
	ch->cookies[ch->depth] = NULL;

	if (cb)
		cb(dcp, data + sizeof(*header) + header->in_len, cookie);
}

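/* Demultiplex an incoming message into an ack or a callback */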
static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
{
	enum dcp_context_id ctx_id;
	u16 offset;
	u32 length;
	int channel_offset;
	void *data;

	ctx_id = FIELD_GET(IOMFB_MSG_CONTEXT, message);
	offset = FIELD_GET(IOMFB_MSG_OFFSET, message);
	length = FIELD_GET(IOMFB_MSG_LENGTH, message);

	channel_offset = dcp_channel_offset(ctx_id);

	if (channel_offset < 0) {
		dev_warn(dcp->dev, "invalid context received %u\n", ctx_id);
		return;
	}

	data = dcp->shmem + channel_offset + offset;

	if (FIELD_GET(IOMFB_MSG_ACK, message))
		dcpep_handle_ack(dcp, ctx_id, data, length);
	else
		dcpep_handle_cb(dcp, ctx_id, data, length, offset);
}

/*
 * DRM specifies rectangles as start and end coordinates. DCP specifies
 * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
 * to a DCP rectangle.
 */
struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
{
	return (struct dcp_rect){ .x = rect->x1,
				  .y = rect->y1,
				  .w = drm_rect_width(rect),
				  .h = drm_rect_height(rect) };
}

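/* Translate a DRM fourcc into the fourcc the DCP expects, or 0 if unsupported */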
u32 drm_format_to_dcp(u32 drm)
{
	switch (drm) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return fourcc_code('A', 'R', 'G', 'B');

	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return fourcc_code('A', 'B', 'G', 'R');

	case DRM_FORMAT_XRGB2101010:
		return fourcc_code('r', '0', '3', 'w');
	}

	pr_warn("DRM format %X not supported in DCP\n", drm);
	return 0;
}

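/* Add the modes reported by the DCP to the connector's probed mode list */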
int dcp_get_modes(struct drm_connector *connector)
{
	struct apple_connector *apple_connector = to_apple_connector(connector);
	struct platform_device *pdev = apple_connector->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode;
	int i;

	for (i = 0; i < dcp->nr_modes; ++i) {
		mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);

		if (!mode) {
			dev_err(dev->dev, "Failed to duplicate display mode\n");
			return 0;
		}

		drm_mode_probed_add(connector, mode);
	}

	return dcp->nr_modes;
}
EXPORT_SYMBOL_GPL(dcp_get_modes);

/* The user may own drm_display_mode, so we need to search for our copy */
struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
				     const struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < dcp->nr_modes; ++i) {
		if (drm_mode_match(mode, &dcp->modes[i].mode,
				   DRM_MODE_MATCH_TIMINGS |
				   DRM_MODE_MATCH_CLOCK))
			return &dcp->modes[i];
	}

	return NULL;
}

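/* A mode is valid if and only if the DCP advertised it */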
int dcp_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	struct apple_connector *apple_connector = to_apple_connector(connector);
	struct platform_device *pdev = apple_connector->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
}
EXPORT_SYMBOL_GPL(dcp_mode_valid);

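/*
 * Perform a modeset when the new CRTC state requires one (or when no valid
 * mode is currently set), dispatching to the firmware-specific backend.
 */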
int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
	struct apple_dcp *dcp = platform_get_drvdata(apple_crtc->dcp);
	struct drm_crtc_state *crtc_state;
	int ret = -EIO;
	bool modeset;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (!crtc_state)
		return 0;

	modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;

	if (!modeset)
		return 0;

	/* ignore no mode, poweroff is handled elsewhere */
	if (crtc_state->mode.hdisplay == 0 && crtc_state->mode.vdisplay == 0)
		return 0;

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		ret = iomfb_modeset_v12_3(dcp, crtc_state);
		break;
	case DCP_FIRMWARE_V_13_5:
		ret = iomfb_modeset_v13_3(dcp, crtc_state);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n",
			  dcp->fw_compat);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dcp_crtc_atomic_modeset);

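/* Accept only modes the DCP reported; see the TODO below about scaling */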
bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
			 const struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{
	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
	struct platform_device *pdev = apple_crtc->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	/* TODO: support synthesized modes through scaling */
	return lookup_mode(dcp, mode) != NULL;
}
EXPORT_SYMBOL(dcp_crtc_mode_fixup);

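/*
 * Kick off an atomic flush through the firmware-specific backend. The command
 * channel must be idle; if it is not, fake a vblank event so DRM's vblank
 * waits do not time out.
 */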
void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	if (dcp_channel_busy(&dcp->ch_cmd)) {
		dev_err(dcp->dev, "unexpected busy command channel\n");
		/* HACK: issue a delayed vblank event to avoid timeouts in
		 * drm_atomic_helper_wait_for_vblanks().
		 */
		schedule_work(&dcp->vblank_wq);
		return;
	}

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_flush_v12_3(dcp, crtc, state);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_flush_v13_3(dcp, crtc, state);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
		break;
	}
}
EXPORT_SYMBOL_GPL(dcp_flush);

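/* Bring up the IOMFB interface once the DCP reports it is initialized */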
static void iomfb_start(struct apple_dcp *dcp)
{
	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_start_v12_3(dcp);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_start_v13_3(dcp);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
		break;
	}
}

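/* Report whether the DCP has finished its startup sequence */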
bool dcp_is_initialized(struct platform_device *pdev)
{
	struct apple_dcp *dcp = platform_get_drvdata(pdev);

	return dcp->active;
}
EXPORT_SYMBOL_GPL(dcp_is_initialized);

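/* Entry point for messages arriving on the IOMFB endpoint */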
void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
{
	enum dcpep_type type = FIELD_GET(IOMFB_MESSAGE_TYPE, message);

	if (type == IOMFB_MESSAGE_TYPE_INITIALIZED)
		iomfb_start(dcp);
	else if (type == IOMFB_MESSAGE_TYPE_MSG)
		dcpep_got_msg(dcp, message);
	else
		dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
}

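/*
 * Start the IOMFB endpoint: allocate the shared memory buffer and tell the
 * DCP where it lives.
 */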
int iomfb_start_rtkit(struct apple_dcp *dcp)
{
	dma_addr_t shmem_iova;

	apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);

	dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
					GFP_KERNEL);
	if (!dcp->shmem)
		return -ENOMEM;

	dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));

	return 0;
}

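/* Mark the interface inactive and run the firmware-specific shutdown */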
void iomfb_shutdown(struct apple_dcp *dcp)
{
	/* We're going down */
	dcp->active = false;
	dcp->valid_mode = false;

	switch (dcp->fw_compat) {
	case DCP_FIRMWARE_V_12_3:
		iomfb_shutdown_v12_3(dcp);
		break;
	case DCP_FIRMWARE_V_13_5:
		iomfb_shutdown_v13_3(dcp);
		break;
	default:
		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
		break;
	}
}