// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright The Asahi Linux Contributors
 */

#include <linux/align.h>
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "dcp.h"
#include "dcp-internal.h"
#include "iomfb.h"
#include "iomfb_internal.h"
#include "parser.h"
#include "trace.h"
#include "version_utils.h"

/* Register defines used in bandwidth setup structure */
#define REG_DOORBELL_BIT(idx) (2 + (idx))

struct dcp_wait_cookie {
	struct kref refcount;
	struct completion done;
};

static void release_wait_cookie(struct kref *ref)
{
	struct dcp_wait_cookie *cookie;
	cookie = container_of(ref, struct dcp_wait_cookie, refcount);

	kfree(cookie);
}

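/*
 * RPC thunks. The DCP_THUNK_* and IOMFB_THUNK_* macros (presumably defined in
 * the iomfb/dcp headers included above) expand to small typed wrappers around
 * the IOMFB endpoint RPC mechanism. Judging from the call sites in this file,
 * a DCP_THUNK_INOUT(foo, tag, req_t, resp_t) declaration yields roughly
 *
 *	foo(dcp, oob, &req, completion_cb, cookie);
 *
 * where the second argument appears to select the out-of-band command context
 * and completion_cb(dcp, out, cookie) runs once the firmware answers; the
 * _IN/_OUT/_VOID variants drop the request and/or response types. This note is
 * a reader's sketch inferred from usage, not a definition of the macros.
 */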
DCP_THUNK_OUT(iomfb_a131_pmu_service_matched, iomfbep_a131_pmu_service_matched, u32);
DCP_THUNK_OUT(iomfb_a132_backlight_service_matched, iomfbep_a132_backlight_service_matched, u32);
DCP_THUNK_OUT(iomfb_a358_vi_set_temperature_hint, iomfbep_a358_vi_set_temperature_hint, u32);

IOMFB_THUNK_INOUT(set_matrix);
IOMFB_THUNK_INOUT(get_color_remap_mode);
IOMFB_THUNK_INOUT(last_client_close);
IOMFB_THUNK_INOUT(abort_swaps_dcp);

DCP_THUNK_INOUT(dcp_swap_submit, dcpep_swap_submit,
		struct DCP_FW_NAME(dcp_swap_submit_req),
		struct DCP_FW_NAME(dcp_swap_submit_resp));

DCP_THUNK_INOUT(dcp_swap_start, dcpep_swap_start, struct dcp_swap_start_req,
		struct dcp_swap_start_resp);

DCP_THUNK_INOUT(dcp_set_power_state, dcpep_set_power_state,
		struct dcp_set_power_state_req,
		struct dcp_set_power_state_resp);

DCP_THUNK_INOUT(dcp_set_digital_out_mode, dcpep_set_digital_out_mode,
		struct dcp_set_digital_out_mode_req, u32);

DCP_THUNK_INOUT(dcp_set_display_device, dcpep_set_display_device, u32, u32);

DCP_THUNK_OUT(dcp_set_display_refresh_properties,
	      dcpep_set_display_refresh_properties, u32);

#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
DCP_THUNK_INOUT(dcp_late_init_signal, dcpep_late_init_signal, u32, u32);
#else
DCP_THUNK_OUT(dcp_late_init_signal, dcpep_late_init_signal, u32);
#endif
DCP_THUNK_IN(dcp_flush_supports_power, dcpep_flush_supports_power, u32);
DCP_THUNK_OUT(dcp_create_default_fb, dcpep_create_default_fb, u32);
DCP_THUNK_OUT(dcp_start_signal, dcpep_start_signal, u32);
DCP_THUNK_VOID(dcp_setup_video_limits, dcpep_setup_video_limits);
DCP_THUNK_VOID(dcp_set_create_dfb, dcpep_set_create_dfb);
DCP_THUNK_VOID(dcp_first_client_open, dcpep_first_client_open);

DCP_THUNK_INOUT(dcp_set_parameter_dcp, dcpep_set_parameter_dcp,
		struct dcp_set_parameter_dcp, u32);

DCP_THUNK_INOUT(dcp_enable_disable_video_power_savings,
		dcpep_enable_disable_video_power_savings, u32, int);

DCP_THUNK_OUT(dcp_is_main_display, dcpep_is_main_display, u32);

/* DCP callback handlers */
static void dcpep_cb_nop(struct apple_dcp *dcp)
{
	/* No operation */
}

static u8 dcpep_cb_true(struct apple_dcp *dcp)
{
	return true;
}

static u8 dcpep_cb_false(struct apple_dcp *dcp)
{
	return false;
}

static u32 dcpep_cb_zero(struct apple_dcp *dcp)
{
	return 0;
}

static void dcpep_cb_swap_complete(struct apple_dcp *dcp,
				   struct DCP_FW_NAME(dc_swap_complete_resp) *resp)
{
	trace_iomfb_swap_complete(dcp, resp->swap_id);
	dcp->last_swap_id = resp->swap_id;

	dcp_drm_crtc_vblank(dcp->crtc);
}

/* special */
static void complete_vi_set_temperature_hint(struct apple_dcp *dcp, void *out, void *cookie)
{
	// ack D100 cb_match_pmu_service
	dcp_ack(dcp, DCP_CONTEXT_CB);
}

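/*
 * Several of the callbacks below use a "deferred ACK" pattern: the callback
 * returns false so its context is not acknowledged immediately, a new RPC is
 * issued towards the firmware, and the RPC's completion handler writes the
 * result into ch->output[ch->depth - 1] before calling
 * dcp_ack(dcp, DCP_CONTEXT_CB) to release the original callback.
 */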
static bool iomfbep_cb_match_pmu_service(struct apple_dcp *dcp, int tag, void *out, void *in)
{
	trace_iomfb_callback(dcp, tag, __func__);
	iomfb_a358_vi_set_temperature_hint(dcp, false,
					   complete_vi_set_temperature_hint,
					   NULL);

	// return false for deferred ACK
	return false;
}

static void complete_pmu_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_channel *ch = &dcp->ch_cb;
	u8 *succ = ch->output[ch->depth - 1];

	*succ = true;

	// ack D206 cb_match_pmu_service_2
	dcp_ack(dcp, DCP_CONTEXT_CB);
}

static bool iomfbep_cb_match_pmu_service_2(struct apple_dcp *dcp, int tag, void *out, void *in)
{
	trace_iomfb_callback(dcp, tag, __func__);

	iomfb_a131_pmu_service_matched(dcp, false, complete_pmu_service_matched,
				       out);

	// return false for deferred ACK
	return false;
}

static void complete_backlight_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_channel *ch = &dcp->ch_cb;
	u8 *succ = ch->output[ch->depth - 1];

	*succ = true;

	// ack D206 cb_match_backlight_service
	dcp_ack(dcp, DCP_CONTEXT_CB);
}

static bool iomfbep_cb_match_backlight_service(struct apple_dcp *dcp, int tag, void *out, void *in)
{
	trace_iomfb_callback(dcp, tag, __func__);

	if (!dcp_has_panel(dcp)) {
		u8 *succ = out;
		*succ = true;
		return true;
	}

	iomfb_a132_backlight_service_matched(dcp, false, complete_backlight_service_matched, out);

	// return false for deferred ACK
	return false;
}

static void iomfb_cb_pr_publish(struct apple_dcp *dcp, struct iomfb_property *prop)
{
	switch (prop->id) {
	case IOMFB_PROPERTY_NITS:
	{
		if (dcp_has_panel(dcp)) {
			dcp->brightness.nits = prop->value / dcp->brightness.scale;
			/* notify backlight device of the initial brightness */
			if (!dcp->brightness.bl_dev && dcp->brightness.maximum > 0)
				schedule_work(&dcp->bl_register_wq);
			trace_iomfb_brightness(dcp, prop->value);
		}
		break;
	}
	default:
		dev_dbg(dcp->dev, "pr_publish: id: %d = %u\n", prop->id, prop->value);
	}
}

static struct dcp_get_uint_prop_resp
dcpep_cb_get_uint_prop(struct apple_dcp *dcp, struct dcp_get_uint_prop_req *req)
{
	struct dcp_get_uint_prop_resp resp = (struct dcp_get_uint_prop_resp){
		.value = 0
	};

	if (dcp->panel.has_mini_led &&
	    memcmp(req->obj, "SUMP", sizeof(req->obj)) == 0) { /* "PMUS" */
		if (strncmp(req->key, "Temperature", sizeof(req->key)) == 0) {
			/*
			 * TODO: value from j314c, find out if it is temperature in
			 * centigrade C and which temperature sensor reports it
			 */
			resp.value = 3029;
			resp.ret = true;
		}
	}

	return resp;
}

static u8 iomfbep_cb_sr_set_property_int(struct apple_dcp *dcp,
					 struct iomfb_sr_set_property_int_req *req)
{
	if (memcmp(req->obj, "FMOI", sizeof(req->obj)) == 0) { /* "IOMF" */
		if (strncmp(req->key, "Brightness_Scale", sizeof(req->key)) == 0) {
			if (!req->value_null)
				dcp->brightness.scale = req->value;
		}
	}

	return 1;
}

static void iomfbep_cb_set_fx_prop(struct apple_dcp *dcp, struct iomfb_set_fx_prop_req *req)
{
	// TODO: trace this, see if there are properties which need to be used later
}

/*
 * Callback to map a buffer allocated with allocate_buf for PIODMA usage.
 * PIODMA is separate from the main DCP and uses its own IOVA space on a
 * dedicated stream of the display DART, rather than the expected DCP DART.
 */
static struct dcp_map_buf_resp dcpep_cb_map_piodma(struct apple_dcp *dcp,
						   struct dcp_map_buf_req *req)
{
	struct dcp_mem_descriptor *memdesc;
	struct sg_table *map;
	ssize_t ret;

	if (req->buffer >= ARRAY_SIZE(dcp->memdesc))
		goto reject;

	memdesc = &dcp->memdesc[req->buffer];
	map = &memdesc->map;

	if (!map->sgl)
		goto reject;

	/* use the piodma iommu domain to map against the right IOMMU */
	ret = iommu_map_sgtable(dcp->iommu_dom, memdesc->dva, map,
				IOMMU_READ | IOMMU_WRITE);

	/* HACK: expect size to be 16K aligned since the iommu API only maps
	 * full pages
	 */
	if (ret < 0 || ret != ALIGN(memdesc->size, SZ_16K)) {
		dev_err(dcp->dev, "iommu_map_sgtable() returned %zd instead of expected buffer size of %zu\n", ret, memdesc->size);
		goto reject;
	}

	return (struct dcp_map_buf_resp){ .dva = memdesc->dva };

reject:
	dev_err(dcp->dev, "denying map of invalid buffer %llx for piodma\n",
		req->buffer);
	return (struct dcp_map_buf_resp){ .ret = EINVAL };
}

static void dcpep_cb_unmap_piodma(struct apple_dcp *dcp,
				  struct dcp_unmap_buf_resp *resp)
{
	struct dcp_mem_descriptor *memdesc;

	if (resp->buffer >= ARRAY_SIZE(dcp->memdesc)) {
		dev_warn(dcp->dev, "unmap request for out of range buffer %llu\n",
			 resp->buffer);
		return;
	}

	memdesc = &dcp->memdesc[resp->buffer];

	if (!memdesc->buf) {
		dev_warn(dcp->dev,
			 "unmap for non-mapped buffer %llu iova:0x%08llx\n",
			 resp->buffer, resp->dva);
		return;
	}

	if (memdesc->dva != resp->dva) {
		dev_warn(dcp->dev, "unmap buffer %llu address mismatch "
			 "memdesc.dva:%llx dva:%llx\n", resp->buffer,
			 memdesc->dva, resp->dva);
		return;
	}

	/* use the piodma iommu domain to unmap from the right IOMMU */
	iommu_unmap(dcp->iommu_dom, memdesc->dva, memdesc->size);
}

/*
 * Allocate an IOVA contiguous buffer mapped to the DCP. The buffer need not be
 * physically contiguous, however we should save the sgtable in case the
 * buffer needs to be later mapped for PIODMA.
 */
static struct dcp_allocate_buffer_resp
dcpep_cb_allocate_buffer(struct apple_dcp *dcp,
			 struct dcp_allocate_buffer_req *req)
{
	struct dcp_allocate_buffer_resp resp = { 0 };
	struct dcp_mem_descriptor *memdesc;
	size_t size;
	u32 id;

	resp.dva_size = ALIGN(req->size, 4096);
	resp.mem_desc_id =
		find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);

	if (resp.mem_desc_id >= DCP_MAX_MAPPINGS) {
		dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring\n");
		resp.dva_size = 0;
		resp.mem_desc_id = 0;
		return resp;
	}
	id = resp.mem_desc_id;
	set_bit(id, dcp->memdesc_map);

	memdesc = &dcp->memdesc[id];

	memdesc->size = resp.dva_size;
	/* HACK: align size to 16K since the iommu API only maps full pages */
	size = ALIGN(resp.dva_size, SZ_16K);
	memdesc->buf = dma_alloc_coherent(dcp->dev, size,
					  &memdesc->dva, GFP_KERNEL);

	dma_get_sgtable(dcp->dev, &memdesc->map, memdesc->buf, memdesc->dva,
			size);
	resp.dva = memdesc->dva;

	return resp;
}

static u8 dcpep_cb_release_mem_desc(struct apple_dcp *dcp, u32 *mem_desc_id)
{
	struct dcp_mem_descriptor *memdesc;
	u32 id = *mem_desc_id;

	if (id >= DCP_MAX_MAPPINGS) {
		dev_warn(dcp->dev,
			 "unmap request for out of range mem_desc_id %u", id);
		return 0;
	}

	if (!test_and_clear_bit(id, dcp->memdesc_map)) {
		dev_warn(dcp->dev, "unmap request for unused mem_desc_id %u\n",
			 id);
		return 0;
	}

	memdesc = &dcp->memdesc[id];
	if (memdesc->buf) {
		dma_free_coherent(dcp->dev, memdesc->size, memdesc->buf,
				  memdesc->dva);

		memdesc->buf = NULL;
		memset(&memdesc->map, 0, sizeof(memdesc->map));
	} else {
		memdesc->reg = 0;
	}

	memdesc->size = 0;

	return 1;
}

/* Validate that the specified region is a display register */
static bool is_disp_register(struct apple_dcp *dcp, u64 start, u64 end)
{
	int i;

	for (i = 0; i < dcp->nr_disp_registers; ++i) {
		struct resource *r = dcp->disp_registers[i];

		if ((start >= r->start) && (end <= r->end))
			return true;
	}

	return false;
}

/*
 * Map contiguous physical memory into the DCP's address space. The firmware
 * uses this to map the display registers we advertise in
 * sr_map_device_memory_with_index, so we bounds-check against that to guard
 * against malicious coprocessors.
 */
static struct dcp_map_physical_resp
dcpep_cb_map_physical(struct apple_dcp *dcp, struct dcp_map_physical_req *req)
{
	int size = ALIGN(req->size, 4096);
	dma_addr_t dva;
	u32 id;

	if (!is_disp_register(dcp, req->paddr, req->paddr + size - 1)) {
		dev_err(dcp->dev, "refusing to map phys address %llx size %llx\n",
			req->paddr, req->size);
		return (struct dcp_map_physical_resp){};
	}

	id = find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
	set_bit(id, dcp->memdesc_map);
	dcp->memdesc[id].size = size;
	dcp->memdesc[id].reg = req->paddr;

	dva = dma_map_resource(dcp->dev, req->paddr, size, DMA_BIDIRECTIONAL, 0);
	WARN_ON(dva == DMA_MAPPING_ERROR);

	return (struct dcp_map_physical_resp){
		.dva_size = size,
		.mem_desc_id = id,
		.dva = dva,
	};
}

static u64 dcpep_cb_get_frequency(struct apple_dcp *dcp)
{
	return clk_get_rate(dcp->clk);
}

static struct DCP_FW_NAME(dcp_map_reg_resp) dcpep_cb_map_reg(struct apple_dcp *dcp,
							     struct DCP_FW_NAME(dcp_map_reg_req) *req)
{
	if (req->index >= dcp->nr_disp_registers) {
		dev_warn(dcp->dev, "attempted to read invalid reg index %u\n",
			 req->index);

		return (struct DCP_FW_NAME(dcp_map_reg_resp)){ .ret = 1 };
	} else {
		struct resource *rsrc = dcp->disp_registers[req->index];
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
		dma_addr_t dva = dma_map_resource(dcp->dev, rsrc->start, resource_size(rsrc),
						  DMA_BIDIRECTIONAL, 0);
		WARN_ON(dva == DMA_MAPPING_ERROR);
#endif

		return (struct DCP_FW_NAME(dcp_map_reg_resp)){
			.addr = rsrc->start,
			.length = resource_size(rsrc),
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
			.dva = dva,
#endif
		};
	}
}

static struct dcp_read_edt_data_resp
dcpep_cb_read_edt_data(struct apple_dcp *dcp, struct dcp_read_edt_data_req *req)
{
	return (struct dcp_read_edt_data_resp){
		.value[0] = req->value[0],
		.ret = 0,
	};
}

static void iomfbep_cb_enable_backlight_message_ap_gated(struct apple_dcp *dcp,
							 u8 *enabled)
{
	/*
	 * Update backlight brightness on the next swap; on non mini-LED
	 * displays DCP seems to set an invalid iDAC value after coming out
	 * of DPMS.
	 * syslog: "[BrightnessLCD.cpp:743][AFK]nitsToDBV: iDAC out of range"
	 */
	dcp->brightness.update = true;
	schedule_work(&dcp->bl_update_wq);
}

/* Chunked data transfer for property dictionaries */
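/*
 * The transfer happens in three phases: prop_start allocates a buffer of the
 * announced total length, prop_chunk copies each bounds-checked piece into
 * it, and prop_end parses the assembled blob (TimingElements into display
 * modes, DisplayAttributes into physical dimensions) before the buffer is
 * freed for the next transfer.
 */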
static u8 dcpep_cb_prop_start(struct apple_dcp *dcp, u32 *length)
{
	if (dcp->chunks.data != NULL) {
		dev_warn(dcp->dev, "ignoring spurious transfer start\n");
		return false;
	}

	dcp->chunks.length = *length;
	dcp->chunks.data = devm_kzalloc(dcp->dev, *length, GFP_KERNEL);

	if (!dcp->chunks.data) {
		dev_warn(dcp->dev, "failed to allocate chunks\n");
		return false;
	}

	return true;
}

static u8 dcpep_cb_prop_chunk(struct apple_dcp *dcp,
			      struct dcp_set_dcpav_prop_chunk_req *req)
{
	if (!dcp->chunks.data) {
		dev_warn(dcp->dev, "ignoring spurious chunk\n");
		return false;
	}

	if (req->offset + req->length > dcp->chunks.length) {
		dev_warn(dcp->dev, "ignoring overflowing chunk\n");
		return false;
	}

	memcpy(dcp->chunks.data + req->offset, req->data, req->length);
	return true;
}

static bool dcpep_process_chunks(struct apple_dcp *dcp,
				 struct dcp_set_dcpav_prop_end_req *req)
{
	struct dcp_parse_ctx ctx;
	int ret;

	if (!dcp->chunks.data) {
		dev_warn(dcp->dev, "ignoring spurious end\n");
		return false;
	}

	/* used just as opaque pointer for tracing */
	ctx.dcp = dcp;

	ret = parse(dcp->chunks.data, dcp->chunks.length, &ctx);

	if (ret) {
		dev_warn(dcp->dev, "bad header on dcpav props\n");
		return false;
	}

	if (!strcmp(req->key, "TimingElements")) {
		dcp->modes = enumerate_modes(&ctx, &dcp->nr_modes,
					     dcp->width_mm, dcp->height_mm,
					     dcp->notch_height);

		if (IS_ERR(dcp->modes)) {
			dev_warn(dcp->dev, "failed to parse modes\n");
			dcp->modes = NULL;
			dcp->nr_modes = 0;
			return false;
		}
		if (dcp->nr_modes == 0)
			dev_warn(dcp->dev, "TimingElements without valid modes!\n");
	} else if (!strcmp(req->key, "DisplayAttributes")) {
		ret = parse_display_attributes(&ctx, &dcp->width_mm,
					       &dcp->height_mm);

		if (ret) {
			dev_warn(dcp->dev, "failed to parse display attribs\n");
			return false;
		}

		dcp_set_dimensions(dcp);
	}

	return true;
}

static u8 dcpep_cb_prop_end(struct apple_dcp *dcp,
			    struct dcp_set_dcpav_prop_end_req *req)
{
	u8 resp = dcpep_process_chunks(dcp, req);

	/* Reset for the next transfer */
	devm_kfree(dcp->dev, dcp->chunks.data);
	dcp->chunks.data = NULL;

	return resp;
}

/* Boot sequence */
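/*
 * The boot handshake is driven by dcpep_cb_boot_1, which defers its ACK and
 * then chains set_create_dfb -> create_default_fb -> setup_video_limits ->
 * flush_supports_power -> late_init_signal -> set_display_refresh_properties;
 * boot_done finally writes the success flag into the callback channel's
 * output buffer and acknowledges the original callback.
 */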
static void boot_done(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_channel *ch = &dcp->ch_cb;
	u8 *succ = ch->output[ch->depth - 1];
	dev_dbg(dcp->dev, "boot done\n");

	*succ = true;
	dcp_ack(dcp, DCP_CONTEXT_CB);
}

static void boot_5(struct apple_dcp *dcp, void *out, void *cookie)
{
	dcp_set_display_refresh_properties(dcp, false, boot_done, NULL);
}

static void boot_4(struct apple_dcp *dcp, void *out, void *cookie)
{
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
	u32 v_true = 1;
	dcp_late_init_signal(dcp, false, &v_true, boot_5, NULL);
#else
	dcp_late_init_signal(dcp, false, boot_5, NULL);
#endif
}

static void boot_3(struct apple_dcp *dcp, void *out, void *cookie)
{
	u32 v_true = true;

	dcp_flush_supports_power(dcp, false, &v_true, boot_4, NULL);
}

static void boot_2(struct apple_dcp *dcp, void *out, void *cookie)
{
	dcp_setup_video_limits(dcp, false, boot_3, NULL);
}

static void boot_1_5(struct apple_dcp *dcp, void *out, void *cookie)
{
	dcp_create_default_fb(dcp, false, boot_2, NULL);
}

/* Use special function signature to defer the ACK */
static bool dcpep_cb_boot_1(struct apple_dcp *dcp, int tag, void *out, void *in)
{
	trace_iomfb_callback(dcp, tag, __func__);
	dcp_set_create_dfb(dcp, false, boot_1_5, NULL);
	return false;
}

static struct dcp_allocate_bandwidth_resp dcpep_cb_allocate_bandwidth(struct apple_dcp *dcp,
								      struct dcp_allocate_bandwidth_req *req)
{
	return (struct dcp_allocate_bandwidth_resp){
		.unk1 = req->unk1,
		.unk2 = req->unk2,
		.ret = 1,
	};
}

static struct dcp_rt_bandwidth dcpep_cb_rt_bandwidth(struct apple_dcp *dcp)
{
	struct dcp_rt_bandwidth rt_bw = (struct dcp_rt_bandwidth){
		.reg_scratch = 0,
		.reg_doorbell = 0,
		.doorbell_bit = 0,
	};

	if (dcp->disp_bw_scratch_index) {
		u32 offset = dcp->disp_bw_scratch_offset;
		u32 index = dcp->disp_bw_scratch_index;
		rt_bw.reg_scratch = dcp->disp_registers[index]->start + offset;
	}

	if (dcp->disp_bw_doorbell_index) {
		u32 index = dcp->disp_bw_doorbell_index;
		rt_bw.reg_doorbell = dcp->disp_registers[index]->start;
		rt_bw.doorbell_bit = REG_DOORBELL_BIT(dcp->index);
		/*
		 * This is most certainly not padding. t8103-dcp crashes without
		 * setting this immediately during modeset on 12.3 and 13.5
		 * firmware.
		 */
		rt_bw.padding[3] = 0x4;
	}

	return rt_bw;
}

static struct dcp_set_frame_sync_props_resp
dcpep_cb_set_frame_sync_props(struct apple_dcp *dcp,
			      struct dcp_set_frame_sync_props_req *req)
{
	return (struct dcp_set_frame_sync_props_resp){};
}

/* Callback to get the current time as milliseconds since the UNIX epoch */
static u64 dcpep_cb_get_time(struct apple_dcp *dcp)
{
	return ktime_to_ms(ktime_get_real());
}

struct dcp_swap_cookie {
	struct kref refcount;
	struct completion done;
	u32 swap_id;
};

static void release_swap_cookie(struct kref *ref)
{
	struct dcp_swap_cookie *cookie;
	cookie = container_of(ref, struct dcp_swap_cookie, refcount);

	kfree(cookie);
}

static void dcp_swap_cleared(struct apple_dcp *dcp, void *data, void *cookie)
{
	struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;

	if (cookie) {
		struct dcp_swap_cookie *info = cookie;
		complete(&info->done);
		kref_put(&info->refcount, release_swap_cookie);
	}

	if (resp->ret) {
		dev_err(dcp->dev, "swap_clear failed! status %u\n", resp->ret);
		dcp_drm_crtc_vblank(dcp->crtc);
		return;
	}

	while (!list_empty(&dcp->swapped_out_fbs)) {
		struct dcp_fb_reference *entry;
		entry = list_first_entry(&dcp->swapped_out_fbs,
					 struct dcp_fb_reference, head);
		if (entry->swap_id == dcp->last_swap_id)
			break;
		if (entry->fb)
			drm_framebuffer_put(entry->fb);
		list_del(&entry->head);
		kfree(entry);
	}
}

static void dcp_swap_clear_started(struct apple_dcp *dcp, void *data,
				   void *cookie)
{
	struct dcp_swap_start_resp *resp = data;
	DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;

	if (cookie) {
		struct dcp_swap_cookie *info = cookie;
		info->swap_id = resp->swap_id;
	}

	dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swap_cleared, cookie);
}

static void dcp_on_final(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_wait_cookie *wait = cookie;

	if (wait) {
		complete(&wait->done);
		kref_put(&wait->refcount, release_wait_cookie);
	}
}

static void dcp_on_set_power_state(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_set_power_state_req req = {
		.unklong = 1,
	};

	dcp_set_power_state(dcp, false, &req, dcp_on_final, cookie);
}

static void dcp_on_set_parameter(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_set_parameter_dcp param = {
		.param = 14,
		.value = { 0 },
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
		.count = 3,
#else
		.count = 1,
#endif
	};

	dcp_set_parameter_dcp(dcp, false, &param, dcp_on_set_power_state, cookie);
}

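/*
 * Power-on chain: set_display_device (handle 0 when dcp->main_display is set,
 * 2 otherwise; the non-main path additionally issues set_parameter_dcp with
 * param 14), then set_power_state(1). The waiting caller is signalled through
 * a refcounted wait cookie so either side may drop its reference first.
 */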
void DCP_FW_NAME(iomfb_poweron)(struct apple_dcp *dcp)
{
	struct dcp_wait_cookie *cookie;
	int ret;
	u32 handle;
	dev_info(dcp->dev, "dcp_poweron() starting\n");

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return;

	init_completion(&cookie->done);
	kref_init(&cookie->refcount);
	/* increase refcount to ensure the receiver has a reference */
	kref_get(&cookie->refcount);

	if (dcp->main_display) {
		handle = 0;
		dcp_set_display_device(dcp, false, &handle, dcp_on_set_power_state,
				       cookie);
	} else {
		handle = 2;
		dcp_set_display_device(dcp, false, &handle,
				       dcp_on_set_parameter, cookie);
	}
	ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(500));

	if (ret == 0)
		dev_warn(dcp->dev, "wait for power timed out\n");

	kref_put(&cookie->refcount, release_wait_cookie);

	/* Force a brightness update after poweron, to restore the brightness */
	dcp->brightness.update = true;
}

static void complete_set_powerstate(struct apple_dcp *dcp, void *out,
				    void *cookie)
{
	struct dcp_wait_cookie *wait = cookie;

	if (wait) {
		complete(&wait->done);
		kref_put(&wait->refcount, release_wait_cookie);
	}
}

static void last_client_closed_poff(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_set_power_state_req power_req = {
		.unklong = 0,
	};
	dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate,
			    cookie);
}

static void aborted_swaps_dcp_poff(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct iomfb_last_client_close_req last_client_req = {};
	iomfb_last_client_close(dcp, false, &last_client_req,
				last_client_closed_poff, cookie);
}

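/*
 * Power-off first submits a clearing swap (all surfaces NULL, black
 * background, backlight forced off on panels) and waits for it, then chains
 * abort_swaps_dcp -> last_client_close -> set_power_state(0) and waits up to
 * one second for the firmware to acknowledge.
 */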
void DCP_FW_NAME(iomfb_poweroff)(struct apple_dcp *dcp)
{
	int ret, swap_id;
	struct iomfb_abort_swaps_dcp_req abort_req = {
		.client = {
			.flag2 = 1,
		},
	};
	struct dcp_swap_cookie *cookie;
	struct dcp_wait_cookie *poff_cookie;
	struct dcp_swap_start_req swap_req = { 0 };
	struct DCP_FW_NAME(dcp_swap_submit_req) *swap = &DCP_FW_UNION(dcp->swap);

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return;
	init_completion(&cookie->done);
	kref_init(&cookie->refcount);
	/* increase refcount to ensure the receiver has a reference */
	kref_get(&cookie->refcount);

	// clear surfaces
	memset(swap, 0, sizeof(*swap));

	swap->swap.swap_enabled =
		swap->swap.swap_completed = IOMFB_SET_BACKGROUND | 0x7;
	swap->swap.bg_color = 0xFF000000;

	/*
	 * Turn off the backlight. This matters because the DCP's idea of
	 * backlight brightness gets desynced after a power change, and it
	 * needs to be told it's going to turn off so it will consider the
	 * subsequent update on poweron an actual change and restore the
	 * brightness.
	 */
	if (dcp_has_panel(dcp)) {
		swap->swap.bl_unk = 1;
		swap->swap.bl_value = 0;
		swap->swap.bl_power = 0;
	}

	for (int l = 0; l < SWAP_SURFACES; l++)
		swap->surf_null[l] = true;
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
	for (int l = 0; l < 5; l++)
		swap->surf2_null[l] = true;
	swap->unkU32Ptr_null = true;
	swap->unkU32out_null = true;
#endif

	dcp_swap_start(dcp, false, &swap_req, dcp_swap_clear_started, cookie);

	ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(50));
	swap_id = cookie->swap_id;
	kref_put(&cookie->refcount, release_swap_cookie);
	if (ret <= 0) {
		dcp->crashed = true;
		return;
	}

	dev_dbg(dcp->dev, "%s: clear swap submitted: %u\n", __func__, swap_id);

	poff_cookie = kzalloc(sizeof(*poff_cookie), GFP_KERNEL);
	if (!poff_cookie)
		return;
	init_completion(&poff_cookie->done);
	kref_init(&poff_cookie->refcount);
	/* increase refcount to ensure the receiver has a reference */
	kref_get(&poff_cookie->refcount);

	iomfb_abort_swaps_dcp(dcp, false, &abort_req,
			      aborted_swaps_dcp_poff, poff_cookie);
	ret = wait_for_completion_timeout(&poff_cookie->done,
					  msecs_to_jiffies(1000));

	if (ret == 0)
		dev_warn(dcp->dev, "setPowerState(0) timeout %u ms\n", 1000);
	else if (ret > 0)
		dev_dbg(dcp->dev,
			"setPowerState(0) finished with %d ms to spare",
			jiffies_to_msecs(ret));

	kref_put(&poff_cookie->refcount, release_wait_cookie);

	dev_info(dcp->dev, "dcp_poweroff() done\n");
}

static void last_client_closed_sleep(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct dcp_set_power_state_req power_req = {
		.unklong = 0,
	};
	dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate, cookie);
}

static void aborted_swaps_dcp_sleep(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct iomfb_last_client_close_req req = { 0 };
	iomfb_last_client_close(dcp, false, &req, last_client_closed_sleep, cookie);
}

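/*
 * Sleep skips the clearing swap and only runs the abort_swaps_dcp ->
 * last_client_close -> set_power_state(0) chain used by power-off.
 */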
void DCP_FW_NAME(iomfb_sleep)(struct apple_dcp *dcp)
{
	int ret;
	struct iomfb_abort_swaps_dcp_req req = {
		.client = {
			.flag2 = 1,
		},
	};

	struct dcp_wait_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return;
	init_completion(&cookie->done);
	kref_init(&cookie->refcount);
	/* increase refcount to ensure the receiver has a reference */
	kref_get(&cookie->refcount);

	iomfb_abort_swaps_dcp(dcp, false, &req, aborted_swaps_dcp_sleep,
			      cookie);
	ret = wait_for_completion_timeout(&cookie->done,
					  msecs_to_jiffies(1000));

	if (ret == 0)
		dev_warn(dcp->dev, "setDCPPower(0) timeout %u ms\n", 1000);

	kref_put(&cookie->refcount, release_wait_cookie);
	dev_info(dcp->dev, "dcp_sleep() done\n");
}

static void dcpep_cb_hotplug(struct apple_dcp *dcp, u64 *connected)
{
	struct apple_connector *connector = dcp->connector;

	/* DCP issues hotplug_gated callbacks after SetPowerState() calls on
	 * devices with display (macbooks, imacs). This must not result in
	 * connector state changes on DRM side. Some applications won't enable
	 * a CRTC with a connector in disconnected state. Weston after DPMS off
	 * is one example. dcp_is_main_display() returns true on devices with
	 * integrated display. Ignore the hotplug_gated() callbacks there.
	 */
	if (dcp->main_display)
		return;

	if (dcp->during_modeset) {
		dev_info(dcp->dev,
			 "cb_hotplug() ignored during modeset connected:%llu\n",
			 *connected);
		return;
	}

	dev_info(dcp->dev, "cb_hotplug() connected:%llu, valid_mode:%d\n",
		 *connected, dcp->valid_mode);

	/* Hotplug invalidates mode. DRM doesn't always handle this. */
	if (!(*connected)) {
		dcp->valid_mode = false;
		/* after unplug swap will not complete until the next
		 * set_digital_out_mode */
		schedule_work(&dcp->vblank_wq);
	}

	if (connector && connector->connected != !!(*connected)) {
		connector->connected = !!(*connected);
		dcp->valid_mode = false;
		schedule_work(&connector->hotplug_wq);
	}
}

static void
dcpep_cb_swap_complete_intent_gated(struct apple_dcp *dcp,
				    struct dcp_swap_complete_intent_gated *info)
{
	trace_iomfb_swap_complete_intent_gated(dcp, info->swap_id,
					       info->width, info->height);
}

static void
dcpep_cb_abort_swap_ap_gated(struct apple_dcp *dcp, u32 *swap_id)
{
	trace_iomfb_abort_swap_ap_gated(dcp, *swap_id);
}

static struct dcpep_get_tiling_state_resp
dcpep_cb_get_tiling_state(struct apple_dcp *dcp,
			  struct dcpep_get_tiling_state_req *req)
{
	return (struct dcpep_get_tiling_state_resp){
		.value = 0,
		.ret = 1,
	};
}

static u8 dcpep_cb_create_backlight_service(struct apple_dcp *dcp)
{
	return dcp_has_panel(dcp);
}

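/*
 * Callback dispatch glue. The TRAMPOLINE_VOID/_IN/_OUT/_INOUT macros wrap
 * each typed handler above in a stub with the generic callback signature,
 * casting the raw in/out message buffers to the request and response types
 * listed here. This summary is inferred from how the trampolines are declared
 * and used, not from the macro definitions themselves.
 */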
TRAMPOLINE_VOID(trampoline_nop, dcpep_cb_nop);
TRAMPOLINE_OUT(trampoline_true, dcpep_cb_true, u8);
TRAMPOLINE_OUT(trampoline_false, dcpep_cb_false, u8);
TRAMPOLINE_OUT(trampoline_zero, dcpep_cb_zero, u32);
TRAMPOLINE_IN(trampoline_swap_complete, dcpep_cb_swap_complete,
	      struct DCP_FW_NAME(dc_swap_complete_resp));
TRAMPOLINE_INOUT(trampoline_get_uint_prop, dcpep_cb_get_uint_prop,
		 struct dcp_get_uint_prop_req, struct dcp_get_uint_prop_resp);
TRAMPOLINE_IN(trampoline_set_fx_prop, iomfbep_cb_set_fx_prop,
	      struct iomfb_set_fx_prop_req)
TRAMPOLINE_INOUT(trampoline_map_piodma, dcpep_cb_map_piodma,
		 struct dcp_map_buf_req, struct dcp_map_buf_resp);
TRAMPOLINE_IN(trampoline_unmap_piodma, dcpep_cb_unmap_piodma,
	      struct dcp_unmap_buf_resp);
TRAMPOLINE_INOUT(trampoline_sr_set_property_int, iomfbep_cb_sr_set_property_int,
		 struct iomfb_sr_set_property_int_req, u8);
TRAMPOLINE_INOUT(trampoline_allocate_buffer, dcpep_cb_allocate_buffer,
		 struct dcp_allocate_buffer_req,
		 struct dcp_allocate_buffer_resp);
TRAMPOLINE_INOUT(trampoline_map_physical, dcpep_cb_map_physical,
		 struct dcp_map_physical_req, struct dcp_map_physical_resp);
TRAMPOLINE_INOUT(trampoline_release_mem_desc, dcpep_cb_release_mem_desc, u32,
		 u8);
TRAMPOLINE_INOUT(trampoline_map_reg, dcpep_cb_map_reg,
		 struct DCP_FW_NAME(dcp_map_reg_req),
		 struct DCP_FW_NAME(dcp_map_reg_resp));
TRAMPOLINE_INOUT(trampoline_read_edt_data, dcpep_cb_read_edt_data,
		 struct dcp_read_edt_data_req, struct dcp_read_edt_data_resp);
TRAMPOLINE_INOUT(trampoline_prop_start, dcpep_cb_prop_start, u32, u8);
TRAMPOLINE_INOUT(trampoline_prop_chunk, dcpep_cb_prop_chunk,
		 struct dcp_set_dcpav_prop_chunk_req, u8);
TRAMPOLINE_INOUT(trampoline_prop_end, dcpep_cb_prop_end,
		 struct dcp_set_dcpav_prop_end_req, u8);
TRAMPOLINE_INOUT(trampoline_allocate_bandwidth, dcpep_cb_allocate_bandwidth,
		 struct dcp_allocate_bandwidth_req, struct dcp_allocate_bandwidth_resp);
TRAMPOLINE_OUT(trampoline_rt_bandwidth, dcpep_cb_rt_bandwidth,
	       struct dcp_rt_bandwidth);
TRAMPOLINE_INOUT(trampoline_set_frame_sync_props, dcpep_cb_set_frame_sync_props,
		 struct dcp_set_frame_sync_props_req,
		 struct dcp_set_frame_sync_props_resp);
TRAMPOLINE_OUT(trampoline_get_frequency, dcpep_cb_get_frequency, u64);
TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64);
TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64);
TRAMPOLINE_IN(trampoline_swap_complete_intent_gated,
	      dcpep_cb_swap_complete_intent_gated,
	      struct dcp_swap_complete_intent_gated);
TRAMPOLINE_IN(trampoline_abort_swap_ap_gated, dcpep_cb_abort_swap_ap_gated, u32);
TRAMPOLINE_IN(trampoline_enable_backlight_message_ap_gated,
	      iomfbep_cb_enable_backlight_message_ap_gated, u8);
TRAMPOLINE_IN(trampoline_pr_publish, iomfb_cb_pr_publish,
	      struct iomfb_property);
TRAMPOLINE_INOUT(trampoline_get_tiling_state, dcpep_cb_get_tiling_state,
		 struct dcpep_get_tiling_state_req, struct dcpep_get_tiling_state_resp);
TRAMPOLINE_OUT(trampoline_create_backlight_service, dcpep_cb_create_backlight_service, u8);

/*
 * Callback for swap requests. If a swap failed, we'll never get a swap
 * complete event so we need to fake a vblank event early to avoid a hang.
 */

static void dcp_swapped(struct apple_dcp *dcp, void *data, void *cookie)
{
	struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;

	if (resp->ret) {
		dev_err(dcp->dev, "swap failed! status %u\n", resp->ret);
		dcp_drm_crtc_vblank(dcp->crtc);
		return;
	}

	while (!list_empty(&dcp->swapped_out_fbs)) {
		struct dcp_fb_reference *entry;
		entry = list_first_entry(&dcp->swapped_out_fbs,
					 struct dcp_fb_reference, head);
		if (entry->swap_id == dcp->last_swap_id)
			break;
		if (entry->fb)
			drm_framebuffer_put(entry->fb);
		list_del(&entry->head);
		kfree(entry);
	}
}

static void dcp_swap_started(struct apple_dcp *dcp, void *data, void *cookie)
{
	struct dcp_swap_start_resp *resp = data;

	DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;

	trace_iomfb_swap_submit(dcp, resp->swap_id);
	dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swapped, NULL);
}

/* Helpers to modeset and swap, used to flush */
static void do_swap(struct apple_dcp *dcp, void *data, void *cookie)
{
	struct dcp_swap_start_req start_req = { 0 };

	if (dcp->connector && dcp->connector->connected)
		dcp_swap_start(dcp, false, &start_req, dcp_swap_started, NULL);
	else
		dcp_drm_crtc_vblank(dcp->crtc);
}

static void complete_set_digital_out_mode(struct apple_dcp *dcp, void *data,
					  void *cookie)
{
	struct dcp_wait_cookie *wait = cookie;

	if (wait) {
		complete(&wait->done);
		kref_put(&wait->refcount, release_wait_cookie);
	}
}

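/*
 * Modeset: look up the DCP mode matching the requested DRM mode, issue
 * set_digital_out_mode(color, timing) and block for up to 8.5 seconds, which
 * leaves 500 ms of headroom over the firmware's own ~8 second internal
 * timeout.
 */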
int DCP_FW_NAME(iomfb_modeset)(struct apple_dcp *dcp,
			       struct drm_crtc_state *crtc_state)
{
	struct dcp_display_mode *mode;
	struct dcp_wait_cookie *cookie;
	struct dcp_color_mode *cmode = NULL;
	int ret;

	mode = lookup_mode(dcp, &crtc_state->mode);
	if (!mode) {
		dev_err(dcp->dev, "no match for " DRM_MODE_FMT "\n",
			DRM_MODE_ARG(&crtc_state->mode));
		return -EIO;
	}

	dev_info(dcp->dev,
		 "set_digital_out_mode(color:%d timing:%d) " DRM_MODE_FMT "\n",
		 mode->color_mode_id, mode->timing_mode_id,
		 DRM_MODE_ARG(&crtc_state->mode));
	if (mode->color_mode_id == mode->sdr_rgb.id)
		cmode = &mode->sdr_rgb;
	else if (mode->color_mode_id == mode->sdr_444.id)
		cmode = &mode->sdr_444;
	else if (mode->color_mode_id == mode->sdr.id)
		cmode = &mode->sdr;
	else if (mode->color_mode_id == mode->best.id)
		cmode = &mode->best;
	if (cmode)
		dev_info(dcp->dev,
			 "set_digital_out_mode() color mode depth:%hhu format:%u "
			 "colorimetry:%u eotf:%u range:%u\n", cmode->depth,
			 cmode->format, cmode->colorimetry, cmode->eotf,
			 cmode->range);

	dcp->mode = (struct dcp_set_digital_out_mode_req){
		.color_mode_id = mode->color_mode_id,
		.timing_mode_id = mode->timing_mode_id
	};

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie) {
		return -ENOMEM;
	}

	init_completion(&cookie->done);
	kref_init(&cookie->refcount);
	/* increase refcount to ensure the receiver has a reference */
	kref_get(&cookie->refcount);

	dcp->during_modeset = true;

	dcp_set_digital_out_mode(dcp, false, &dcp->mode,
				 complete_set_digital_out_mode, cookie);

	/*
	 * The DCP firmware has an internal timeout of ~8 seconds for
	 * modesets. Add an extra 500 ms to be on the safe side so that the
	 * modeset call has returned by then.
	 */
	ret = wait_for_completion_timeout(&cookie->done,
					  msecs_to_jiffies(8500));

	kref_put(&cookie->refcount, release_wait_cookie);
	dcp->during_modeset = false;
	dev_info(dcp->dev, "set_digital_out_mode finished:%d\n", ret);

	if (ret == 0) {
		dev_info(dcp->dev, "set_digital_out_mode timed out\n");
		return -EIO;
	} else if (ret < 0) {
		dev_info(dcp->dev,
			 "waiting on set_digital_out_mode failed:%d\n", ret);
		return -EIO;

	} else if (ret > 0) {
		dev_dbg(dcp->dev,
			"set_digital_out_mode finished with %d to spare\n",
			jiffies_to_msecs(ret));
	}
	dcp->valid_mode = true;

	return 0;
}

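/*
 * Build and submit a swap for an atomic flush: surface 0 is skipped (it has
 * limitations at least on t600x), each enabled plane fills one swap surface,
 * framebuffers being swapped out are kept alive until their swap completes,
 * and brightness/CTM updates piggyback on the same swap where applicable.
 */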
void DCP_FW_NAME(iomfb_flush)(struct apple_dcp *dcp, struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_state, *old_state;
	struct drm_crtc_state *crtc_state;
	struct DCP_FW_NAME(dcp_swap_submit_req) *req = &DCP_FW_UNION(dcp->swap);
	int plane_idx, l;
	int has_surface = 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

	/* Reset to defaults */
	memset(req, 0, sizeof(*req));
	for (l = 0; l < SWAP_SURFACES; l++)
		req->surf_null[l] = true;
#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
	for (l = 0; l < 5; l++)
		req->surf2_null[l] = true;
	req->unkU32Ptr_null = true;
	req->unkU32out_null = true;
#endif

	/*
	 * Clear all surfaces on startup. The boot framebuffer in surface 0
	 * sticks around.
	 */
	if (!dcp->surfaces_cleared) {
		req->swap.swap_enabled = IOMFB_SET_BACKGROUND | 0x7;
		req->swap.bg_color = 0xFF000000;
		dcp->surfaces_cleared = true;
	}

	// Surface 0 has limitations at least on t600x.
	l = 1;
	for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
		struct drm_framebuffer *fb = new_state->fb;
		struct drm_gem_dma_object *obj;
		struct drm_rect src_rect;
		bool is_premultiplied = false;

		/* skip planes not for this crtc */
		if (old_state->crtc != crtc && new_state->crtc != crtc)
			continue;

		WARN_ON(l >= SWAP_SURFACES);

		req->swap.swap_enabled |= BIT(l);

		if (old_state->fb && fb != old_state->fb) {
			/*
			 * Race condition between a framebuffer unbind getting
			 * swapped out and GEM unreferencing a framebuffer. If
			 * we lose the race, the display gets IOVA faults and
			 * the DCP crashes. We need to extend the lifetime of
			 * the drm_framebuffer (and hence the GEM object) until
			 * after we get a swap complete for the swap unbinding
			 * it.
			 */
			struct dcp_fb_reference *entry =
				kzalloc(sizeof(*entry), GFP_KERNEL);
			if (entry) {
				entry->fb = old_state->fb;
				entry->swap_id = dcp->last_swap_id;
				list_add_tail(&entry->head,
					      &dcp->swapped_out_fbs);
			}
			drm_framebuffer_get(old_state->fb);
		}

		if (!new_state->fb) {
			l += 1;
			continue;
		}
		req->surf_null[l] = false;
		has_surface = 1;

		/*
		 * DCP doesn't support XBGR8 / XRGB8 natively. Blending as
		 * pre-multiplied alpha with a black background can be used as
		 * a workaround for the bottommost plane.
		 */
		if (fb->format->format == DRM_FORMAT_XRGB8888 ||
		    fb->format->format == DRM_FORMAT_XBGR8888)
			is_premultiplied = true;

		drm_rect_fp_to_int(&src_rect, &new_state->src);

		req->swap.src_rect[l] = drm_to_dcp_rect(&src_rect);
		req->swap.dst_rect[l] = drm_to_dcp_rect(&new_state->dst);

		if (dcp->notch_height > 0)
			req->swap.dst_rect[l].y += dcp->notch_height;

		/* The obvious helper call drm_fb_dma_get_gem_addr() adjusts
		 * the address for source x/y offsets. Since IOMFB has direct
		 * support for the source position, prefer that.
		 */
		obj = drm_fb_dma_get_gem_obj(fb, 0);
		if (obj)
			req->surf_iova[l] = obj->dma_addr + fb->offsets[0];

		req->surf[l] = (struct DCP_FW_NAME(dcp_surface)){
			.is_premultiplied = is_premultiplied,
			.format = drm_format_to_dcp(fb->format->format),
			.xfer_func = DCP_XFER_FUNC_SDR,
			.colorspace = DCP_COLORSPACE_NATIVE,
			.stride = fb->pitches[0],
			.width = fb->width,
			.height = fb->height,
			.buf_size = fb->height * fb->pitches[0],
			.surface_id = req->swap.surf_ids[l],

			/* Only used for compressed or multiplanar surfaces */
			.pix_size = 1,
			.pel_w = 1,
			.pel_h = 1,
			.has_comp = 1,
			.has_planes = 1,
		};

		l += 1;
	}

	if (!has_surface && !crtc_state->color_mgmt_changed) {
		if (crtc_state->enable && crtc_state->active &&
		    !crtc_state->planes_changed) {
			schedule_work(&dcp->vblank_wq);
			return;
		}

		/* Set black background */
		req->swap.swap_enabled |= IOMFB_SET_BACKGROUND;
		req->swap.bg_color = 0xFF000000;
		req->clear = 1;
	}

	/* These fields should be set together */
	req->swap.swap_completed = req->swap.swap_enabled;

	/* update brightness if changed */
	if (dcp_has_panel(dcp) && dcp->brightness.update) {
		req->swap.bl_unk = 1;
		req->swap.bl_value = dcp->brightness.dac;
		req->swap.bl_power = 0x40;
		dcp->brightness.update = false;
	}

	if (crtc_state->color_mgmt_changed && crtc_state->ctm) {
		struct iomfb_set_matrix_req mat;
		struct drm_color_ctm *ctm = (struct drm_color_ctm *)crtc_state->ctm->data;

		mat.unk_u32 = 9;
		mat.r[0] = ctm->matrix[0];
		mat.r[1] = ctm->matrix[1];
		mat.r[2] = ctm->matrix[2];
		mat.g[0] = ctm->matrix[3];
		mat.g[1] = ctm->matrix[4];
		mat.g[2] = ctm->matrix[5];
		mat.b[0] = ctm->matrix[6];
		mat.b[1] = ctm->matrix[7];
		mat.b[2] = ctm->matrix[8];

		iomfb_set_matrix(dcp, false, &mat, do_swap, NULL);
	} else
		do_swap(dcp, NULL, NULL);
}

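/*
 * Startup chain entered from dcp_started(): get_color_remap_mode ->
 * enable_disable_video_power_savings(0) -> first_client_open ->
 * is_main_display; res_is_main_display records the result, reports the
 * connector state and completes dcp->start_done.
 */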
static void res_is_main_display(struct apple_dcp *dcp, void *out, void *cookie)
{
	struct apple_connector *connector;
	int result = *(int *)out;
	dev_info(dcp->dev, "DCP is_main_display: %d\n", result);

	dcp->main_display = result != 0;

	connector = dcp->connector;
	if (connector) {
		connector->connected = dcp->nr_modes > 0;
		schedule_work(&connector->hotplug_wq);
	}

	dcp->active = true;
	complete(&dcp->start_done);
}

static void init_3(struct apple_dcp *dcp, void *out, void *cookie)
{
	dcp_is_main_display(dcp, false, res_is_main_display, NULL);
}

static void init_2(struct apple_dcp *dcp, void *out, void *cookie)
{
	dcp_first_client_open(dcp, false, init_3, NULL);
}

static void init_1(struct apple_dcp *dcp, void *out, void *cookie)
{
	u32 val = 0;
	dcp_enable_disable_video_power_savings(dcp, false, &val, init_2, NULL);
}

static void dcp_started(struct apple_dcp *dcp, void *data, void *cookie)
{
	struct iomfb_get_color_remap_mode_req color_remap =
		(struct iomfb_get_color_remap_mode_req){
			.mode = 6,
		};

	dev_info(dcp->dev, "DCP booted\n");

	iomfb_get_color_remap_mode(dcp, false, &color_remap, init_1, cookie);
}

void DCP_FW_NAME(iomfb_shutdown)(struct apple_dcp *dcp)
{
	struct dcp_set_power_state_req req = {
		/* defaults are ok */
	};

	dcp_set_power_state(dcp, false, &req, NULL, NULL);
}