// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/soc/apple/rtkit.h>

#include "afk.h"
#include "trace.h"

struct afk_receive_message_work {
	struct apple_dcp_afkep *ep;
	u64 message;
	struct work_struct work;
};

#define RBEP_TYPE GENMASK(63, 48)

enum rbep_msg_type {
	RBEP_INIT = 0x80,
	RBEP_INIT_ACK = 0xa0,
	RBEP_GETBUF = 0x89,
	RBEP_GETBUF_ACK = 0xa1,
	RBEP_INIT_TX = 0x8a,
	RBEP_INIT_RX = 0x8b,
	RBEP_START = 0xa3,
	RBEP_START_ACK = 0x86,
	RBEP_SEND = 0xa2,
	RBEP_RECV = 0x85,
	RBEP_SHUTDOWN = 0xc0,
	RBEP_SHUTDOWN_ACK = 0xc1,
};
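
/*
 * Handshake, as implemented below: afk_start() sends RBEP_INIT and the
 * coprocessor answers with RBEP_INIT_ACK followed by RBEP_GETBUF.
 * afk_getbuf() allocates the shared buffer and replies with RBEP_GETBUF_ACK
 * carrying its DVA. The coprocessor then describes the TX and RX rings
 * inside that buffer via RBEP_INIT_TX/RBEP_INIT_RX and, once both rings are
 * ready, we send RBEP_START and wait for RBEP_START_ACK. During normal
 * operation RBEP_SEND announces our updated TX write pointer and RBEP_RECV
 * tells us to drain the RX ring.
 */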

#define BLOCK_SHIFT 6

#define GETBUF_SIZE GENMASK(31, 16)
#define GETBUF_TAG GENMASK(15, 0)
#define GETBUF_ACK_DVA GENMASK(47, 0)

#define INITRB_OFFSET GENMASK(47, 32)
#define INITRB_SIZE GENMASK(31, 16)
#define INITRB_TAG GENMASK(15, 0)

#define SEND_WPTR GENMASK(31, 0)

static void afk_send(struct apple_dcp_afkep *ep, u64 message)
{
	dcp_send_message(ep->dcp, ep->endpoint, message);
}

struct apple_dcp_afkep *afk_init(struct apple_dcp *dcp, u32 endpoint,
				 const struct apple_epic_service_ops *ops)
{
	struct apple_dcp_afkep *afkep;
	int ret;

	afkep = devm_kzalloc(dcp->dev, sizeof(*afkep), GFP_KERNEL);
	if (!afkep)
		return ERR_PTR(-ENOMEM);

	afkep->ops = ops;
	afkep->dcp = dcp;
	afkep->endpoint = endpoint;
	afkep->wq = alloc_ordered_workqueue("apple-dcp-afkep%02x",
					    WQ_MEM_RECLAIM, endpoint);
	if (!afkep->wq) {
		ret = -ENOMEM;
		goto out_free_afkep;
	}

	// TODO: devm_ for wq

	init_completion(&afkep->started);
	init_completion(&afkep->stopped);
	mtx_init(&afkep->lock, IPL_TTY);

	return afkep;

out_free_afkep:
	devm_kfree(dcp->dev, afkep);
	return ERR_PTR(ret);
}

int afk_start(struct apple_dcp_afkep *ep)
{
	int ret;

	reinit_completion(&ep->started);
	apple_rtkit_start_ep(ep->dcp->rtk, ep->endpoint);
	afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_INIT));

	ret = wait_for_completion_timeout(&ep->started, msecs_to_jiffies(1000));
	if (ret <= 0)
		return -ETIMEDOUT;
	else
		return 0;
}

static void afk_getbuf(struct apple_dcp_afkep *ep, u64 message)
{
	u16 size = FIELD_GET(GETBUF_SIZE, message) << BLOCK_SHIFT;
	u16 tag = FIELD_GET(GETBUF_TAG, message);
	u64 reply;

	trace_afk_getbuf(ep, size, tag);

	if (ep->bfr) {
		dev_err(ep->dcp->dev,
			"Got GETBUF message but buffer already exists\n");
		return;
	}

	ep->bfr = dmam_alloc_coherent(ep->dcp->dev, size, &ep->bfr_dma,
				      GFP_KERNEL);
	if (!ep->bfr) {
		dev_err(ep->dcp->dev, "Failed to allocate %d bytes buffer\n",
			size);
		return;
	}

	ep->bfr_size = size;
	ep->bfr_tag = tag;

	reply = FIELD_PREP(RBEP_TYPE, RBEP_GETBUF_ACK);
	reply |= FIELD_PREP(GETBUF_ACK_DVA, ep->bfr_dma);
	afk_send(ep, reply);
}

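/*
 * RBEP_INIT_TX/RBEP_INIT_RX describe one ring each inside the shared buffer
 * allocated in afk_getbuf(): a block-aligned offset and size. A ring starts
 * with a small header (bfr->hdr, holding the buffer size and the read/write
 * pointers) directly followed by its data area.
 */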
static void afk_init_rxtx(struct apple_dcp_afkep *ep, u64 message,
			  struct afk_ringbuffer *bfr)
{
	u16 base = FIELD_GET(INITRB_OFFSET, message) << BLOCK_SHIFT;
	u16 size = FIELD_GET(INITRB_SIZE, message) << BLOCK_SHIFT;
	u16 tag = FIELD_GET(INITRB_TAG, message);
	u32 bufsz, end;

	if (tag != ep->bfr_tag) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: expected tag 0x%x but got 0x%x\n",
			ep->endpoint, ep->bfr_tag, tag);
		return;
	}

	if (bfr->ready) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: buffer is already initialized\n",
			ep->endpoint);
		return;
	}

	if (base >= ep->bfr_size) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: requested base 0x%x >= max size 0x%lx\n",
			ep->endpoint, base, ep->bfr_size);
		return;
	}

	end = base + size;
	if (end > ep->bfr_size) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: requested end 0x%x > max size 0x%lx\n",
			ep->endpoint, end, ep->bfr_size);
		return;
	}

	bfr->hdr = ep->bfr + base;
	bufsz = le32_to_cpu(bfr->hdr->bufsz);
	if (bufsz + sizeof(*bfr->hdr) != size) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: ring buffer size 0x%x != expected 0x%lx\n",
			ep->endpoint, bufsz, size - sizeof(*bfr->hdr));
		return;
	}

	bfr->buf = bfr->hdr + 1;
	bfr->bufsz = bufsz;
	bfr->ready = true;

	if (ep->rxbfr.ready && ep->txbfr.ready)
		afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_START));
}

static const struct apple_epic_service_ops *
afk_match_service(struct apple_dcp_afkep *ep, const char *name)
{
	const struct apple_epic_service_ops *ops;

	if (!name[0])
		return NULL;
	if (!ep->ops)
		return NULL;

	for (ops = ep->ops; ops->name[0]; ops++) {
		if (strcmp(ops->name, name))
			continue;

		return ops;
	}

	return NULL;
}

static struct apple_epic_service *afk_epic_find_service(struct apple_dcp_afkep *ep,
							u32 channel)
{
	for (u32 i = 0; i < ep->num_channels; i++)
		if (ep->services[i].enabled && ep->services[i].channel == channel)
			return &ep->services[i];

	return NULL;
}

static void afk_recv_handle_init(struct apple_dcp_afkep *ep, u32 channel,
				 u8 *payload, size_t payload_size)
{
	char name[32];
	s64 epic_unit = -1;
	u32 ch_idx;
	const char *service_name = name;
	const char *epic_name = NULL, *epic_class = NULL;
	const struct apple_epic_service_ops *ops;
	struct dcp_parse_ctx ctx;
	u8 *props = payload + sizeof(name);
	size_t props_size = payload_size - sizeof(name);

	WARN_ON(afk_epic_find_service(ep, channel));

	if (payload_size < sizeof(name)) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
			ep->endpoint, payload_size);
		return;
	}

	if (ep->num_channels >= AFK_MAX_CHANNEL) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: too many enabled services!\n",
			ep->endpoint);
		return;
	}

	strscpy(name, payload, sizeof(name));

	/*
	 * DCP firmware 13.2 reports the interface-name as name, which starts
	 * with "dispext%d" and uses -1 as the ID for "dcp". The 12.3 firmware
	 * used EPICProviderClass instead. If the init call carries props,
	 * parse them and use EPICProviderClass to match the service.
	 */
	if (props_size > 36) {
		int ret = parse(props, props_size, &ctx);
		if (ret) {
			dev_err(ep->dcp->dev,
				"AFK[ep:%02x]: Failed to parse service init props for %s\n",
				ep->endpoint, name);
			return;
		}
		ret = parse_epic_service_init(&ctx, &epic_name, &epic_class, &epic_unit);
		if (ret) {
			dev_err(ep->dcp->dev,
				"AFK[ep:%02x]: failed to extract init props: %d\n",
				ep->endpoint, ret);
			return;
		}
		service_name = epic_class;
	} else {
		service_name = name;
	}

	ops = afk_match_service(ep, service_name);
	if (!ops) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: unable to match service %s on channel %d\n",
			ep->endpoint, service_name, channel);
		goto free;
	}

	ch_idx = ep->num_channels++;
	mtx_init(&ep->services[ch_idx].lock, IPL_TTY);
	ep->services[ch_idx].enabled = true;
	ep->services[ch_idx].ops = ops;
	ep->services[ch_idx].ep = ep;
	ep->services[ch_idx].channel = channel;
	ep->services[ch_idx].cmd_tag = 0;
	ops->init(&ep->services[ch_idx], epic_name, epic_class, epic_unit);
	dev_info(ep->dcp->dev, "AFK[ep:%02x]: new service %s on channel %d\n",
		 ep->endpoint, service_name, channel);
free:
	kfree(epic_name);
	kfree(epic_class);
}

static void afk_recv_handle_teardown(struct apple_dcp_afkep *ep, u32 channel)
{
	struct apple_epic_service *service;
	const struct apple_epic_service_ops *ops;
	unsigned long flags;

	service = afk_epic_find_service(ep, channel);
	if (!service) {
		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: teardown for disabled channel %u\n",
			 ep->endpoint, channel);
		return;
	}

	// TODO: think through what locking is necessary
	spin_lock_irqsave(&service->lock, flags);
	service->enabled = false;
	ops = service->ops;
	spin_unlock_irqrestore(&service->lock, flags);

	if (ops->teardown)
		ops->teardown(service);
}

static void afk_recv_handle_reply(struct apple_dcp_afkep *ep, u32 channel,
				  u16 tag, void *payload, size_t payload_size)
{
	struct epic_cmd *cmd = payload;
	struct apple_epic_service *service;
	unsigned long flags;
	u8 idx = tag & 0xff;
	void *rxbuf, *txbuf;
	dma_addr_t rxbuf_dma, txbuf_dma;
	size_t rxlen, txlen;

	service = afk_epic_find_service(ep, channel);
	if (!service) {
		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: command reply on disabled channel %u\n",
			 ep->endpoint, channel);
		return;
	}

	if (payload_size < sizeof(*cmd)) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: command reply on channel %d too small: %ld\n",
			ep->endpoint, channel, payload_size);
		return;
	}

	if (idx >= MAX_PENDING_CMDS) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: command reply on channel %d out of range: %d\n",
			ep->endpoint, channel, idx);
		return;
	}

	spin_lock_irqsave(&service->lock, flags);
	if (service->cmds[idx].done) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: command reply on channel %d already handled\n",
			ep->endpoint, channel);
		spin_unlock_irqrestore(&service->lock, flags);
		return;
	}

	if (tag != service->cmds[idx].tag) {
		dev_err(ep->dcp->dev,
			"AFK[ep:%02x]: command reply on channel %d has invalid tag: expected 0x%04x != 0x%04x\n",
			ep->endpoint, channel, tag, service->cmds[idx].tag);
		spin_unlock_irqrestore(&service->lock, flags);
		return;
	}

	service->cmds[idx].done = true;
	service->cmds[idx].retcode = le32_to_cpu(cmd->retcode);
	if (service->cmds[idx].free_on_ack) {
		/* defer freeing until we're no longer in atomic context */
		rxbuf = service->cmds[idx].rxbuf;
		txbuf = service->cmds[idx].txbuf;
		rxlen = service->cmds[idx].rxlen;
		txlen = service->cmds[idx].txlen;
		rxbuf_dma = service->cmds[idx].rxbuf_dma;
		txbuf_dma = service->cmds[idx].txbuf_dma;
		bitmap_release_region(service->cmd_map, idx, 0);
	} else {
		rxbuf = txbuf = NULL;
		rxlen = txlen = 0;
	}
	if (service->cmds[idx].completion)
		complete(service->cmds[idx].completion);

	spin_unlock_irqrestore(&service->lock, flags);

	if (rxbuf && rxlen)
		dma_free_coherent(ep->dcp->dev, rxlen, rxbuf, rxbuf_dma);
	if (txbuf && txlen)
		dma_free_coherent(ep->dcp->dev, txlen, txbuf, txbuf_dma);
}

struct epic_std_service_ap_call {
	__le32 unk0;
	__le32 unk1;
	__le32 type;
	__le32 len;
	__le32 magic;
	u8 _unk[48];
} __attribute__((packed));

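/*
 * Standard-service notify calls wrap their arguments in the header above:
 * type selects the operation and len gives the size of the arguments that
 * follow. The reply echoes the header and goes back with the same tag as
 * EPIC_TYPE_NOTIFY_ACK / EPIC_CAT_REPLY.
 */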
static void afk_recv_handle_std_service(struct apple_dcp_afkep *ep, u32 channel,
					u32 type, struct epic_hdr *ehdr,
					struct epic_sub_hdr *eshdr,
					void *payload, size_t payload_size)
{
	struct apple_epic_service *service = afk_epic_find_service(ep, channel);

	if (!service) {
		dev_warn(ep->dcp->dev,
			 "AFK[ep:%02x]: std service notify on disabled channel %u\n",
			 ep->endpoint, channel);
		return;
	}

	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_NOTIFY) {
		struct epic_std_service_ap_call *call = payload;
		size_t call_size;
		void *reply;
		int ret;

		if (payload_size < sizeof(*call))
			return;

		call_size = le32_to_cpu(call->len);
		if (payload_size < sizeof(*call) + call_size)
			return;

		if (!service->ops->call)
			return;
		reply = kzalloc(payload_size, GFP_KERNEL);
		if (!reply)
			return;

		ret = service->ops->call(service, le32_to_cpu(call->type),
					 payload + sizeof(*call), call_size,
					 reply + sizeof(*call), call_size);
		if (ret) {
			kfree(reply);
			return;
		}

		memcpy(reply, call, sizeof(*call));
		afk_send_epic(ep, channel, le16_to_cpu(eshdr->tag),
			      EPIC_TYPE_NOTIFY_ACK, EPIC_CAT_REPLY,
			      EPIC_SUBTYPE_STD_SERVICE, reply, payload_size);
		kfree(reply);

		return;
	}

	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT) {
		if (service->ops->report)
			service->ops->report(service, le16_to_cpu(eshdr->type),
					     payload, payload_size);
		return;
	}

	dev_err(ep->dcp->dev,
		"AFK[ep:%02x]: channel %d received unhandled standard service message: %x / %x\n",
		ep->endpoint, channel, type, eshdr->category);
	print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
		       payload_size, true);
}

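/*
 * Every EPIC packet starts with a struct epic_hdr followed by a struct
 * epic_sub_hdr; the type, category, subtype and tag fields in those headers
 * select the handler for the payload that follows.
 */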
static void afk_recv_handle(struct apple_dcp_afkep *ep, u32 channel, u32 type,
			    u8 *data, size_t data_size)
{
	struct apple_epic_service *service;
	struct epic_hdr *ehdr = (struct epic_hdr *)data;
	struct epic_sub_hdr *eshdr =
		(struct epic_sub_hdr *)(data + sizeof(*ehdr));
	u16 subtype = le16_to_cpu(eshdr->type);
	u8 *payload = data + sizeof(*ehdr) + sizeof(*eshdr);
	size_t payload_size;

	if (data_size < sizeof(*ehdr) + sizeof(*eshdr)) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
			ep->endpoint, data_size);
		return;
	}
	payload_size = data_size - sizeof(*ehdr) - sizeof(*eshdr);

	trace_afk_recv_handle(ep, channel, type, data_size, ehdr, eshdr);

	service = afk_epic_find_service(ep, channel);

	if (!service) {
		if (type != EPIC_TYPE_NOTIFY && type != EPIC_TYPE_REPLY) {
			dev_err(ep->dcp->dev,
				"AFK[ep:%02x]: expected notify but got 0x%x on channel %d\n",
				ep->endpoint, type, channel);
			return;
		}
		if (eshdr->category != EPIC_CAT_REPORT) {
			dev_err(ep->dcp->dev,
				"AFK[ep:%02x]: expected report but got 0x%x on channel %d\n",
				ep->endpoint, eshdr->category, channel);
			return;
		}
		if (subtype == EPIC_SUBTYPE_TEARDOWN) {
			dev_dbg(ep->dcp->dev,
				"AFK[ep:%02x]: teardown without service on channel %d\n",
				ep->endpoint, channel);
			return;
		}
		if (subtype != EPIC_SUBTYPE_ANNOUNCE) {
			dev_err(ep->dcp->dev,
				"AFK[ep:%02x]: expected announce but got 0x%x on channel %d\n",
				ep->endpoint, subtype, channel);
			return;
		}

		return afk_recv_handle_init(ep, channel, payload, payload_size);
	}

	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT &&
	    subtype == EPIC_SUBTYPE_TEARDOWN)
		return afk_recv_handle_teardown(ep, channel);

	if (type == EPIC_TYPE_REPLY && eshdr->category == EPIC_CAT_REPLY)
		return afk_recv_handle_reply(ep, channel,
					     le16_to_cpu(eshdr->tag), payload,
					     payload_size);

	if (subtype == EPIC_SUBTYPE_STD_SERVICE)
		return afk_recv_handle_std_service(
			ep, channel, type, ehdr, eshdr, payload, payload_size);

	dev_err(ep->dcp->dev, "AFK[ep:%02x]: channel %d received unhandled message "
		"(type %x subtype %x)\n", ep->endpoint, channel, type, subtype);
	print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
		       payload_size, true);
}

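/*
 * Consume a single queue entry from the RX ring. Each entry is a struct
 * afk_qe (QE_MAGIC, size, channel, type) followed by size bytes of payload,
 * padded to the 1 << BLOCK_SHIFT block size. An entry whose payload would
 * cross the end of the ring is a dummy: the real entry then sits at the
 * start of the buffer.
 */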
static bool afk_recv(struct apple_dcp_afkep *ep)
{
	struct afk_qe *hdr;
	u32 rptr, wptr;
	u32 magic, size, channel, type;

	if (!ep->rxbfr.ready) {
		dev_err(ep->dcp->dev, "AFK[ep:%02x]: got RECV but not ready\n",
			ep->endpoint);
		return false;
	}

	rptr = le32_to_cpu(ep->rxbfr.hdr->rptr);
	wptr = le32_to_cpu(ep->rxbfr.hdr->wptr);
	trace_afk_recv_rwptr_pre(ep, rptr, wptr);

	if (rptr == wptr)
		return false;

	if (rptr > (ep->rxbfr.bufsz - sizeof(*hdr))) {
		dev_warn(ep->dcp->dev,
			 "AFK[ep:%02x]: rptr out of bounds: 0x%x > 0x%lx\n",
			 ep->endpoint, rptr, ep->rxbfr.bufsz - sizeof(*hdr));
		return false;
	}

	dma_rmb();

	hdr = ep->rxbfr.buf + rptr;
	magic = le32_to_cpu(hdr->magic);
	size = le32_to_cpu(hdr->size);
	trace_afk_recv_qe(ep, rptr, magic, size);

	if (magic != QE_MAGIC) {
		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: invalid queue entry magic: 0x%x\n",
			 ep->endpoint, magic);
		return false;
	}

	/*
	 * If there's not enough space for the payload before the end of the
	 * ring, the co-processor inserted a dummy queue entry here and we
	 * have to wrap around to the start of the buffer, where the entry
	 * with the real data lives.
	 */
	if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
		rptr = 0;
		hdr = ep->rxbfr.buf + rptr;
		magic = le32_to_cpu(hdr->magic);
		size = le32_to_cpu(hdr->size);
		trace_afk_recv_qe(ep, rptr, magic, size);

		if (magic != QE_MAGIC) {
			dev_warn(ep->dcp->dev,
				 "AFK[ep:%02x]: invalid next queue entry magic: 0x%x\n",
				 ep->endpoint, magic);
			return false;
		}

		ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
	}

	if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
		dev_warn(ep->dcp->dev,
			 "AFK[ep:%02x]: queue entry out of bounds: 0x%lx > 0x%lx\n",
			 ep->endpoint, rptr + size + sizeof(*hdr), ep->rxbfr.bufsz);
		return false;
	}

	channel = le32_to_cpu(hdr->channel);
	type = le32_to_cpu(hdr->type);

	rptr = ALIGN(rptr + sizeof(*hdr) + size, 1 << BLOCK_SHIFT);
	if (WARN_ON(rptr > ep->rxbfr.bufsz))
		rptr = 0;
	if (rptr == ep->rxbfr.bufsz)
		rptr = 0;

	dma_mb();

	ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
	trace_afk_recv_rwptr_post(ep, rptr, wptr);

	/*
	 * TODO: this is theoretically unsafe since DCP could overwrite data
	 *       after the read pointer was updated above. Do it anyway since
	 *       it avoids 2 problems in the DCP tracer:
	 *       1. the tracer sees replies before the notifies from dcp
	 *       2. the tracer tries to read buffers after they are unmapped.
	 */
	afk_recv_handle(ep, channel, type, hdr->data, size);

	return true;
}

static void afk_receive_message_worker(struct work_struct *work_)
{
	struct afk_receive_message_work *work;
	u16 type;

	work = container_of(work_, struct afk_receive_message_work, work);

	type = FIELD_GET(RBEP_TYPE, work->message);
	switch (type) {
	case RBEP_INIT_ACK:
		break;

	case RBEP_START_ACK:
		complete_all(&work->ep->started);
		break;

	case RBEP_SHUTDOWN_ACK:
		complete_all(&work->ep->stopped);
		break;

	case RBEP_GETBUF:
		afk_getbuf(work->ep, work->message);
		break;

	case RBEP_INIT_TX:
		afk_init_rxtx(work->ep, work->message, &work->ep->txbfr);
		break;

	case RBEP_INIT_RX:
		afk_init_rxtx(work->ep, work->message, &work->ep->rxbfr);
		break;

	case RBEP_RECV:
		while (afk_recv(work->ep))
			;
		break;

	default:
		dev_err(work->ep->dcp->dev,
			"Received unknown AFK message type: 0x%x\n", type);
	}

	kfree(work);
}

int afk_receive_message(struct apple_dcp_afkep *ep, u64 message)
{
	struct afk_receive_message_work *work;

	// TODO: comment why decoupling from rtkit thread is required here
	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->ep = ep;
	work->message = message;
	INIT_WORK(&work->work, afk_receive_message_worker);
	queue_work(ep->wq, &work->work);

	return 0;
}

int afk_send_epic(struct apple_dcp_afkep *ep, u32 channel, u16 tag,
		  enum epic_type etype, enum epic_category ecat, u8 stype,
		  const void *payload, size_t payload_len)
{
	u32 rptr, wptr;
	struct afk_qe *hdr, *hdr2;
	struct epic_hdr *ehdr;
	struct epic_sub_hdr *eshdr;
	unsigned long flags;
	size_t total_epic_size, total_size;
	int ret;

	spin_lock_irqsave(&ep->lock, flags);

	dma_rmb();
	rptr = le32_to_cpu(ep->txbfr.hdr->rptr);
	wptr = le32_to_cpu(ep->txbfr.hdr->wptr);
	trace_afk_send_rwptr_pre(ep, rptr, wptr);
	total_epic_size = sizeof(*ehdr) + sizeof(*eshdr) + payload_len;
	total_size = sizeof(*hdr) + total_epic_size;

	hdr = hdr2 = NULL;

	/*
	 * We need to figure out how to place the headers and the payload
	 * into the ring buffer:
	 * - If the write pointer is in front of the read pointer we just need
	 *   enough space in between to store everything.
	 * - If the read pointer has already wrapped around the end of the
	 *   buffer we can
	 *    a) either store the entire payload at the write pointer if
	 *       there's enough space until the end,
	 *    b) or just store the queue entry at the write pointer to indicate
	 *       that we need to wrap to the start and then store the headers
	 *       and the payload at the beginning of the buffer. The queue
	 *       header has to be stored twice in this case.
	 * In either case we have to ensure that there's always enough space
	 * so that we don't accidentally overwrite other buffers.
	 */
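	/*
	 * Illustration of case b) above: rptr has wrapped around and wptr is
	 * close to the end of the buffer. A queue entry still fits at wptr
	 * but the payload does not, so the entry is written twice: once at
	 * wptr as a wrap marker (hdr) and once at offset 0 (hdr2), with the
	 * EPIC headers and the payload placed right after the second copy.
	 */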
	if (wptr < rptr) {
		/*
		 * If wptr < rptr we can't wrap around and only have to make
		 * sure that there's enough space for the entire payload.
		 */
		if (wptr + total_size > rptr) {
			ret = -ENOMEM;
			goto out;
		}

		hdr = ep->txbfr.buf + wptr;
		wptr += sizeof(*hdr);
	} else {
		/* We need enough space to place at least a queue entry */
		if (wptr + sizeof(*hdr) > ep->txbfr.bufsz) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * If we can place a single queue entry but not the full payload
		 * we need to place one queue entry at the end of the ring
		 * buffer and then another one together with the entire
		 * payload at the beginning.
		 */
		if (wptr + total_size > ep->txbfr.bufsz) {
			/*
			 * Ensure there's space for the queue entry at the
			 * beginning
			 */
			if (sizeof(*hdr) > rptr) {
				ret = -ENOMEM;
				goto out;
			}

			/*
			 * Place two queue entries to indicate to the firmware
			 * that we want to wrap around.
			 */
			hdr = ep->txbfr.buf + wptr;
			hdr2 = ep->txbfr.buf;
			wptr = sizeof(*hdr);

			/* Ensure there's enough space for the entire payload */
			if (wptr + total_epic_size > rptr) {
				ret = -ENOMEM;
				goto out;
			}
		} else {
			/* We have enough space to place the entire payload */
			hdr = ep->txbfr.buf + wptr;
			wptr += sizeof(*hdr);
		}
	}
	/*
	 * At this point we're guaranteed that hdr (and possibly hdr2) point
	 * to a buffer large enough to fit the queue entry and that we have
	 * enough space at wptr to store the payload.
	 */

	hdr->magic = cpu_to_le32(QE_MAGIC);
	hdr->size = cpu_to_le32(total_epic_size);
	hdr->channel = cpu_to_le32(channel);
	hdr->type = cpu_to_le32(etype);
	if (hdr2)
		memcpy(hdr2, hdr, sizeof(*hdr));

	ehdr = ep->txbfr.buf + wptr;
	memset(ehdr, 0, sizeof(*ehdr));
	ehdr->version = 2;
	ehdr->seq = cpu_to_le16(ep->qe_seq++);
	ehdr->timestamp = cpu_to_le64(0);
	wptr += sizeof(*ehdr);

	eshdr = ep->txbfr.buf + wptr;
	memset(eshdr, 0, sizeof(*eshdr));
	eshdr->length = cpu_to_le32(payload_len);
	eshdr->version = 4;
	eshdr->category = ecat;
	eshdr->type = cpu_to_le16(stype);
	eshdr->timestamp = cpu_to_le64(0);
	eshdr->tag = cpu_to_le16(tag);
	if (ecat == EPIC_CAT_REPLY)
		eshdr->inline_len = cpu_to_le16(payload_len - 4);
	else
		eshdr->inline_len = cpu_to_le16(0);
	wptr += sizeof(*eshdr);

	memcpy(ep->txbfr.buf + wptr, payload, payload_len);
	wptr += payload_len;
	wptr = ALIGN(wptr, 1 << BLOCK_SHIFT);
	if (wptr == ep->txbfr.bufsz)
		wptr = 0;
	trace_afk_send_rwptr_post(ep, rptr, wptr);

	ep->txbfr.hdr->wptr = cpu_to_le32(wptr);
	afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_SEND) |
			     FIELD_PREP(SEND_WPTR, wptr));
	ret = 0;

out:
	spin_unlock_irqrestore(&ep->lock, flags);
	return ret;
}

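/*
 * Send a command and wait up to a second for its reply. The payload is
 * copied into a freshly allocated DMA buffer and a struct epic_cmd holding
 * the rx/tx DVAs is sent on the channel; afk_recv_handle_reply() fills in
 * the retcode, completes the completion and, if we have already timed out
 * (free_on_ack), frees the buffers on our behalf.
 */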
int afk_send_command(struct apple_epic_service *service, u8 type,
		     const void *payload, size_t payload_len, void *output,
		     size_t output_len, u32 *retcode)
{
	struct epic_cmd cmd;
	void *rxbuf, *txbuf;
	dma_addr_t rxbuf_dma, txbuf_dma;
	unsigned long flags;
	int ret, idx;
	u16 tag;
	struct apple_dcp_afkep *ep = service->ep;
	DECLARE_COMPLETION_ONSTACK(completion);

	rxbuf = dma_alloc_coherent(ep->dcp->dev, output_len, &rxbuf_dma,
				   GFP_KERNEL);
	if (!rxbuf)
		return -ENOMEM;
	txbuf = dma_alloc_coherent(ep->dcp->dev, payload_len, &txbuf_dma,
				   GFP_KERNEL);
	if (!txbuf) {
		ret = -ENOMEM;
		goto err_free_rxbuf;
	}

	memcpy(txbuf, payload, payload_len);

	memset(&cmd, 0, sizeof(cmd));
	cmd.retcode = cpu_to_le32(0);
	cmd.rxbuf = cpu_to_le64(rxbuf_dma);
	cmd.rxlen = cpu_to_le32(output_len);
	cmd.txbuf = cpu_to_le64(txbuf_dma);
	cmd.txlen = cpu_to_le32(payload_len);

	spin_lock_irqsave(&service->lock, flags);
	idx = bitmap_find_free_region(service->cmd_map, MAX_PENDING_CMDS, 0);
	if (idx < 0) {
		ret = -ENOSPC;
		goto err_unlock;
	}

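	/*
	 * The low byte of the tag is the slot index so that
	 * afk_recv_handle_reply() can find the pending command again; the
	 * high byte is a rolling sequence number used to detect stale
	 * replies.
	 */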
	tag = (service->cmd_tag & 0xff) << 8;
	tag |= idx & 0xff;
	service->cmd_tag++;

	service->cmds[idx].tag = tag;
	service->cmds[idx].rxbuf = rxbuf;
	service->cmds[idx].txbuf = txbuf;
	service->cmds[idx].rxbuf_dma = rxbuf_dma;
	service->cmds[idx].txbuf_dma = txbuf_dma;
	service->cmds[idx].rxlen = output_len;
	service->cmds[idx].txlen = payload_len;
	service->cmds[idx].free_on_ack = false;
	service->cmds[idx].done = false;
	service->cmds[idx].completion = &completion;
	init_completion(&completion);

	spin_unlock_irqrestore(&service->lock, flags);

	ret = afk_send_epic(service->ep, service->channel, tag,
			    EPIC_TYPE_COMMAND, EPIC_CAT_COMMAND, type, &cmd,
			    sizeof(cmd));
	if (ret)
		goto err_free_cmd;

	ret = wait_for_completion_timeout(&completion,
					  msecs_to_jiffies(MSEC_PER_SEC));

	if (ret <= 0) {
		spin_lock_irqsave(&service->lock, flags);
		/*
		 * Check again while we're inside the lock to make sure
		 * the command wasn't completed just after
		 * wait_for_completion_timeout returned.
		 */
		if (!service->cmds[idx].done) {
			service->cmds[idx].completion = NULL;
			service->cmds[idx].free_on_ack = true;
			spin_unlock_irqrestore(&service->lock, flags);
			return -ETIMEDOUT;
		}
		spin_unlock_irqrestore(&service->lock, flags);
	}

	ret = 0;
	if (retcode)
		*retcode = service->cmds[idx].retcode;
	if (output && output_len)
		memcpy(output, rxbuf, output_len);

err_free_cmd:
	spin_lock_irqsave(&service->lock, flags);
	bitmap_release_region(service->cmd_map, idx, 0);
err_unlock:
	spin_unlock_irqrestore(&service->lock, flags);
	dma_free_coherent(ep->dcp->dev, payload_len, txbuf, txbuf_dma);
err_free_rxbuf:
	dma_free_coherent(ep->dcp->dev, output_len, rxbuf, rxbuf_dma);
	return ret;
}

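/*
 * Standard service call: one buffer, sized for the larger of the two
 * directions, holds a struct epic_service_call header followed by the
 * arguments. The reply reuses the same buffer and is validated by checking
 * that magic, group and command come back unchanged.
 */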
int afk_service_call(struct apple_epic_service *service, u16 group, u32 command,
		     const void *data, size_t data_len, size_t data_pad,
		     void *output, size_t output_len, size_t output_pad)
{
	struct epic_service_call *call;
	void *bfr;
	size_t bfr_len = max(data_len + data_pad, output_len + output_pad) +
			 sizeof(*call);
	int ret;
	u32 retcode;
	u32 retlen;

	bfr = kzalloc(bfr_len, GFP_KERNEL);
	if (!bfr)
		return -ENOMEM;

	call = bfr;

	memset(call, 0, sizeof(*call));
	call->group = cpu_to_le16(group);
	call->command = cpu_to_le32(command);
	call->data_len = cpu_to_le32(data_len + data_pad);
	call->magic = cpu_to_le32(EPIC_SERVICE_CALL_MAGIC);

	memcpy(bfr + sizeof(*call), data, data_len);

	ret = afk_send_command(service, EPIC_SUBTYPE_STD_SERVICE, bfr, bfr_len,
			       bfr, bfr_len, &retcode);
	if (ret)
		goto out;
	if (retcode) {
		ret = -EINVAL;
		goto out;
	}
	if (le32_to_cpu(call->magic) != EPIC_SERVICE_CALL_MAGIC ||
	    le16_to_cpu(call->group) != group ||
	    le32_to_cpu(call->command) != command) {
		ret = -EINVAL;
		goto out;
	}

	retlen = le32_to_cpu(call->data_len);
	if (output_len < retlen)
		retlen = output_len;
	if (output && output_len) {
		memset(output, 0, output_len);
		memcpy(output, bfr + sizeof(*call), retlen);
	}

out:
	kfree(bfr);
	return ret;
}