// SPDX-License-Identifier: GPL-2.0+
/*
 * Surface Book (gen. 2 and later) detachment system (DTX) driver.
 *
 * Provides a user-space interface to properly handle clipboard/tablet
 * (containing screen and processor) detachment from the base of the device
 * (containing the keyboard and optionally a discrete GPU). Allows user-space
 * to acknowledge (to speed things up), abort (e.g. in case the dGPU is still
 * in use), or request detachment.
 *
 * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
 */
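
/*
 * Illustrative user-space sketch (not part of the driver): requesting
 * detachment through the misc-device registered below (/dev/surface/dtx)
 * using the ioctls dispatched in __surface_dtx_ioctl(). Header path and
 * error handling are assumptions made for this example only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/surface_aggregator/dtx.h>
 *
 *	int request_detach(void)
 *	{
 *		int fd = open("/dev/surface/dtx", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// Ask the EC to start the detachment process; the driver
 *		// forwards this as a latch-request command.
 *		if (ioctl(fd, SDTX_IOCTL_LATCH_REQUEST) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		// User-space may then acknowledge via SDTX_IOCTL_LATCH_CONFIRM
 *		// or abort via SDTX_IOCTL_LATCH_CANCEL.
 *		close(fd);
 *		return 0;
 *	}
 */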

#include <linux/fs.h>
#include <linux/input.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include <linux/surface_aggregator/dtx.h>


/* -- SSAM interface. ------------------------------------------------------- */

enum sam_event_cid_bas {
	SAM_EVENT_CID_DTX_CONNECTION			= 0x0c,
	SAM_EVENT_CID_DTX_REQUEST			= 0x0e,
	SAM_EVENT_CID_DTX_CANCEL			= 0x0f,
	SAM_EVENT_CID_DTX_LATCH_STATUS			= 0x11,
};

enum ssam_bas_base_state {
	SSAM_BAS_BASE_STATE_DETACH_SUCCESS		= 0x00,
	SSAM_BAS_BASE_STATE_ATTACHED			= 0x01,
	SSAM_BAS_BASE_STATE_NOT_FEASIBLE		= 0x02,
};

enum ssam_bas_latch_status {
	SSAM_BAS_LATCH_STATUS_CLOSED			= 0x00,
	SSAM_BAS_LATCH_STATUS_OPENED			= 0x01,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN		= 0x02,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN	= 0x03,
	SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE		= 0x04,
};

enum ssam_bas_cancel_reason {
	SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE		= 0x00,  /* Low battery. */
	SSAM_BAS_CANCEL_REASON_TIMEOUT			= 0x02,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN		= 0x03,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN	= 0x04,
	SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE		= 0x05,
};

struct ssam_bas_base_info {
	u8 state;
	u8 base_id;
} __packed;

static_assert(sizeof(struct ssam_bas_base_info) == 2);

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x06,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x07,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x08,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x09,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x0a,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x0b,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x0c,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x0d,
	.instance_id     = 0x00,
});

SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
	.target_category = SSAM_SSH_TC_BAS,
	.target_id       = 0x01,
	.command_id      = 0x11,
	.instance_id     = 0x00,
});
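
/*
 * Note: The request functions generated above are invoked via ssam_retry()
 * throughout this file. A minimal sketch of such a call (error handling
 * elided, 'ctrl' being a bound controller reference) might look like this:
 *
 *	struct ssam_bas_base_info info;
 *	int status;
 *
 *	status = ssam_retry(ssam_bas_get_base, ctrl, &info);
 *	if (!status)
 *		pr_debug("base state: %#04x, base id: %#04x\n",
 *			 info.state, info.base_id);
 */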


/* -- Main structures. ------------------------------------------------------ */

enum sdtx_device_state {
	SDTX_DEVICE_SHUTDOWN_BIT    = BIT(0),
	SDTX_DEVICE_DIRTY_BASE_BIT  = BIT(1),
	SDTX_DEVICE_DIRTY_MODE_BIT  = BIT(2),
	SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
};

struct sdtx_device {
	struct kref kref;
	struct rw_semaphore lock;         /* Guards device and controller reference. */

	struct device *dev;
	struct ssam_controller *ctrl;
	unsigned long flags;

	struct miscdevice mdev;
	wait_queue_head_t waitq;
	struct mutex write_lock;          /* Guards order of events/notifications. */
	struct rw_semaphore client_lock;  /* Guards client list.                   */
	struct list_head client_list;

	struct delayed_work state_work;
	struct {
		struct ssam_bas_base_info base;
		u8 device_mode;
		u8 latch_status;
	} state;

	struct delayed_work mode_work;
	struct input_dev *mode_switch;

	struct ssam_event_notifier notif;
};

enum sdtx_client_state {
	SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
};

struct sdtx_client {
	struct sdtx_device *ddev;
	struct list_head node;
	unsigned long flags;

	struct fasync_struct *fasync;

	struct mutex read_lock;           /* Guards FIFO buffer read access. */
	DECLARE_KFIFO(buffer, u8, 512);
};

static void __sdtx_device_release(struct kref *kref)
{
	struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);

	mutex_destroy(&ddev->write_lock);
	kfree(ddev);
}

static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
{
	if (ddev)
		kref_get(&ddev->kref);

	return ddev;
}

static void sdtx_device_put(struct sdtx_device *ddev)
{
	if (ddev)
		kref_put(&ddev->kref, __sdtx_device_release);
}


/* -- Firmware value translations. ------------------------------------------ */

static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
{
	switch (state) {
	case SSAM_BAS_BASE_STATE_ATTACHED:
		return SDTX_BASE_ATTACHED;

	case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
		return SDTX_BASE_DETACHED;

	case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	default:
		dev_err(ddev->dev, "unknown base state: %#04x\n", state);
		return SDTX_UNKNOWN(state);
	}
}

static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
{
	switch (status) {
	case SSAM_BAS_LATCH_STATUS_CLOSED:
		return SDTX_LATCH_CLOSED;

	case SSAM_BAS_LATCH_STATUS_OPENED:
		return SDTX_LATCH_OPENED;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
		return SDTX_UNKNOWN(status);
	}
}

static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
{
	switch (reason) {
	case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
		return SDTX_DETACH_NOT_FEASIBLE;

	case SSAM_BAS_CANCEL_REASON_TIMEOUT:
		return SDTX_DETACH_TIMEDOUT;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
		return SDTX_ERR_FAILED_TO_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
		return SDTX_ERR_FAILED_TO_REMAIN_OPEN;

	case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
		return SDTX_ERR_FAILED_TO_CLOSE;

	default:
		dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
		return SDTX_UNKNOWN(reason);
	}
}


/* -- IOCTLs. --------------------------------------------------------------- */

static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
				    struct sdtx_base_info __user *buf)
{
	struct ssam_bas_base_info raw;
	struct sdtx_base_info info;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
	if (status < 0)
		return status;

	info.state = sdtx_translate_base_state(ddev, raw.state);
	info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);

	if (copy_to_user(buf, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 mode;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status < 0)
		return status;

	return put_user(mode, buf);
}

static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
{
	u8 latch;
	int status;

	lockdep_assert_held_read(&ddev->lock);

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status < 0)
		return status;

	return put_user(sdtx_translate_latch_status(ddev, latch), buf);
}

static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
{
	struct sdtx_device *ddev = client->ddev;

	lockdep_assert_held_read(&ddev->lock);

	switch (cmd) {
	case SDTX_IOCTL_EVENTS_ENABLE:
		set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_EVENTS_DISABLE:
		clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
		return 0;

	case SDTX_IOCTL_LATCH_LOCK:
		return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_UNLOCK:
		return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);

	case SDTX_IOCTL_LATCH_REQUEST:
		return ssam_retry(ssam_bas_latch_request, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CONFIRM:
		return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);

	case SDTX_IOCTL_LATCH_HEARTBEAT:
		return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);

	case SDTX_IOCTL_LATCH_CANCEL:
		return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);

	case SDTX_IOCTL_GET_BASE_INFO:
		return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);

	case SDTX_IOCTL_GET_DEVICE_MODE:
		return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);

	case SDTX_IOCTL_GET_LATCH_STATUS:
		return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);

	default:
		return -EINVAL;
	}
}

static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct sdtx_client *client = file->private_data;
	long status;

	if (down_read_killable(&client->ddev->lock))
		return -ERESTARTSYS;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);
		return -ENODEV;
	}

	status = __surface_dtx_ioctl(client, cmd, arg);

	up_read(&client->ddev->lock);
	return status;
}


/* -- File operations. ------------------------------------------------------ */

static int surface_dtx_open(struct inode *inode, struct file *file)
{
	struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
	struct sdtx_client *client;

	/* Initialize client. */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ddev = sdtx_device_get(ddev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->read_lock);
	INIT_KFIFO(client->buffer);

	file->private_data = client;

	/* Attach client. */
	down_write(&ddev->client_lock);

	/*
	 * Do not add a new client if the device has been shut down. Note that
	 * it's enough to hold the client_lock here as, during shutdown, we
	 * only acquire that lock and remove clients after marking the device
	 * as shut down.
	 */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_write(&ddev->client_lock);
		sdtx_device_put(client->ddev);
		kfree(client);
		return -ENODEV;
	}

	list_add_tail(&client->node, &ddev->client_list);
	up_write(&ddev->client_lock);

	stream_open(inode, file);
	return 0;
}

static int surface_dtx_release(struct inode *inode, struct file *file)
{
	struct sdtx_client *client = file->private_data;

	/* Detach client. */
	down_write(&client->ddev->client_lock);
	list_del(&client->node);
	up_write(&client->ddev->client_lock);

	/* Free client. */
	sdtx_device_put(client->ddev);
	mutex_destroy(&client->read_lock);
	kfree(client);

	return 0;
}

static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
	struct sdtx_client *client = file->private_data;
	struct sdtx_device *ddev = client->ddev;
	unsigned int copied;
	int status = 0;

	if (down_read_killable(&ddev->lock))
		return -ERESTARTSYS;

	/* Make sure we're not shut down. */
	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
		up_read(&ddev->lock);
		return -ENODEV;
	}

	do {
		/* Check availability, wait if necessary. */
		if (kfifo_is_empty(&client->buffer)) {
			up_read(&ddev->lock);

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			status = wait_event_interruptible(ddev->waitq,
							  !kfifo_is_empty(&client->buffer) ||
							  test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
								   &ddev->flags));
			if (status < 0)
				return status;

			if (down_read_killable(&ddev->lock))
				return -ERESTARTSYS;

			/* Need to check that we're not shut down again. */
			if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
				up_read(&ddev->lock);
				return -ENODEV;
			}
		}

		/* Try to read from FIFO. */
		if (mutex_lock_interruptible(&client->read_lock)) {
			up_read(&ddev->lock);
			return -ERESTARTSYS;
		}

		status = kfifo_to_user(&client->buffer, buf, count, &copied);
		mutex_unlock(&client->read_lock);

		if (status < 0) {
			up_read(&ddev->lock);
			return status;
		}

		/* We might not have gotten anything, check this here. */
		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
			up_read(&ddev->lock);
			return -EAGAIN;
		}
	} while (copied == 0);

	up_read(&ddev->lock);
	return copied;
}

static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
{
	struct sdtx_client *client = file->private_data;
	__poll_t events = 0;

	if (down_read_killable(&client->ddev->lock))
		return EPOLLERR;

	if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
		up_read(&client->ddev->lock);
		return EPOLLHUP | EPOLLERR;
	}

	poll_wait(file, &client->ddev->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	up_read(&client->ddev->lock);
	return events;
}

static int surface_dtx_fasync(int fd, struct file *file, int on)
{
	struct sdtx_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations surface_dtx_fops = {
	.owner          = THIS_MODULE,
	.open           = surface_dtx_open,
	.release        = surface_dtx_release,
	.read           = surface_dtx_read,
	.poll           = surface_dtx_poll,
	.fasync         = surface_dtx_fasync,
	.unlocked_ioctl = surface_dtx_ioctl,
	.compat_ioctl   = surface_dtx_ioctl,
	.llseek         = no_llseek,
};


/* -- Event handling/forwarding. -------------------------------------------- */

/*
 * The device operation mode is not immediately updated on the EC when the
 * base has been connected, i.e. querying the device mode inside the
 * connection event callback yields an outdated value. Thus, we can only
 * determine the new tablet-mode switch and device mode values after some
 * time.
 *
 * These delays have been chosen by experimenting. We first delay on connect
 * events, then check and validate the device mode against the base state and
 * if invalid delay again by the "recheck" delay.
 */
#define SDTX_DEVICE_MODE_DELAY_CONNECT	msecs_to_jiffies(100)
#define SDTX_DEVICE_MODE_DELAY_RECHECK	msecs_to_jiffies(100)

struct sdtx_status_event {
	struct sdtx_event e;
	__u16 v;
} __packed;

struct sdtx_base_info_event {
	struct sdtx_event e;
	struct sdtx_base_info v;
} __packed;

union sdtx_generic_event {
	struct sdtx_event common;
	struct sdtx_status_event status;
	struct sdtx_base_info_event base;
};
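
/*
 * Events reach user-space as a packed stream of struct sdtx_event headers,
 * each followed by 'length' payload bytes (see sdtx_push_event() below), and
 * only for clients that have enabled events via SDTX_IOCTL_EVENTS_ENABLE. A
 * rough user-space parsing sketch (it assumes each read() returns whole
 * events; buffer size and error handling are illustrative only):
 *
 *	__u8 buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	ssize_t off = 0;
 *
 *	while (off + sizeof(struct sdtx_event) <= n) {
 *		struct sdtx_event *evt = (struct sdtx_event *)&buf[off];
 *
 *		if (evt->code == SDTX_EVENT_REQUEST) {
 *			// EC asks user-space to confirm or cancel detachment.
 *		}
 *
 *		off += sizeof(struct sdtx_event) + evt->length;
 *	}
 */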

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);

/* Must be executed with ddev->write_lock held. */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
{
	const size_t len = sizeof(struct sdtx_event) + evt->length;
	struct sdtx_client *client;

	lockdep_assert_held(&ddev->write_lock);

	down_read(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
			continue;

		if (likely(kfifo_avail(&client->buffer) >= len))
			kfifo_in(&client->buffer, (const u8 *)evt, len);
		else
			dev_warn(ddev->dev, "event buffer overrun\n");

		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
	up_read(&ddev->client_lock);

	wake_up_interruptible(&ddev->waitq);
}

static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
	struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
	union sdtx_generic_event event;
	size_t len;

	/* Validate event payload length. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		len = 2 * sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		len = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		len = sizeof(u8);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		len = sizeof(u8);
		break;

	default:
		return 0;
	}

	if (in->length != len) {
		dev_err(ddev->dev,
			"unexpected payload size for event %#04x: got %u, expected %zu\n",
			in->command_id, in->length, len);
		return 0;
	}

	mutex_lock(&ddev->write_lock);

	/* Translate event. */
	switch (in->command_id) {
	case SAM_EVENT_CID_DTX_CONNECTION:
		clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.base.state == in->data[0] &&
		    ddev->state.base.base_id == in->data[1])
			goto out;

		ddev->state.base.state = in->data[0];
		ddev->state.base.base_id = in->data[1];

		event.base.e.length = sizeof(struct sdtx_base_info);
		event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
		event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
		event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
		break;

	case SAM_EVENT_CID_DTX_REQUEST:
		event.common.code = SDTX_EVENT_REQUEST;
		event.common.length = 0;
		break;

	case SAM_EVENT_CID_DTX_CANCEL:
		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_CANCEL;
		event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
		break;

	case SAM_EVENT_CID_DTX_LATCH_STATUS:
		clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

		/* If state has not changed: do not send new event. */
		if (ddev->state.latch_status == in->data[0])
			goto out;

		ddev->state.latch_status = in->data[0];

		event.status.e.length = sizeof(u16);
		event.status.e.code = SDTX_EVENT_LATCH_STATUS;
		event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
		break;
	}

	sdtx_push_event(ddev, &event.common);

	/* Update device mode on base connection change. */
	if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
		unsigned long delay;

		delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
		sdtx_update_device_mode(ddev, delay);
	}

out:
	mutex_unlock(&ddev->write_lock);
	return SSAM_NOTIF_HANDLED;
}


/* -- State update functions. ----------------------------------------------- */

static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
{
	return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
		(mode == SDTX_DEVICE_MODE_TABLET)) ||
	       ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
		(mode != SDTX_DEVICE_MODE_TABLET));
}

static void sdtx_device_mode_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
	struct sdtx_status_event event;
	struct ssam_bas_base_info base;
	int status, tablet;
	u8 mode;

	/* Get operation mode. */
	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	/* Get base info. */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base info: %d\n", status);
		return;
	}

	/*
	 * In some cases (specifically when attaching the base), the device
	 * mode isn't updated right away. Thus we check if the device mode
	 * makes sense for the given base state and try again later if it
	 * doesn't.
	 */
	if (sdtx_device_mode_invalid(mode, base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	mutex_lock(&ddev->write_lock);
	clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);

	/* Avoid sending duplicate device-mode events. */
	if (ddev->state.device_mode == mode) {
		mutex_unlock(&ddev->write_lock);
		return;
	}

	ddev->state.device_mode = mode;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->mode_work, delay);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
					    struct ssam_bas_base_info info)
{
	struct sdtx_base_info_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.base.state == info.state &&
	    ddev->state.base.base_id == info.base_id)
		return;

	ddev->state.base = info;

	event.e.length = sizeof(struct sdtx_base_info);
	event.e.code = SDTX_EVENT_BASE_CONNECTION;
	event.v.state = sdtx_translate_base_state(ddev, info.state);
	event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);

	sdtx_push_event(ddev, &event.e);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
{
	struct sdtx_status_event event;
	int tablet;

	/*
	 * Note: This function must be called after updating the base state
	 * via __sdtx_device_state_update_base(), as we rely on the updated
	 * base state value in the validity check below.
	 */

	lockdep_assert_held(&ddev->write_lock);

	if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
		dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
		sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
		return;
	}

	/* Prevent duplicate events. */
	if (ddev->state.device_mode == mode)
		return;

	ddev->state.device_mode = mode;

	/* Send event. */
	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_DEVICE_MODE;
	event.v = mode;

	sdtx_push_event(ddev, &event.e);

	/* Send SW_TABLET_MODE event. */
	tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
	input_sync(ddev->mode_switch);
}

/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
{
	struct sdtx_status_event event;

	lockdep_assert_held(&ddev->write_lock);

	/* Prevent duplicate events. */
	if (ddev->state.latch_status == status)
		return;

	ddev->state.latch_status = status;

	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_LATCH_STATUS;
	event.v = sdtx_translate_latch_status(ddev, status);

	sdtx_push_event(ddev, &event.e);
}

static void sdtx_device_state_workfn(struct work_struct *work)
{
	struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
	struct ssam_bas_base_info base;
	u8 mode, latch;
	int status;

	/* Mark everything as dirty. */
	set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
	set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);

	/*
	 * Ensure that the state gets marked as dirty before continuing to
	 * query it. Necessary to ensure that clear_bit() calls in
	 * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
	 * bits if an event is received while updating the state here.
	 */
	smp_mb__after_atomic();

	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
	if (status) {
		dev_err(ddev->dev, "failed to get base state: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
	if (status) {
		dev_err(ddev->dev, "failed to get device mode: %d\n", status);
		return;
	}

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
	if (status) {
		dev_err(ddev->dev, "failed to get latch status: %d\n", status);
		return;
	}

	mutex_lock(&ddev->write_lock);

	/*
	 * If the respective dirty-bit has been cleared, an event has been
	 * received, updating this state. The queried state may thus be out of
	 * date. At this point, we can safely assume that the state provided
	 * by the event is either up to date, or we're about to receive
	 * another event updating it.
	 */

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
		__sdtx_device_state_update_base(ddev, base);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
		__sdtx_device_state_update_mode(ddev, mode);

	if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
		__sdtx_device_state_update_latch(ddev, latch);

	mutex_unlock(&ddev->write_lock);
}

static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
{
	schedule_delayed_work(&ddev->state_work, delay);
}


/* -- Common device initialization. ----------------------------------------- */

static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
			    struct ssam_controller *ctrl)
{
	int status, tablet_mode;

	/* Basic initialization. */
	kref_init(&ddev->kref);
	init_rwsem(&ddev->lock);
	ddev->dev = dev;
	ddev->ctrl = ctrl;

	ddev->mdev.minor = MISC_DYNAMIC_MINOR;
	ddev->mdev.name = "surface_dtx";
	ddev->mdev.nodename = "surface/dtx";
	ddev->mdev.fops = &surface_dtx_fops;

	ddev->notif.base.priority = 1;
	ddev->notif.base.fn = sdtx_notifier;
	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
	ddev->notif.event.id.instance = 0;
	ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;

	init_waitqueue_head(&ddev->waitq);
	mutex_init(&ddev->write_lock);
	init_rwsem(&ddev->client_lock);
	INIT_LIST_HEAD(&ddev->client_list);

	INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
	INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);

	/*
	 * Get current device state. We want to guarantee that events are only
	 * sent when state actually changes. Thus we cannot use special
	 * "uninitialized" values, as that would cause problems when manually
	 * querying the state in surface_dtx_pm_complete(). I.e. we would not
	 * be able to detect state changes there if no change event has been
	 * received between driver initialization and first device suspension.
	 *
	 * Note that we also need to do this before registering the event
	 * notifier, as that may access the state values.
	 */
	status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
	if (status)
		return status;

	status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
	if (status)
		return status;

	/* Set up tablet mode switch. */
	ddev->mode_switch = input_allocate_device();
	if (!ddev->mode_switch)
		return -ENOMEM;

	ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
	ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
	ddev->mode_switch->id.bustype = BUS_HOST;
	ddev->mode_switch->dev.parent = ddev->dev;

	tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
	input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
	input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);

	status = input_register_device(ddev->mode_switch);
	if (status) {
		input_free_device(ddev->mode_switch);
		return status;
	}

	/* Set up event notifier. */
	status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
	if (status)
		goto err_notif;

	/* Register miscdevice. */
	status = misc_register(&ddev->mdev);
	if (status)
		goto err_mdev;

	/*
	 * Update device state in case it has changed between getting the
	 * initial mode and registering the event notifier.
	 */
	sdtx_update_device_state(ddev, 0);
	return 0;

err_mdev:
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
	cancel_delayed_work_sync(&ddev->mode_work);
err_notif:
	input_unregister_device(ddev->mode_switch);
	return status;
}

static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
{
	struct sdtx_device *ddev;
	int status;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	status = sdtx_device_init(ddev, dev, ctrl);
	if (status) {
		sdtx_device_put(ddev);
		return ERR_PTR(status);
	}

	return ddev;
}

static void sdtx_device_destroy(struct sdtx_device *ddev)
{
	struct sdtx_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);

	/* Disable notifiers, prevent new events from arriving. */
	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);

	/* Stop mode_work, prevent access to mode_switch. */
	cancel_delayed_work_sync(&ddev->mode_work);

	/* Stop state_work. */
	cancel_delayed_work_sync(&ddev->state_work);

	/* With mode_work canceled, we can unregister the mode_switch. */
	input_unregister_device(ddev->mode_switch);

	/* Wake up async clients. */
	down_write(&ddev->client_lock);
	list_for_each_entry(client, &ddev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	}
	up_write(&ddev->client_lock);

	/* Wake up blocking clients. */
	wake_up_interruptible(&ddev->waitq);

	/*
	 * Wait for clients to finish their current operation. After this, the
	 * controller and device references are guaranteed to be no longer in
	 * use.
	 */
	down_write(&ddev->lock);
	ddev->dev = NULL;
	ddev->ctrl = NULL;
	up_write(&ddev->lock);

	/* Finally remove the misc-device. */
	misc_deregister(&ddev->mdev);

	/*
	 * We're now guaranteed that surface_dtx_open() won't be called any
	 * more, so we can now drop our reference.
	 */
	sdtx_device_put(ddev);
}


/* -- PM ops. --------------------------------------------------------------- */

#ifdef CONFIG_PM_SLEEP

static void surface_dtx_pm_complete(struct device *dev)
{
	struct sdtx_device *ddev = dev_get_drvdata(dev);

	/*
	 * Normally, the EC will store events while suspended (i.e. in
	 * display-off state) and release them when resumed (i.e. transitioned
	 * to display-on state). During hibernation, however, the EC will be
	 * shut down and does not store events. Furthermore, events might be
	 * dropped during prolonged suspension (it is currently unknown how
	 * big this event buffer is and how it behaves on overruns).
	 *
	 * To prevent any problems, we update the device state here. We do
	 * this delayed to ensure that any events sent by the EC directly
	 * after resuming will be handled first. The delay below has been
	 * chosen (experimentally), so that there should be ample time for
	 * these events to be handled, before we check and, if necessary,
	 * update the state.
	 */
	sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
}

static const struct dev_pm_ops surface_dtx_pm_ops = {
	.complete = surface_dtx_pm_complete,
};

#else /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops surface_dtx_pm_ops = {};

#endif /* CONFIG_PM_SLEEP */


/* -- Platform driver. ------------------------------------------------------ */

static int surface_dtx_platform_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;
	struct sdtx_device *ddev;

	/* Link to EC. */
	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	ddev = sdtx_device_create(&pdev->dev, ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int surface_dtx_platform_remove(struct platform_device *pdev)
{
	sdtx_device_destroy(platform_get_drvdata(pdev));
	return 0;
}

static const struct acpi_device_id surface_dtx_acpi_match[] = {
	{ "MSHW0133", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);

static struct platform_driver surface_dtx_platform_driver = {
	.probe = surface_dtx_platform_probe,
	.remove = surface_dtx_platform_remove,
	.driver = {
		.name = "surface_dtx_pltf",
		.acpi_match_table = surface_dtx_acpi_match,
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};


/* -- SSAM device driver. --------------------------------------------------- */

#ifdef CONFIG_SURFACE_AGGREGATOR_BUS

static int surface_dtx_ssam_probe(struct ssam_device *sdev)
{
	struct sdtx_device *ddev;

	ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ssam_device_set_drvdata(sdev, ddev);
	return 0;
}

static void surface_dtx_ssam_remove(struct ssam_device *sdev)
{
	sdtx_device_destroy(ssam_device_get_drvdata(sdev));
}

static const struct ssam_device_id surface_dtx_ssam_match[] = {
	{ SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
	{ },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);

static struct ssam_device_driver surface_dtx_ssam_driver = {
	.probe = surface_dtx_ssam_probe,
	.remove = surface_dtx_ssam_remove,
	.match_table = surface_dtx_ssam_match,
	.driver = {
		.name = "surface_dtx",
		.pm = &surface_dtx_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int ssam_dtx_driver_register(void)
{
	return ssam_device_driver_register(&surface_dtx_ssam_driver);
}

static void ssam_dtx_driver_unregister(void)
{
	ssam_device_driver_unregister(&surface_dtx_ssam_driver);
}

#else /* CONFIG_SURFACE_AGGREGATOR_BUS */

static int ssam_dtx_driver_register(void)
{
	return 0;
}

static void ssam_dtx_driver_unregister(void)
{
}

#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */


/* -- Module setup. --------------------------------------------------------- */

static int __init surface_dtx_init(void)
{
	int status;

	status = ssam_dtx_driver_register();
	if (status)
		return status;

	status = platform_driver_register(&surface_dtx_platform_driver);
	if (status)
		ssam_dtx_driver_unregister();

	return status;
}
module_init(surface_dtx_init);

static void __exit surface_dtx_exit(void)
{
	platform_driver_unregister(&surface_dtx_platform_driver);
	ssam_dtx_driver_unregister();
}
module_exit(surface_dtx_exit);

MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");