// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
 */

#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "lvb_table.h"
#include "user.h"
#include "ast.h"
#include "config.h"

static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
static atomic_t dlm_monitor_opened;
static int dlm_monitor_unused = 1;

#ifdef CONFIG_COMPAT

struct dlm_lock_params32 {
	__u8 mode;
	__u8 namelen;
	__u16 unused;
	__u32 flags;
	__u32 lkid;
	__u32 parent;
	__u64 xid;
	__u64 timeout;
	__u32 castparam;
	__u32 castaddr;
	__u32 bastparam;
	__u32 bastaddr;
	__u32 lksb;
	char lvb[DLM_USER_LVB_LEN];
	char name[];
};

struct dlm_write_request32 {
	__u32 version[3];
	__u8 cmd;
	__u8 is64bit;
	__u8 unused[2];

	union  {
		struct dlm_lock_params32 lock;
		struct dlm_lspace_params lspace;
		struct dlm_purge_params purge;
	} i;
};

struct dlm_lksb32 {
	__u32 sb_status;
	__u32 sb_lkid;
	__u8 sb_flags;
	__u32 sb_lvbptr;
};

struct dlm_lock_result32 {
	__u32 version[3];
	__u32 length;
	__u32 user_astaddr;
	__u32 user_astparam;
	__u32 user_lksb;
	struct dlm_lksb32 lksb;
	__u8 bast_mode;
	__u8 unused[3];
	/* Offsets may be zero if no data is present */
	__u32 lvb_offset;
};

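/* Convert a 32-bit write request from a compat task into the native
   dlm_write_request layout.  The union member to convert is chosen by
   the command; user addresses are widened to kernel-sized pointers. */
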
static void compat_input(struct dlm_write_request *kb,
			 struct dlm_write_request32 *kb32,
			 int namelen)
{
	kb->version[0] = kb32->version[0];
	kb->version[1] = kb32->version[1];
	kb->version[2] = kb32->version[2];

	kb->cmd = kb32->cmd;
	kb->is64bit = kb32->is64bit;
	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
		kb->i.lspace.flags = kb32->i.lspace.flags;
		kb->i.lspace.minor = kb32->i.lspace.minor;
		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
	} else if (kb->cmd == DLM_USER_PURGE) {
		kb->i.purge.nodeid = kb32->i.purge.nodeid;
		kb->i.purge.pid = kb32->i.purge.pid;
	} else {
		kb->i.lock.mode = kb32->i.lock.mode;
		kb->i.lock.namelen = kb32->i.lock.namelen;
		kb->i.lock.flags = kb32->i.lock.flags;
		kb->i.lock.lkid = kb32->i.lock.lkid;
		kb->i.lock.parent = kb32->i.lock.parent;
		kb->i.lock.xid = kb32->i.lock.xid;
		kb->i.lock.timeout = kb32->i.lock.timeout;
		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
	}
}

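/* Convert a native dlm_lock_result into the 32-bit layout expected by a
   compat task, truncating kernel pointers back to 32-bit handles. */
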
static void compat_output(struct dlm_lock_result *res,
			  struct dlm_lock_result32 *res32)
{
	memset(res32, 0, sizeof(*res32));

	res32->version[0] = res->version[0];
	res32->version[1] = res->version[1];
	res32->version[2] = res->version[2];

	res32->user_astaddr = (__u32)(long)res->user_astaddr;
	res32->user_astparam = (__u32)(long)res->user_astparam;
	res32->user_lksb = (__u32)(long)res->user_lksb;
	res32->bast_mode = res->bast_mode;

	res32->lvb_offset = res->lvb_offset;
	res32->length = res->length;

	res32->lksb.sb_status = res->lksb.sb_status;
	res32->lksb.sb_flags = res->lksb.sb_flags;
	res32->lksb.sb_lkid = res->lksb.sb_lkid;
	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
}
#endif

/* Figure out if this lock is at the end of its life and no longer
   available for the application to use.  The lkb still exists until
   the final ast is read.  A lock becomes EOL in three situations:
     1. a noqueue request fails with EAGAIN
     2. an unlock completes with EUNLOCK
     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
   An EOL lock needs to be removed from the process's list of locks.
   And we can't allow any new operation on an EOL lock.  This is
   not related to the lifetime of the lkb struct which is managed
   entirely by refcount. */

static int lkb_is_endoflife(int mode, int status)
{
	switch (status) {
	case -DLM_EUNLOCK:
		return 1;
	case -DLM_ECANCEL:
	case -ETIMEDOUT:
	case -EDEADLK:
	case -EAGAIN:
		if (mode == DLM_LOCK_IV)
			return 1;
		break;
	}
	return 0;
}

/* we could possibly check if the cancel of an orphan has resulted in the lkb
   being removed and then remove that lkb from the orphans list and free it */

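/* Queue a completion/blocking ast for delivery to the owning process:
   the callback is recorded on the lkb and the lkb is added to the
   proc's asts list, waking any reader blocked in device_read(). */
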
void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
		      int status, uint32_t sbflags, uint64_t seq)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	struct dlm_user_proc *proc;
	int rv;

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
		return;

	ls = lkb->lkb_resource->res_ls;
	mutex_lock(&ls->ls_clear_proc_locks);

	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
	   lkb->ua so we can't try to use it.  This second check is necessary
	   for cases where a completion ast is received for an operation that
	   began before clear_proc_locks did its cancel/unlock. */

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
		goto out;

	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
	ua = lkb->lkb_ua;
	proc = ua->proc;

	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
		goto out;

	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
		lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;

	spin_lock(&proc->asts_spin);

	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
	if (rv < 0) {
		spin_unlock(&proc->asts_spin);
		goto out;
	}

	if (list_empty(&lkb->lkb_cb_list)) {
		kref_get(&lkb->lkb_ref);
		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
		wake_up_interruptible(&proc->wait);
	}
	spin_unlock(&proc->asts_spin);

	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
		/* N.B. spin_lock locks_spin, not asts_spin */
		spin_lock(&proc->locks_spin);
		if (!list_empty(&lkb->lkb_ownqueue)) {
			list_del_init(&lkb->lkb_ownqueue);
			dlm_put_lkb(lkb);
		}
		spin_unlock(&proc->locks_spin);
	}
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
}

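/* Handle a DLM_USER_LOCK write: allocate the per-lock dlm_user_args and
   issue a request, convert, or orphan adoption depending on the flags.
   For a new request or an adopted orphan the lkid is returned to the
   caller as the write() result. */
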
static int device_user_lock(struct dlm_user_proc *proc,
			    struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	uint32_t lkid;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	if (!params->castaddr || !params->lksb) {
		error = -EINVAL;
		goto out;
	}

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;
	ua->bastparam = params->bastparam;
	ua->bastaddr = params->bastaddr;
	ua->xid = params->xid;

	if (params->flags & DLM_LKF_CONVERT) {
		error = dlm_user_convert(ls, ua,
				         params->mode, params->flags,
				         params->lkid, params->lvb,
					 (unsigned long) params->timeout);
	} else if (params->flags & DLM_LKF_ORPHAN) {
		error = dlm_user_adopt_orphan(ls, ua,
					 params->mode, params->flags,
					 params->name, params->namelen,
					 (unsigned long) params->timeout,
					 &lkid);
		if (!error)
			error = lkid;
	} else {
		error = dlm_user_request(ls, ua,
					 params->mode, params->flags,
					 params->name, params->namelen,
					 (unsigned long) params->timeout);
		if (!error)
			error = ua->lksb.sb_lkid;
	}
 out:
	dlm_put_lockspace(ls);
	return error;
}

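/* Handle a DLM_USER_UNLOCK write: unlock the given lkid, or cancel it
   if DLM_LKF_CANCEL is set. */
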
static int device_user_unlock(struct dlm_user_proc *proc,
			      struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;

	if (params->flags & DLM_LKF_CANCEL)
		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
	else
		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
					params->lvb);
 out:
	dlm_put_lockspace(ls);
	return error;
}

static int device_user_deadlock(struct dlm_user_proc *proc,
				struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_user_deadlock(ls, params->flags, params->lkid);

	dlm_put_lockspace(ls);
	return error;
}

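/* Register the misc device ("dlm_<lockspace name>") through which
   userspace performs lock operations on this lockspace.  Calling this
   again for an already-registered lockspace is a no-op. */
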
static int dlm_device_register(struct dlm_ls *ls, char *name)
{
	int error, len;

	/* The device is already registered.  This happens when the
	   lockspace is created multiple times from userspace. */
	if (ls->ls_device.name)
		return 0;

	error = -ENOMEM;
	len = strlen(name) + strlen(name_prefix) + 2;
	ls->ls_device.name = kzalloc(len, GFP_NOFS);
	if (!ls->ls_device.name)
		goto fail;

	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
		 name);
	ls->ls_device.fops = &device_fops;
	ls->ls_device.minor = MISC_DYNAMIC_MINOR;

	error = misc_register(&ls->ls_device);
	if (error) {
		kfree(ls->ls_device.name);
		/* this has to be set to NULL
		 * to avoid a double-free in dlm_device_deregister
		 */
		ls->ls_device.name = NULL;
	}
fail:
	return error;
}

int dlm_device_deregister(struct dlm_ls *ls)
{
	/* The device is not registered.  This happens when the lockspace
	   was never used from userspace, or when device_create_lockspace()
	   calls dlm_release_lockspace() after the register fails. */
	if (!ls->ls_device.name)
		return 0;

	misc_deregister(&ls->ls_device);
	kfree(ls->ls_device.name);
	return 0;
}

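/* Handle a DLM_USER_PURGE write: purge locks owned by the given
   nodeid/pid from this lockspace. */
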
static int device_user_purge(struct dlm_user_proc *proc,
			     struct dlm_purge_params *params)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);

	dlm_put_lockspace(ls);
	return error;
}

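/* Handle a DLM_USER_CREATE_LOCKSPACE write from the control device:
   create the lockspace and register a misc device for it, returning
   the new device's minor number. */
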
static int device_create_lockspace(struct dlm_lspace_params *params)
{
	dlm_lockspace_t *lockspace;
	struct dlm_ls *ls;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
				  DLM_USER_LVB_LEN, NULL, NULL, NULL,
				  &lockspace);
	if (error)
		return error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -ENOENT;

	error = dlm_device_register(ls, params->name);
	dlm_put_lockspace(ls);

	if (error)
		dlm_release_lockspace(lockspace, 0);
	else
		error = ls->ls_device.minor;

	return error;
}

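/* Handle a DLM_USER_REMOVE_LOCKSPACE write from the control device:
   look the lockspace up by device minor and release it, forcibly if
   DLM_USER_LSFLG_FORCEFREE is set. */
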
static int device_remove_lockspace(struct dlm_lspace_params *params)
{
	dlm_lockspace_t *lockspace;
	struct dlm_ls *ls;
	int error, force = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ls = dlm_find_lockspace_device(params->minor);
	if (!ls)
		return -ENOENT;

	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
		force = 2;

	lockspace = ls->ls_local_handle;
	dlm_put_lockspace(ls);

	/* The final dlm_release_lockspace waits for references to go to
	   zero, so all processes will need to close their device for the
	   ls before the release will proceed.  release also calls the
	   device_deregister above.  Converting a positive return value
	   from release to zero means that userspace won't know when its
	   release was the final one, but it shouldn't need to know. */

	error = dlm_release_lockspace(lockspace, force);
	if (error > 0)
		error = 0;
	return error;
}

/* Check the user's version matches ours */
static int check_version(struct dlm_write_request *req)
{
	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {

		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
		       current->comm,
		       task_pid_nr(current),
		       req->version[0],
		       req->version[1],
		       req->version[2],
		       DLM_DEVICE_VERSION_MAJOR,
		       DLM_DEVICE_VERSION_MINOR,
		       DLM_DEVICE_VERSION_PATCH);
		return -EINVAL;
	}
	return 0;
}

/*
 * device_write
 *
 *   device_user_lock
 *     dlm_user_request -> request_lock
 *     dlm_user_convert -> convert_lock
 *
 *   device_user_unlock
 *     dlm_user_unlock -> unlock_lock
 *     dlm_user_cancel -> cancel_lock
 *
 *   device_create_lockspace
 *     dlm_new_lockspace
 *
 *   device_remove_lockspace
 *     dlm_release_lockspace
 */

/* a write to a lockspace device is a lock or unlock request, a write
   to the control device is to create/remove a lockspace */

static ssize_t device_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_write_request *kbuf;
	int error;

#ifdef CONFIG_COMPAT
	if (count < sizeof(struct dlm_write_request32))
#else
	if (count < sizeof(struct dlm_write_request))
#endif
		return -EINVAL;

	/*
	 * can't compare against COMPAT/dlm_write_request32 because
	 * we don't yet know if is64bit is zero
	 */
	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
		return -EINVAL;

	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (check_version(kbuf)) {
		error = -EBADE;
		goto out_free;
	}

#ifdef CONFIG_COMPAT
	if (!kbuf->is64bit) {
		struct dlm_write_request32 *k32buf;
		int namelen = 0;

		if (count > sizeof(struct dlm_write_request32))
			namelen = count - sizeof(struct dlm_write_request32);

		k32buf = (struct dlm_write_request32 *)kbuf;

		/* add 1 after namelen so that the name string is terminated */
		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
			       GFP_NOFS);
		if (!kbuf) {
			kfree(k32buf);
			return -ENOMEM;
		}

		if (proc)
			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);

		compat_input(kbuf, k32buf, namelen);
		kfree(k32buf);
	}
#endif

	/* do we really need this? can a write happen after a close? */
	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
		error = -EINVAL;
		goto out_free;
	}

	error = -EINVAL;

	switch (kbuf->cmd)
	{
	case DLM_USER_LOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_lock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_UNLOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_unlock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_DEADLOCK:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_deadlock(proc, &kbuf->i.lock);
		break;

	case DLM_USER_CREATE_LOCKSPACE:
		if (proc) {
			log_print("create/remove only on control device");
			goto out_free;
		}
		error = device_create_lockspace(&kbuf->i.lspace);
		break;

	case DLM_USER_REMOVE_LOCKSPACE:
		if (proc) {
			log_print("create/remove only on control device");
			goto out_free;
		}
		error = device_remove_lockspace(&kbuf->i.lspace);
		break;

	case DLM_USER_PURGE:
		if (!proc) {
			log_print("no locking on control device");
			goto out_free;
		}
		error = device_user_purge(proc, &kbuf->i.purge);
		break;

	default:
		log_print("Unknown command passed to DLM device : %d\n",
			  kbuf->cmd);
	}

 out_free:
	kfree(kbuf);
	return error;
}

/* Every process that opens the lockspace device has its own "proc" structure
   hanging off the open file that's used to keep track of locks owned by the
   process and asts that need to be delivered to the process. */

static int device_open(struct inode *inode, struct file *file)
{
	struct dlm_user_proc *proc;
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_device(iminor(inode));
	if (!ls)
		return -ENOENT;

	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
	if (!proc) {
		dlm_put_lockspace(ls);
		return -ENOMEM;
	}

	proc->lockspace = ls->ls_local_handle;
	INIT_LIST_HEAD(&proc->asts);
	INIT_LIST_HEAD(&proc->locks);
	INIT_LIST_HEAD(&proc->unlocking);
	spin_lock_init(&proc->asts_spin);
	spin_lock_init(&proc->locks_spin);
	init_waitqueue_head(&proc->wait);
	file->private_data = proc;

	return 0;
}

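/* Closing the lockspace device tears down the proc struct: any locks
   still owned through this file are cleaned up by dlm_clear_proc_locks()
   before the proc is freed. */
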
static int device_close(struct inode *inode, struct file *file)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);

	dlm_clear_proc_locks(ls, proc);

	/* at this point no more lkb's should exist for this lockspace,
	   so there's no chance of dlm_user_add_ast() being called and
	   looking for lkb->ua->proc */

	kfree(proc);
	file->private_data = NULL;

	dlm_put_lockspace(ls);
	dlm_put_lockspace(ls);  /* for the find in device_open() */

	/* FIXME: AUTOFREE: if this ls is no longer used do
	   device_remove_lockspace() */

	return 0;
}

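/* Fill in a dlm_lock_result describing one ast and copy it (plus the
   lvb, when requested and the buffer has room) to the user buffer.
   Returns the number of bytes written or -EFAULT. */
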
static int copy_result_to_user(struct dlm_user_args *ua, int compat,
			       uint32_t flags, int mode, int copy_lvb,
			       char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
	struct dlm_lock_result32 result32;
#endif
	struct dlm_lock_result result;
	void *resultptr;
	int error=0;
	int len;
	int struct_len;

	memset(&result, 0, sizeof(struct dlm_lock_result));
	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
	result.version[1] = DLM_DEVICE_VERSION_MINOR;
	result.version[2] = DLM_DEVICE_VERSION_PATCH;
	memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
	result.user_lksb = ua->user_lksb;

	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
	   in a conversion unless the conversion is successful.  See code
	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
	   notes that a new blocking AST address and parameter are set even if
	   the conversion fails, so maybe we should just do that. */

	if (flags & DLM_CB_BAST) {
		result.user_astaddr = ua->bastaddr;
		result.user_astparam = ua->bastparam;
		result.bast_mode = mode;
	} else {
		result.user_astaddr = ua->castaddr;
		result.user_astparam = ua->castparam;
	}

#ifdef CONFIG_COMPAT
	if (compat)
		len = sizeof(struct dlm_lock_result32);
	else
#endif
		len = sizeof(struct dlm_lock_result);
	struct_len = len;

	/* copy lvb to userspace if there is one, it's been updated, and
	   the user buffer has space for it */

	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
				 DLM_USER_LVB_LEN)) {
			error = -EFAULT;
			goto out;
		}

		result.lvb_offset = len;
		len += DLM_USER_LVB_LEN;
	}

	result.length = len;
	resultptr = &result;
#ifdef CONFIG_COMPAT
	if (compat) {
		compat_output(&result, &result32);
		resultptr = &result32;
	}
#endif

	if (copy_to_user(buf, resultptr, struct_len))
		error = -EFAULT;
	else
		error = len;
 out:
	return error;
}

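/* A read the size of struct dlm_device_version returns the kernel's
   device interface version. */
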
static int copy_version_to_user(char __user *buf, size_t count)
{
	struct dlm_device_version ver;

	memset(&ver, 0, sizeof(struct dlm_device_version));
	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
	ver.version[2] = DLM_DEVICE_VERSION_PATCH;

	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
		return -EFAULT;
	return sizeof(struct dlm_device_version);
}

/* a read returns a single ast described in a struct dlm_lock_result */

static ssize_t device_read(struct file *file, char __user *buf, size_t count,
			   loff_t *ppos)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_lkb *lkb;
	DECLARE_WAITQUEUE(wait, current);
	struct dlm_callback cb;
	int rv, resid, copy_lvb = 0;
	int old_mode, new_mode;

	if (count == sizeof(struct dlm_device_version)) {
		rv = copy_version_to_user(buf, count);
		return rv;
	}

	if (!proc) {
		log_print("non-version read from control device %zu", count);
		return -EINVAL;
	}

#ifdef CONFIG_COMPAT
	if (count < sizeof(struct dlm_lock_result32))
#else
	if (count < sizeof(struct dlm_lock_result))
#endif
		return -EINVAL;

 try_another:

	/* do we really need this? can a read happen after a close? */
	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
		return -EINVAL;

	spin_lock(&proc->asts_spin);
	if (list_empty(&proc->asts)) {
		if (file->f_flags & O_NONBLOCK) {
			spin_unlock(&proc->asts_spin);
			return -EAGAIN;
		}

		add_wait_queue(&proc->wait, &wait);

	repeat:
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&proc->asts) && !signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			schedule();
			spin_lock(&proc->asts_spin);
			goto repeat;
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&proc->wait, &wait);

		if (signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			return -ERESTARTSYS;
		}
	}

	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
	   without removing lkb_cb_list; so empty lkb_cb_list is always
	   consistent with empty lkb_callbacks */

	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);

	/* rem_lkb_callback sets a new lkb_last_cast */
	old_mode = lkb->lkb_last_cast.mode;

	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
	if (rv < 0) {
		/* this shouldn't happen; lkb should have been removed from
		   list when resid was zero */
		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
		list_del_init(&lkb->lkb_cb_list);
		spin_unlock(&proc->asts_spin);
		/* removes ref for proc->asts, may cause lkb to be freed */
		dlm_put_lkb(lkb);
		goto try_another;
	}
	if (!resid)
		list_del_init(&lkb->lkb_cb_list);
	spin_unlock(&proc->asts_spin);

	if (cb.flags & DLM_CB_SKIP) {
		/* removes ref for proc->asts, may cause lkb to be freed */
		if (!resid)
			dlm_put_lkb(lkb);
		goto try_another;
	}

	if (cb.flags & DLM_CB_CAST) {
		new_mode = cb.mode;

		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
			copy_lvb = 1;

		lkb->lkb_lksb->sb_status = cb.sb_status;
		lkb->lkb_lksb->sb_flags = cb.sb_flags;
	}

	rv = copy_result_to_user(lkb->lkb_ua,
				 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
				 cb.flags, cb.mode, copy_lvb, buf, count);

	/* removes ref for proc->asts, may cause lkb to be freed */
	if (!resid)
		dlm_put_lkb(lkb);

	return rv;
}

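/* Poll support: the device is readable whenever the proc has asts
   queued for delivery. */
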
static __poll_t device_poll(struct file *file, poll_table *wait)
{
	struct dlm_user_proc *proc = file->private_data;

	poll_wait(file, &proc->wait, wait);

	spin_lock(&proc->asts_spin);
	if (!list_empty(&proc->asts)) {
		spin_unlock(&proc->asts_spin);
		return EPOLLIN | EPOLLRDNORM;
	}
	spin_unlock(&proc->asts_spin);
	return 0;
}

int dlm_user_daemon_available(void)
{
	/* dlm_controld hasn't started (or, has started, but not
	   properly populated configfs) */

	if (!dlm_our_nodeid())
		return 0;

	/* This is to deal with versions of dlm_controld that don't
	   know about the monitor device.  We assume that if the
	   dlm_controld was started (above), but the monitor device
	   was never opened, that it's an old version.  dlm_controld
	   should open the monitor device before populating configfs. */

	if (dlm_monitor_unused)
		return 1;

	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
}

static int ctl_device_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

static int ctl_device_close(struct inode *inode, struct file *file)
{
	return 0;
}

static int monitor_device_open(struct inode *inode, struct file *file)
{
	atomic_inc(&dlm_monitor_opened);
	dlm_monitor_unused = 0;
	return 0;
}

static int monitor_device_close(struct inode *inode, struct file *file)
{
	if (atomic_dec_and_test(&dlm_monitor_opened))
		dlm_stop_lockspaces();
	return 0;
}

static const struct file_operations device_fops = {
	.open    = device_open,
	.release = device_close,
	.read    = device_read,
	.write   = device_write,
	.poll    = device_poll,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static const struct file_operations ctl_device_fops = {
	.open    = ctl_device_open,
	.release = ctl_device_close,
	.read    = device_read,
	.write   = device_write,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice ctl_device = {
	.name  = "dlm-control",
	.fops  = &ctl_device_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

static const struct file_operations monitor_device_fops = {
	.open    = monitor_device_open,
	.release = monitor_device_close,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice monitor_device = {
	.name  = "dlm-monitor",
	.fops  = &monitor_device_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

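/* Register the dlm-control and dlm-monitor misc devices at module init;
   both are deregistered again in dlm_user_exit(). */
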
int __init dlm_user_init(void)
{
	int error;

	atomic_set(&dlm_monitor_opened, 0);

	error = misc_register(&ctl_device);
	if (error) {
		log_print("misc_register failed for control device");
		goto out;
	}

	error = misc_register(&monitor_device);
	if (error) {
		log_print("misc_register failed for monitor device");
		misc_deregister(&ctl_device);
	}
 out:
	return error;
}

void dlm_user_exit(void)
{
	misc_deregister(&ctl_device);
	misc_deregister(&monitor_device);
}