xref: /dragonfly/lib/libc/sysvipc/sem.c (revision cfd1aba3)
/* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */

/*
 * Implementation of SVID semaphores
 *
 * Author:  Daniel Boulet
 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
 *
 * This software is provided ``AS IS'' without any warranties of any kind.
 */

#include "namespace.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <err.h>
#include <pthread.h>
#include <string.h>
#include <stdarg.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/sem.h>
#include "un-namespace.h"

#include "sysvipc_lock.h"
#include "sysvipc_ipc.h"
#include "sysvipc_shm.h"
#include "sysvipc_sem.h"
#include "sysvipc_hash.h"

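/*
 * Locking wrappers: the pthread calls are skipped entirely while the
 * process is still single-threaded (__isthreaded == 0).
 */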
#define SYSV_MUTEX_LOCK(x)	if (__isthreaded) _pthread_mutex_lock(x)
#define SYSV_MUTEX_UNLOCK(x)	if (__isthreaded) _pthread_mutex_unlock(x)
#define SYSV_MUTEX_DESTROY(x)	if (__isthreaded) _pthread_mutex_destroy(x)

extern struct hashtable *shmaddrs;
extern struct hashtable *shmres;
extern pthread_mutex_t lock_resources;

struct sem_undo *undos = NULL;
pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;

static int semundo_clear(int, int);

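/*
 * Drop one reference on the shared memory segment backing a semaphore set.
 * When the last reference goes away and the segment was marked as removed,
 * detach it, discard its undo entries and, unless another process already
 * removed it, inform the daemon that the segment should be removed.
 */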
static int
put_shmdata(int id) {
	struct shm_data *data;
	int ret = -1;

	SYSV_MUTEX_LOCK(&lock_resources);
	data = _hash_lookup(shmres, id);
	if (!data) {
		sysv_print_err("put_shmdata: segment not found\n");
		goto done; /* It should not reach here. */
	}

	data->used--;
	if (data->used == 0 && data->removed) {
		sysv_print("really remove the sem\n");
		SYSV_MUTEX_UNLOCK(&lock_resources);
		/* Note: even if the shmctl fails (the thread doesn't
		 * have IPC_M permission), all structures associated
		 * with it will be removed in the current process. */
		sysvipc_shmdt(data->internal);
		semundo_clear(id, -1);
		if (data->removed == SEG_ALREADY_REMOVED)
			return 1; /* The semaphore was removed by another
				   * process, so there is nothing else we
				   * must do. */
		/* Else inform the daemon that the segment is removed. */
		return (sysvipc_shmctl(id, IPC_RMID, NULL));
	}

	ret = 0;
done:
	SYSV_MUTEX_UNLOCK(&lock_resources);
	return (ret);
}

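/*
 * Resolve a semaphore id to its mapped semid_pool, taking a reference on
 * the backing shared memory segment.  Returns NULL with errno set on error.
 */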
static struct semid_pool*
get_semaptr(int semid, int to_remove, int shm_access) {
	struct semid_pool *semaptr;

	struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
	if (!shmdata) {
		/* Error is set in get_shmdata. */
		return (NULL);
	}

	semaptr = (struct semid_pool *)shmdata->internal;
	if (!semaptr) {
		put_shmdata(semid);
		errno = EINVAL;
		return (NULL);
	}

	return (semaptr);
}

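/* Check that the semaphore set still exists (it may have been removed). */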
static int
sema_exist(int semid, struct semid_pool *semaptr) {
	/* Was it removed? */
	if (semaptr->gen == -1 ||
			semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
		return (0);

	return (1);
}

/* This function is called when the semaphore is discovered
 * to have been removed.  It marks the process's internal
 * data and unmaps the shared segment.
 */
static void
mark_for_removal(int shmid) {
	sysv_print("Mark that the segment was removed\n");
	get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
	/* With the SEG_ALREADY_REMOVED parameter set, the internal
	 * resources will be freed when put_shmdata is called.
	 */
	/* Decrement the "used" field. */
	put_shmdata(shmid);
}

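/*
 * Acquire the semaphore set for reading (or its mutex when rwlocks are not
 * compiled in) and verify that it still exists.  If it was removed, release
 * the lock, mark the local resources for removal and return -1 with errno
 * set to EINVAL.
 */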
static int
try_rwlock_rdlock(int semid, struct semid_pool *semaptr) {
	sysv_print(" before rd lock id = %d %x\n", semid, semaptr);
#ifdef SYSV_RWLOCK
	sysv_rwlock_rdlock(&semaptr->rwlock);
	sysv_print("rd lock id = %d\n", semid);
#else
	sysv_mutex_lock(&semaptr->mutex);
	sysv_print("lock id = %d\n", semid);
#endif
	if (!sema_exist(semid, semaptr)) {
		errno = EINVAL;
		sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
		sysv_rwlock_unlock(&semaptr->rwlock);
#else
		sysv_mutex_unlock(&semaptr->mutex);
#endif
		/* Internal resources must be freed. */
		mark_for_removal(semid);
		return (-1);
	}
	return (0);
}

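/*
 * Same as try_rwlock_rdlock() but acquires the semaphore set for writing.
 */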
static int
try_rwlock_wrlock(int semid, struct semid_pool *semaptr) {
#ifdef SYSV_RWLOCK
	sysv_print("before wrlock id = %d %x\n", semid, semaptr);
	sysv_rwlock_wrlock(&semaptr->rwlock);
#else
	sysv_print("before lock id = %d %x\n", semid, semaptr);
	sysv_mutex_lock(&semaptr->mutex);
#endif
	sysv_print("lock id = %d\n", semid);
	if (!sema_exist(semid, semaptr)) {
		errno = EINVAL;
		sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
		sysv_rwlock_unlock(&semaptr->rwlock);
#else
		sysv_mutex_unlock(&semaptr->mutex);
#endif
		/* Internal resources must be freed. */
		mark_for_removal(semid);
		return (-1);
	}
	return (0);
}

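/*
 * Release the semaphore set lock.  If the set was removed in the meantime,
 * mark the local resources for removal and return -1 with errno set.
 */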
static int
rwlock_unlock(int semid, struct semid_pool *semaptr) {
	sysv_print("unlock id = %d %x\n", semid, semaptr);
	if (!sema_exist(semid, semaptr)) {
		/* Internal resources must be freed. */
		mark_for_removal(semid);
		errno = EINVAL;
		return (-1);
	}
#ifdef SYSV_RWLOCK
	sysv_rwlock_unlock(&semaptr->rwlock);
#else
	sysv_mutex_unlock(&semaptr->mutex);
#endif
	return (0);
}

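/*
 * semget(2) emulation.  The semaphore set lives in a shared memory segment
 * (a semid_pool header followed by nsems struct sem entries) obtained
 * through the sysvipc daemon and mapped into this process.
 */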
int
sysvipc_semget(key_t key, int nsems, int semflg) {
	int semid;
	void *shmaddr;
	//int shm_access;
	int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);

	//TODO resource limits
	sysv_print("handle semget\n");

	semid = _shmget(key, size, semflg, SEMGET);
	if (semid == -1) {
		/* errno already set. */
		goto done;
	}

	/* If the semaphore is in the process of being removed there are two
	 * cases:
	 * - the daemon knows about it and will handle this situation.
	 * - one of the threads from this address space removed it and the
	 *   daemon hasn't been notified yet; in this scenario, the semaphore
	 *   is marked using the "removed" field of shm_data and future calls
	 *   will return the EIDRM error.
	 */

#if 0
	/* Set access type. */
	shm_access = semflg & (IPC_W | IPC_R);
	if (set_shmdata_access(semid, shm_access) != 0) {
		/* errno already set. */
		goto done;
	}
#endif
	shmaddr = sysvipc_shmat(semid, NULL, 0);
	if (!shmaddr) {
		sysvipc_shmctl(semid, IPC_RMID, NULL);
		semid = -1;
		goto done;
	}

	//TODO more semaphores in a single file

done:
	sysv_print("end handle semget %d\n", semid);
	return (semid);
}

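/*
 * Drop the undo entries recorded for a semaphore set, or only those for a
 * single semaphore when semnum != -1.
 */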
static int
semundo_clear(int semid, int semnum)
{
	struct undo *sunptr;
	int i;

	sysv_print("semundo clear\n");

	SYSV_MUTEX_LOCK(&lock_undo);
	if (!undos)
		goto done;

	sunptr = &undos->un_ent[0];
	i = 0;

	while (i < undos->un_cnt) {
		if (sunptr->un_id == semid) {
			if (semnum == -1 || sunptr->un_num == semnum) {
				undos->un_cnt--;
				if (i < undos->un_cnt) {
					undos->un_ent[i] =
					  undos->un_ent[undos->un_cnt];
					continue;
				}
			}
			if (semnum != -1)
				break;
		}
		++i;
		++sunptr;
	}

	//TODO Shrink memory if needed; not sure if necessary
done:
	SYSV_MUTEX_UNLOCK(&lock_undo);
	sysv_print("end semundo clear\n");
	return (0);
}

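/*
 * semctl(2) emulation.  IPC_STAT and IPC_SET are mostly forwarded to the
 * backing shared memory segment, IPC_RMID is handled through the
 * get_semaptr()/put_shmdata() reference counting, and the GETVAL/SETVAL
 * family of commands operates directly on the mapped semid_pool under its
 * lock.
 */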
int
sysvipc___semctl(int semid, int semnum, int cmd, union semun *arg)
{
	int i, error;
	struct semid_pool *semaptr = NULL;
	struct sem *semptr = NULL;
	struct shmid_ds shmds;
	int shm_access = 0;

	/*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
		return (ENOSYS);
	*/

	sysv_print("semctl cmd = %d\n", cmd);

	error = 0;

	switch (cmd) {
		case IPC_SET: /* Originally was IPC_M but this is checked
				 by the daemon. */
		case SETVAL:
		case SETALL:
			shm_access = IPC_W;
			break;
		case IPC_STAT:
		case GETNCNT:
		case GETPID:
		case GETVAL:
		case GETALL:
		case GETZCNT:
			shm_access = IPC_R;
			break;
		default:
			break;
	}

	semaptr = get_semaptr(semid, cmd == IPC_RMID, shm_access);
	if (!semaptr) {
		/* errno already set. */
		return (-1);
	}

	switch (cmd) {
	case IPC_RMID:
		/* Mark that the segment is removed. This is done in
		 * the get_semaptr call in order to notify other processes.
		 * It will actually be removed after the put_shmdata call,
		 * once no other thread from this address space uses the
		 * shm_data structure.
		 */
		break;

	case IPC_SET:
		if (!arg->buf) {
			error = EFAULT;
			break;
		}

		memset(&shmds, 0, sizeof(shmds));
		memcpy(&shmds.shm_perm, &arg->buf->sem_perm,
				sizeof(struct ipc_perm));
		error = sysvipc_shmctl(semid, cmd, &shmds);
		/* Note: ctime and mode are not updated here as they are in
		 * the kernel implementation; those fields are already
		 * updated for the shmid_ds struct when shmctl is called.
		 */
		break;

	case IPC_STAT:
		if (!arg->buf) {
			error = EFAULT;
			break;
		}

		error = sysvipc_shmctl(semid, cmd, &shmds);
		if (error)
			break;

		memcpy(&arg->buf->sem_perm, &shmds.shm_perm,
				sizeof(struct ipc_perm));
		arg->buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
			sizeof(struct sem);
		arg->buf->sem_ctime = shmds.shm_ctime;

		/* otime is semaphore specific so read it from
		 * semaptr
		 */
		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		arg->buf->sem_otime = semaptr->ds.sem_otime;
		rwlock_unlock(semid, semaptr);
		break;

	case GETNCNT:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}

		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semncnt;
		rwlock_unlock(semid, semaptr);
		break;

	case GETPID:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}

		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].sempid;
		rwlock_unlock(semid, semaptr);
		break;

	case GETVAL:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}

		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semval;
		rwlock_unlock(semid, semaptr);
		break;

	case GETALL:
		if (!arg->array) {
			error = EFAULT;
			break;
		}

		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
			arg->array[i] = semaptr->ds.sem_base[i].semval;
		}
		rwlock_unlock(semid, semaptr);
		break;

	case GETZCNT:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}

		error = try_rwlock_rdlock(semid, semaptr);
		if (error)
			break;
		error = semaptr->ds.sem_base[semnum].semzcnt;
		rwlock_unlock(semid, semaptr);
		break;

	case SETVAL:
		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
			errno = EINVAL;
			break;
		}

		error = try_rwlock_wrlock(semid, semaptr);
		if (error)
			break;
		semptr = &semaptr->ds.sem_base[semnum];
		semptr->semval = arg->val;
		semundo_clear(semid, semnum);
		if (semptr->semzcnt || semptr->semncnt)
			umtx_wakeup((int *)&semptr->semval, 0);
		rwlock_unlock(semid, semaptr);
		break;

	case SETALL:
		if (!arg->array) {
			error = EFAULT;
			break;
		}

		error = try_rwlock_wrlock(semid, semaptr);
		if (error)
			break;
		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
			semptr = &semaptr->ds.sem_base[i];
			semptr->semval = arg->array[i];
			if (semptr->semzcnt || semptr->semncnt)
				umtx_wakeup((int *)&semptr->semval, 0);
		}
		semundo_clear(semid, -1);
		rwlock_unlock(semid, semaptr);
		break;

	default:
		errno = EINVAL;
		break;
	}

	put_shmdata(semid);

	sysv_print("end semctl\n");
	return (error);
}

/*
 * Adjust a particular entry for a particular proc
 */
static int
semundo_adjust(int semid, int semnum, int adjval)
{
	struct undo *sunptr;
	int i;
	int error = 0;
	size_t size;
	int undoid;
	void *addr;
	struct shm_data *data;

	sysv_print("semundo adjust\n");
	if (!adjval) {
		sysv_print("semundo adjust end\n");
		return (0);
	}

	SYSV_MUTEX_LOCK(&lock_undo);
	if (!undos) {
		sysv_print("get undo segment\n");
		undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
				UNDOGET);
		if (undoid == -1) {
			sysv_print_err("no undo segment\n");
			SYSV_MUTEX_UNLOCK(&lock_undo);
			return (-1);
		}

		addr = sysvipc_shmat(undoid, NULL, 0);
		if (!addr) {
			sysv_print_err("can not map undo segment\n");
			sysvipc_shmctl(undoid, IPC_RMID, NULL);
			SYSV_MUTEX_UNLOCK(&lock_undo);
			return (-1);
		}

		undos = (struct sem_undo *)addr;
		undos->un_pages = 1;
		undos->un_cnt = 0;
	}

	/*
	 * Look for the requested entry and adjust it (delete if adjval becomes
	 * 0).
	 */
	sunptr = &undos->un_ent[0];
	for (i = 0; i < undos->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
			continue;
		sunptr->un_adjval += adjval;
		if (sunptr->un_adjval == 0) {
			undos->un_cnt--;
			if (i < undos->un_cnt)
				undos->un_ent[i] = undos->un_ent[undos->un_cnt];
		}
		goto done;
	}

	/* Didn't find the right entry - create it */
	size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
		sizeof(struct undo);
	if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
		sysv_print("need more undo space\n");
		/* Grow the segment: compute the new size before detaching so
		 * that the header is not touched after it is unmapped. */
		undos->un_pages++;
		size = undos->un_pages * PAGE_SIZE;
		sysvipc_shmdt(undos);

		SYSV_MUTEX_LOCK(&lock_resources);
		data = _hash_lookup(shmaddrs, (u_long)undos);
		SYSV_MUTEX_UNLOCK(&lock_resources);

		/* No lock on "size" is necessary because it is used only by
		 * shmat and shmdt.
		 * shmat for the undo segment is called only from this
		 * function, which is protected by lock_undo.
		 * shmdt for the undo segment is not called anywhere else
		 * because the segment is destroyed by the daemon when the
		 * client dies.
		 */
		data->size = size;
		undos = sysvipc_shmat(data->shmid, NULL, 0);
		if (!undos) {
			sysv_print_err("can not remap undo segment\n");
			SYSV_MUTEX_UNLOCK(&lock_undo);
			return (-1);
		}
	}

	sunptr = &undos->un_ent[undos->un_cnt];
	undos->un_cnt++;
	sunptr->un_adjval = adjval;
	sunptr->un_id = semid;
	sunptr->un_num = semnum;
	//if (sunptr->un_cnt == seminfo.semume) TODO move it into the daemon
	/*} else {
	  error = EINVAL; // this is done via notification
	  }*/
done:
	SYSV_MUTEX_UNLOCK(&lock_undo);

	sysv_print("semundo adjust end\n");
	return (error);
}

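/*
 * semop(2) emulation.  The whole vector of operations is applied atomically
 * with respect to other processes: if one operation cannot be satisfied,
 * the ones already performed are rolled back and the caller sleeps (bounded
 * by SYSV_TIMEOUT) until another process wakes it up, then retries.
 */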
int
sysvipc_semop(int semid, struct sembuf *sops, unsigned nsops) {
	struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
	struct sembuf *sopptr;
	struct sem *semptr = NULL;
	struct sem *xsemptr = NULL;
	int eval = 0;
	int i, j;
	int do_undos;
	int val_to_sleep;

	sysv_print("[client %d] call to semop(%d, %u)\n",
			getpid(), semid, nsops);
//TODO
	/*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
	  return (ENOSYS);
	  */

	semaptr = get_semaptr(semid, 0, IPC_W);
	if (!semaptr) {
		errno = EINVAL;
		return (-1);
	}

#ifdef SYSV_SEMS
	if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
	if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
		sysv_print("sema removed\n");
		errno = EIDRM;
		goto done2;
	}

	if (nsops > MAX_SOPS) {
		sysv_print("[client %d] too many sops (max=%d, nsops=%u)\n",
				getpid(), MAX_SOPS, nsops);
		eval = E2BIG;
		goto done;
	}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {

		semptr = NULL;

		for (i = 0; i < (int)nsops; i++) {
			sopptr = &sops[i];

			if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
				eval = EFBIG;
				goto done;
			}

			semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&semptr->sem_mutex);
#endif
			sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
				sopptr->sem_num, semptr->semval, sopptr->sem_op,
				(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");

			if (sopptr->sem_op < 0) {
				if (semptr->semval + sopptr->sem_op < 0) {
					sysv_print("semop:  can't do it now\n");
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
						semptr->semzcnt > 0)
						umtx_wakeup((int *)&semptr->semval, 0);
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					sysv_print("semop:  not zero now\n");
					break;
				}
			} else {
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
				if (semptr->semncnt > 0)
					umtx_wakeup((int *)&semptr->semval, 0);
			}
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= (int)nsops)
			goto donex;

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
		/*
		 * Rollback the semaphores we had acquired.
		 */
		sysv_print("semop:  rollback 0 through %d\n", i-1);
		for (j = 0; j < i; j++) {
			xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
#ifdef SYSV_SEMS
			sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
			xsemptr->semval -= sops[j].sem_op;
			if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0);
			if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
				umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
			sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
		}

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			eval = EAGAIN;
			goto done;
		}

		/*
		 * Release semaptr->lock while sleeping, allowing other
		 * semops (like SETVAL, SETALL, etc), which require an
		 * exclusive lock and might wake us up.
		 *
		 * Reload and recheck the validity of semaptr on return.
		 * Note that semptr itself might have changed too, but
		 * we've already interlocked for semptr and that is what
		 * will be woken up if it wakes up the tsleep on a MP
		 * race.
		 *
		 */

		sysv_print("semop:  good night!\n");
		val_to_sleep = semptr->semval;
		rwlock_unlock(semid, semaptr);
		put_shmdata(semid);

		/* We don't sleep more than SYSV_TIMEOUT because we could
		 * go to sleep after another process calls wakeup and remain
		 * blocked.
		 */
		eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
		/* return code is checked below, after sem[nz]cnt-- */

		/*
		 * Make sure that the semaphore still exists
		 */

		/* Check that the semaphore wasn't removed by another thread. */
		auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
		if (!auxsemaptr) {
			errno = EIDRM;
			return (-1);
		}

		if (auxsemaptr != semaptr) {
			errno = EIDRM;
			goto done;
		}

		/* Check that the semaphore wasn't removed by another process. */
#ifdef SYSV_SEMS
		if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
		if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
			errno = EIDRM;
			goto done;
		}
		sysv_print("semop:  good morning (eval=%d)!\n", eval);

		/* The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif

		/*
		 * Is it really morning, or was our sleep interrupted?
		 * (Delayed check of tsleep() return code because we
		 * need to decrement sem[nz]cnt either way.)
		 */
		if (eval) {
			eval = EINTR;
			goto done;
		}

		sysv_print("semop:  good morning!\n");
		/* RETRY LOOP */
	}

donex:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < (int)nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			for (j = i - 1; j >= 0; j--) {
				if ((sops[j].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[j].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(semid, sops[j].sem_num,
							adjval) != 0)
					sysv_print("semop - can't undo undos\n");
			}

			for (j = 0; j < (int)nsops; j++) {
				xsemptr = &semaptr->ds.sem_base[
					sops[j].sem_num];
#ifdef SYSV_SEMS
				sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
				xsemptr->semval -= sops[j].sem_op;
				if (xsemptr->semval == 0 &&
						xsemptr->semzcnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0);
				if (xsemptr->semval <= 0 &&
						xsemptr->semncnt > 0)
					umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
				sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
			}

			sysv_print("eval = %d from semundo_adjust\n", eval);
			goto done;
		}
	}

	/* Set sempid field for each semaphore. */
	for (i = 0; i < (int)nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
		sysv_mutex_lock(&semptr->sem_mutex);
#endif
		semptr->sempid = getpid();
#ifdef SYSV_SEMS
		sysv_mutex_unlock(&semptr->sem_mutex);
#endif
	}

	sysv_print("semop:  done\n");
	semaptr->ds.sem_otime = time(NULL);
done:
	rwlock_unlock(semid, semaptr);
done2:
	put_shmdata(semid);

	return (eval);
}