xref: /dragonfly/lib/libc/sysvipc/sem.c (revision 0fdb7d01)
1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
2 
3 /*
4  * Implementation of SVID semaphores
5  *
6  * Author:  Daniel Boulet
7  * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
8  *
9  * This software is provided ``AS IS'' without any warranties of any kind.
10  */
11 
12 #include "namespace.h"
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <errno.h>
16 #include <err.h>
17 #include <pthread.h>
18 #include <string.h>
#include <stdarg.h>
#include <time.h>
#include <unistd.h>
20 #include <sys/param.h>
21 #include <sys/queue.h>
22 #include <sys/mman.h>
23 #include <sys/sem.h>
24 #include "un-namespace.h"
25 
26 #include "sysvipc_lock.h"
27 #include "sysvipc_ipc.h"
28 #include "sysvipc_shm.h"
29 #include "sysvipc_sem.h"
30 #include "sysvipc_hash.h"
31 
32 
#define SYSV_MUTEX_LOCK(x)	do { if (__isthreaded) _pthread_mutex_lock(x); } while (0)
#define SYSV_MUTEX_UNLOCK(x)	do { if (__isthreaded) _pthread_mutex_unlock(x); } while (0)
#define SYSV_MUTEX_DESTROY(x)	do { if (__isthreaded) _pthread_mutex_destroy(x); } while (0)
36 
37 extern struct hashtable *shmaddrs;
38 extern struct hashtable *shmres;
39 extern pthread_mutex_t lock_resources;
40 
41 struct sem_undo *undos = NULL;
42 pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
43 
44 static int semundo_clear(int, int);
45 
46 static int
47 put_shmdata(int id) {
48 	struct shm_data *data;
49 	int ret = -1;
50 
51 	SYSV_MUTEX_LOCK(&lock_resources);
52 	data = _hash_lookup(shmres, id);
53 	if (!data) {
		sysv_print_err("something went wrong in put_shmdata\n");
55 		goto done; /* It should not reach here. */
56 	}
57 
58 	data->used--;
59 	if (data->used == 0 && data->removed) {
60 		sysv_print("really remove the sem\n");
61 		SYSV_MUTEX_UNLOCK(&lock_resources);
62 		/* OBS: Even if the shmctl fails (the thread doesn't
63 		 * have IPC_M permissions), all structures associated
64 		 * with it will be removed in the current process.*/
65 		sysvipc_shmdt(data->internal);
66 		semundo_clear(id, -1);
		if (data->removed == SEG_ALREADY_REMOVED)
			return (1); /* The semaphore was already removed
				     * by another process, so there is
				     * nothing else we must do. */
71 		/* Else inform the daemon that the segment is removed. */
72 		return (sysvipc_shmctl(id, IPC_RMID, NULL));
73 	}
74 
75 	ret = 0;
76 done:
77 	SYSV_MUTEX_UNLOCK(&lock_resources);
78 	return (ret);
79 }
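
/*
 * Illustrative timeline for the deferred removal above (the numbers are
 * only an example, not a trace from a real run): thread A marks the
 * segment as removed via IPC_RMID while data->used is still 2; nothing
 * is unmapped yet.  Only when the last in-process user calls
 * put_shmdata() and "used" drops to 0 is the mapping torn down and,
 * unless the segment was already removed by another process, the daemon
 * notified with IPC_RMID.
 */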
80 
81 static struct semid_pool*
82 get_semaptr(int semid, int to_remove, int shm_access) {
83 	struct semid_pool *semaptr;
84 
85 	struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
86 	if (!shmdata) {
87 		/* Error is set in get_shmdata. */
88 		return (NULL);
89 	}
90 
91 	semaptr = (struct semid_pool *)shmdata->internal;
92 	if (!semaptr) {
93 		put_shmdata(semid);
94 		errno = EINVAL;
95 		return (NULL);
96 	}
97 
98 	return (semaptr);
99 }
100 
101 static int
102 sema_exist(int semid, struct semid_pool *semaptr) {
103 	/* Was it removed? */
104 	if (semaptr->gen == -1 ||
105 			semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
106 		return (0);
107 
108 	return (1);
109 }
110 
/* This function is called when the semaphore is
 * discovered as removed. It marks the process's
 * internal data and munmaps the shared memory segment. */
114 static void
115 mark_for_removal(int shmid) {
116 	sysv_print("Mark that the segment was removed\n");
117 	get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
	/* By passing SEG_ALREADY_REMOVED, the internal resources
	 * will be freed when put_shmdata is called.
	 */
121 	/* Decrement the "usage" field. */
122 	put_shmdata(shmid);
123 }
124 
125 static int
126 try_rwlock_rdlock(int semid, struct semid_pool *semaptr) {
	sysv_print("before rd lock id = %d %p\n", semid, (void *)semaptr);
128 #ifdef SYSV_RWLOCK
129 	sysv_rwlock_rdlock(&semaptr->rwlock);
130 	sysv_print("rd lock id = %d\n", semid);
131 #else
132 	sysv_mutex_lock(&semaptr->mutex);
133 	sysv_print("lock id = %d\n", semid);
134 #endif
135 	if (!sema_exist(semid, semaptr)) {
136 		errno = EINVAL;
137 		sysv_print("error sema %d doesn't exist\n", semid);
138 #ifdef SYSV_RWLOCK
139 		sysv_rwlock_unlock(&semaptr->rwlock);
140 #else
141 		sysv_mutex_unlock(&semaptr->mutex);
142 #endif
143 		/* Internal resources must be freed. */
144 		mark_for_removal(semid);
145 		return (-1);
146 	}
147 	return (0);
148 }
149 
150 static int
151 try_rwlock_wrlock(int semid, struct semid_pool *semaptr) {
152 #ifdef SYSV_RWLOCK
	sysv_print("before wrlock id = %d %p\n", semid, (void *)semaptr);
154 	sysv_rwlock_wrlock(&semaptr->rwlock);
155 #else
	sysv_print("before lock id = %d %p\n", semid, (void *)semaptr);
157 	sysv_mutex_lock(&semaptr->mutex);
158 #endif
159 	sysv_print("lock id = %d\n", semid);
160 	if (!sema_exist(semid, semaptr)) {
161 		errno = EINVAL;
162 		sysv_print("error sema %d doesn't exist\n", semid);
163 #ifdef SYSV_RWLOCK
164 		sysv_rwlock_unlock(&semaptr->rwlock);
165 #else
166 		sysv_mutex_unlock(&semaptr->mutex);
167 #endif
168 		/* Internal resources must be freed. */
169 		mark_for_removal(semid);
170 		return (-1);
171 	}
172 	return (0);
173 }
174 
175 static int
176 rwlock_unlock(int semid, struct semid_pool *semaptr) {
	sysv_print("unlock id = %d %p\n", semid, (void *)semaptr);
178 	if (!sema_exist(semid, semaptr)) {
179 		/* Internal resources must be freed. */
180 		mark_for_removal(semid);
181 		errno = EINVAL;
182 		return (-1);
183 	}
184 #ifdef SYSV_RWLOCK
185 	sysv_rwlock_unlock(&semaptr->rwlock);
186 #else
187 	sysv_mutex_unlock(&semaptr->mutex);
188 #endif
189 	return (0);
190 }
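
/*
 * Minimal sketch (compiled out) of the pattern the helpers above provide
 * and the calls below follow: take a reference on the mapping, take the
 * lock (which also re-checks that the semaphore still exists), read, then
 * release both in reverse order.  The function name and the missing
 * bounds check on semnum are illustrative simplifications.
 */
#if 0
static int
example_read_semval(int semid, int semnum)
{
	struct semid_pool *semaptr;
	int val;

	semaptr = get_semaptr(semid, 0, IPC_R);	/* takes a reference */
	if (semaptr == NULL)
		return (-1);			/* errno already set */

	if (try_rwlock_rdlock(semid, semaptr) == -1) {
		put_shmdata(semid);
		return (-1);			/* semaphore was removed */
	}

	val = semaptr->ds.sem_base[semnum].semval;

	rwlock_unlock(semid, semaptr);
	put_shmdata(semid);
	return (val);
}
#endif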
191 
192 int
193 sysvipc_semget(key_t key, int nsems, int semflg) {
194 	int semid;
195 	void *shmaddr;
196 	//int shm_access;
197 	int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
198 
199 	//TODO resources limits
200 	sysv_print("handle semget\n");
201 
202 	semid = _shmget(key, size, semflg, SEMGET);
203 	if (semid == -1) {
204 		/* errno already set. */
205 		goto done;
206 	}
207 
	/* If the semaphore is in the process of being removed there are two cases:
	 * - the daemon already knows about it and will handle the situation;
	 * - one of the threads from this address space removed it and the daemon
	 *   has not been notified yet; in this scenario, the semaphore is marked
	 *   using the "removed" field of shm_data and future calls will return
	 *   the EIDRM error.
	 */
215 
216 #if 0
217 	/* Set access type. */
218 	shm_access = semflg & (IPC_W | IPC_R);
219 	if(set_shmdata_access(semid, shm_access) != 0) {
220 		/* errno already set. */
221 		goto done;
222 	}
223 #endif
	shmaddr = sysvipc_shmat(semid, NULL, 0);
	if (!shmaddr) {
		sysvipc_shmctl(semid, IPC_RMID, NULL);
		semid = -1;
		goto done;
	}
230 
231 	//TODO more semaphores in a single file
232 
233 done:
234 	sysv_print("end handle semget %d\n", semid);
235 	return (semid);
236 }
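
/*
 * Minimal usage sketch (compiled out): create or look up a set of two
 * semaphores; the key and mode below are arbitrary example values.
 */
#if 0
static int
example_semget(void)
{
	int semid;

	semid = sysvipc_semget((key_t)0x1234, 2, IPC_CREAT | 0600);
	if (semid == -1)
		err(1, "sysvipc_semget");	/* errno set on failure */
	return (semid);
}
#endif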
237 
238 static int
239 semundo_clear(int semid, int semnum)
240 {
241 	struct undo *sunptr;
242 	int i;
243 
244 	sysv_print("semundo clear\n");
245 
246 	SYSV_MUTEX_LOCK(&lock_undo);
247 	if (!undos)
248 		goto done;
249 
250 	sunptr = &undos->un_ent[0];
251 	i = 0;
252 
253 	while (i < undos->un_cnt) {
254 		if (sunptr->un_id == semid) {
255 			if (semnum == -1 || sunptr->un_num == semnum) {
256 				undos->un_cnt--;
257 				if (i < undos->un_cnt) {
258 					undos->un_ent[i] =
259 					  undos->un_ent[undos->un_cnt];
260 					continue;
261 				}
262 			}
263 			if (semnum != -1)
264 				break;
265 		}
266 		++i;
267 		++sunptr;
268 	}
269 
	//TODO Shrink memory if needed; not sure if necessary
271 done:
272 	SYSV_MUTEX_UNLOCK(&lock_undo);
273 	sysv_print("end semundo clear\n");
274 	return (0);
275 }
276 
277 int
sysvipc_semctl(int semid, int semnum, int cmd, union semun arg) {
279 	int i, error;
280 	struct semid_pool *semaptr = NULL;
281 	struct sem *semptr = NULL;
282 	struct shmid_ds shmds;
283 	int shm_access = 0;
284 
285 	/*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
286 		return (ENOSYS);
287 */
288 
289 	sysv_print("semctl cmd = %d\n", cmd);
290 
291 	error = 0;
292 
293 	switch (cmd) {
		case IPC_SET: /* Originally IPC_M, but this is checked
				 * by the daemon. */
296 		case SETVAL:
297 		case SETALL:
298 			shm_access = IPC_W;
299 			break;
300 		case IPC_STAT:
301 		case GETNCNT:
302 		case GETPID:
303 		case GETVAL:
304 		case GETALL:
305 		case GETZCNT:
306 			shm_access = IPC_R;
307 			break;
308 		default:
309 			break;
310 	}
311 
	semaptr = get_semaptr(semid, cmd == IPC_RMID, shm_access);
313 	if (!semaptr) {
314 		/* errno already set. */
315 		return (-1);
316 	}
317 
318 	switch (cmd) {
319 	case IPC_RMID:
		/* Mark the segment as removed. This is done in the
		 * get_semaptr call in order to notify other processes.
		 * It will actually be removed after the put_shmdata call,
		 * once no other thread from this address space uses the
		 * shm_data structure.
		 */
326 		break;
327 
328 	case IPC_SET:
329 		if (!arg.buf) {
330 			error = EFAULT;
331 			break;
332 		}
333 
		memset(&shmds, 0, sizeof(shmds));
335 		memcpy(&shmds.shm_perm, &arg.buf->sem_perm,
336 				sizeof(struct ipc_perm));
337 		error = sysvipc_shmctl(semid, cmd, &shmds);
		/* OBS: ctime and mode are not updated here as they are in
		 * the kernel implementation. Those fields are already
		 * updated in the shmid_ds struct when shmctl is called.
		 */
342 		break;
343 
344 	case IPC_STAT:
345 		if (!arg.buf) {
346 			error = EFAULT;
347 			break;
348 		}
349 
350 		error = sysvipc_shmctl(semid, cmd, &shmds);
351 		if (error)
352 			break;
353 
354 		memcpy(&arg.buf->sem_perm, &shmds.shm_perm,
355 				sizeof(struct ipc_perm));
356 		arg.buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
357 			sizeof(struct sem);
358 		arg.buf->sem_ctime = shmds.shm_ctime;
359 
360 		/* otime is semaphore specific so read it from
361 		 * semaptr
362 		 */
363 		error = try_rwlock_rdlock(semid, semaptr);
364 		if (error)
365 			break;
366 		arg.buf->sem_otime = semaptr->ds.sem_otime;
367 		rwlock_unlock(semid, semaptr);
368 		break;
369 
370 	case GETNCNT:
371 		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
372 			errno = EINVAL;
373 			break;
374 		}
375 
376 		error = try_rwlock_rdlock(semid, semaptr);
377 		if (error)
378 			break;
379 		error = semaptr->ds.sem_base[semnum].semncnt;
380 		rwlock_unlock(semid, semaptr);
381 		break;
382 
383 	case GETPID:
384 		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
385 			errno = EINVAL;
386 			break;
387 		}
388 
389 		error = try_rwlock_rdlock(semid, semaptr);
390 		if (error)
391 			break;
392 		error = semaptr->ds.sem_base[semnum].sempid;
393 		rwlock_unlock(semid, semaptr);
394 		break;
395 
396 	case GETVAL:
397 		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
398 			errno = EINVAL;
399 			break;
400 		}
401 
402 		error = try_rwlock_rdlock(semid, semaptr);
403 		if (error)
404 			break;
405 		error = semaptr->ds.sem_base[semnum].semval;
406 		rwlock_unlock(semid, semaptr);
407 		break;
408 
409 	case GETALL:
410 		if (!arg.array) {
411 			error = EFAULT;
412 			break;
413 		}
414 
415 		error = try_rwlock_rdlock(semid, semaptr);
416 		if (error)
417 			break;
418 		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
419 			arg.array[i] = semaptr->ds.sem_base[i].semval;
420 		}
421 		rwlock_unlock(semid, semaptr);
422 		break;
423 
424 	case GETZCNT:
425 		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
426 			errno = EINVAL;
427 			break;
428 		}
429 
430 		error = try_rwlock_rdlock(semid, semaptr);
431 		if (error)
432 			break;
433 		error = semaptr->ds.sem_base[semnum].semzcnt;
434 		rwlock_unlock(semid, semaptr);
435 		break;
436 
437 	case SETVAL:
438 		if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
439 			errno = EINVAL;
440 			break;
441 		}
442 
443 		error = try_rwlock_wrlock(semid, semaptr);
444 		if (error)
445 			break;
446 		semptr = &semaptr->ds.sem_base[semnum];
447 		semptr->semval = arg.val;
448 		semundo_clear(semid, semnum);
449 		if (semptr->semzcnt || semptr->semncnt)
450 			umtx_wakeup((int *)&semptr->semval, 0);
451 		rwlock_unlock(semid, semaptr);
452 		break;
453 
454 	case SETALL:
455 		if (!arg.array) {
456 			error = EFAULT;
457 			break;
458 		}
459 
460 		error = try_rwlock_wrlock(semid, semaptr);
461 		if (error)
462 			break;
463 		for (i = 0; i < semaptr->ds.sem_nsems; i++) {
464 			semptr = &semaptr->ds.sem_base[i];
465 			semptr->semval = arg.array[i];
466 			if (semptr->semzcnt || semptr->semncnt)
467 				umtx_wakeup((int *)&semptr->semval, 0);
468 		}
469 		semundo_clear(semid, -1);
470 		rwlock_unlock(semid, semaptr);
471 		break;
472 
473 	default:
474 		errno = EINVAL;
475 		break;
476 	}
477 
478 	put_shmdata(semid);
479 
480 	sysv_print("end semctl\n");
481 	return (error);
482 }
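
/*
 * Minimal usage sketch (compiled out): initialize semaphore 0 of the set
 * to 1 and read the value back.  union semun is the one already in scope
 * here (via the included system headers); the id is assumed to come from
 * a previous sysvipc_semget() call.
 */
#if 0
static void
example_semctl(int semid)
{
	union semun arg;
	int val;

	arg.val = 1;
	if (sysvipc_semctl(semid, 0, SETVAL, arg) != 0)
		errx(1, "SETVAL failed");

	val = sysvipc_semctl(semid, 0, GETVAL, arg);	/* GETVAL returns the value */
	sysv_print("semaphore 0 has value %d\n", val);
}
#endif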
483 
484 /*
485  * Adjust a particular entry for a particular proc
486  */
487 static int
488 semundo_adjust(int semid, int semnum, int adjval)
489 {
490 	struct undo *sunptr;
491 	int i;
492 	int error = 0;
493 	size_t size;
494 	int undoid;
495 	void *addr;
496 	struct shm_data *data;
497 
498 	sysv_print("semundo adjust\n");
	if (!adjval)
		return (0);	/* Nothing to adjust; lock_undo is not held yet. */
501 
502 	SYSV_MUTEX_LOCK(&lock_undo);
503 	if (!undos) {
504 		sysv_print("get undo segment\n");
505 		undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
506 				UNDOGET);
		if (undoid == -1) {
			sysv_print_err("no undo segment\n");
			SYSV_MUTEX_UNLOCK(&lock_undo);
			return (-1);
		}
511 
		addr = sysvipc_shmat(undoid, NULL, 0);
		if (!addr) {
			sysv_print_err("cannot map undo segment\n");
			sysvipc_shmctl(undoid, IPC_RMID, NULL);
			SYSV_MUTEX_UNLOCK(&lock_undo);
			return (-1);
		}
518 
519 		undos = (struct sem_undo *)addr;
520 		undos->un_pages = 1;
521 		undos->un_cnt = 0;
522 	}
523 
524 	/*
525 	 * Look for the requested entry and adjust it (delete if adjval becomes
526 	 * 0).
527 	 */
528 	sunptr = &undos->un_ent[0];
529 	for (i = 0; i < undos->un_cnt; i++, sunptr++) {
		if (sunptr->un_id != semid || sunptr->un_num != semnum)
531 			continue;
532 		sunptr->un_adjval += adjval;
533 		if (sunptr->un_adjval == 0) {
534 			undos->un_cnt--;
535 			if (i < undos->un_cnt)
536 				undos->un_ent[i] = undos->un_ent[undos->un_cnt];
537 		}
538 		goto done;
539 	}
540 
	/* Didn't find the right entry - create it. */
	size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
		sizeof(struct undo);
544 	if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
545 		sysv_print("need more undo space\n");
546 		sysvipc_shmdt(undos);
547 		undos->un_pages++;
548 
549 		SYSV_MUTEX_LOCK(&lock_resources);
550 		data = _hash_lookup(shmaddrs, (u_long)undos);
551 		SYSV_MUTEX_UNLOCK(&lock_resources);
552 
		/* No lock on "size" is necessary because it is used
		 * only by shmat and shmdt.
		 * shmat for undoid is called only from this function and
		 * is protected by lock_undo.
		 * shmdt for undoid is not called anywhere because the segment
		 * is destroyed by the daemon when the client dies.
		 */
560 		data->size = undos->un_pages * PAGE_SIZE;
561 		undos = sysvipc_shmat(data->shmid, NULL, 0);
562 	}
563 
564 	sunptr = &undos->un_ent[undos->un_cnt];
565 	undos->un_cnt++;
566 	sunptr->un_adjval = adjval;
567 	sunptr->un_id = semid;
568 	sunptr->un_num = semnum;
	//if (suptr->un_cnt == seminfo.semume) TODO move this check to the daemon
	/*} else {
	  error = EINVAL; // done through notification
	  }*/
573 done:
574 	SYSV_MUTEX_UNLOCK(&lock_undo);
575 
576 	sysv_print("semundo adjust end\n");
577 	return (error);
578 }
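
/*
 * Bookkeeping sketch for the adjustment merging above (the values are
 * illustrative, not from a real run): a semop() that decrements sem 0
 * by 1 with SEM_UNDO records the opposite adjustment, so
 *
 *	semundo_adjust(semid, 0, +1);	-> un_adjval becomes +1
 *	semundo_adjust(semid, 0, +1);	-> un_adjval becomes +2
 *	semundo_adjust(semid, 0, -2);	-> entry dropped (adjval reaches 0)
 *
 * leaving no undo entry once the process has released what it took.
 */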
579 
int
sysvipc_semop(int semid, struct sembuf *sops, unsigned nsops) {
581 	struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
582 	struct sembuf *sopptr;
583 	struct sem *semptr = NULL;
584 	struct sem *xsemptr = NULL;
585 	int eval = 0;
586 	int i, j;
587 	int do_undos;
588 	int val_to_sleep;
589 
590 	sysv_print("[client %d] call to semop(%d, %u)\n",
591 			getpid(), semid, nsops);
592 //TODO
593 	/*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
594 	  return (ENOSYS);
595 	  */
596 
597 	semaptr = get_semaptr(semid, 0, IPC_W);
598 	if (!semaptr) {
599 		errno = EINVAL;
600 		return (-1);
601 	}
602 
603 #ifdef SYSV_SEMS
604 	if (try_rwlock_rdlock(semid, semaptr) == -1) {
605 #else
606 	if (try_rwlock_wrlock(semid, semaptr) == -1) {
607 #endif
608 		sysv_print("sema removed\n");
609 		errno = EIDRM;
610 		goto done2;
611 	}
612 
613 	if (nsops > MAX_SOPS) {
		sysv_print("[client %d] too many sops (max=%d, nsops=%u)\n",
				getpid(), MAX_SOPS, nsops);
616 		eval = E2BIG;
617 		goto done;
618 	}
619 
	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
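	/*
	 * Worked example (illustrative values): with sops = { {0, -1, 0},
	 * {1, 0, 0} }, semval[0] == 1 and semval[1] == 2, the first pass
	 * decrements sem 0 and then stops at sem 1 because its value is
	 * not zero.  Sem 0 is rolled back to 1, semzcnt of sem 1 is
	 * bumped, and the thread sleeps on sem 1's value until a wakeup
	 * (or SYSV_TIMEOUT), after which the whole vector is retried.
	 */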
629 	do_undos = 0;
630 
631 	for (;;) {
632 
633 		semptr = NULL;
634 
635 		for (i = 0; i < (int)nsops; i++) {
636 			sopptr = &sops[i];
637 
638 			if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
639 				eval = EFBIG;
640 				goto done;
641 			}
642 
643 			semptr = &semaptr->ds.sem_base[sopptr->sem_num];
644 #ifdef SYSV_SEMS
645 			sysv_mutex_lock(&semptr->sem_mutex);
646 #endif
647 			sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
648 				sopptr->sem_num, semptr->semval, sopptr->sem_op,
649 				(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
650 
651 			if (sopptr->sem_op < 0) {
652 				if (semptr->semval + sopptr->sem_op < 0) {
653 					sysv_print("semop:  can't do it now\n");
654 					break;
655 				} else {
656 					semptr->semval += sopptr->sem_op;
657 					if (semptr->semval == 0 &&
658 						semptr->semzcnt > 0)
659 						umtx_wakeup((int *)&semptr->semval, 0);
660 				}
661 				if (sopptr->sem_flg & SEM_UNDO)
662 					do_undos = 1;
663 			} else if (sopptr->sem_op == 0) {
664 				if (semptr->semval > 0) {
665 					sysv_print("semop:  not zero now\n");
666 					break;
667 				}
668 			} else {
669 				semptr->semval += sopptr->sem_op;
670 				if (sopptr->sem_flg & SEM_UNDO)
671 					do_undos = 1;
672 				if (semptr->semncnt > 0)
673 					umtx_wakeup((int *)&semptr->semval, 0);
674 			}
675 #ifdef SYSV_SEMS
676 			sysv_mutex_unlock(&semptr->sem_mutex);
677 #endif
678 		}
679 
680 		/*
681 		 * Did we get through the entire vector?
682 		 */
683 		if (i >= (int)nsops)
684 			goto donex;
685 
686 		if (sopptr->sem_op == 0)
687 			semptr->semzcnt++;
688 		else
689 			semptr->semncnt++;
690 #ifdef SYSV_SEMS
691 		sysv_mutex_unlock(&semptr->sem_mutex);
692 #endif
693 		/*
694 		 * Rollback the semaphores we had acquired.
695 		 */
696 		sysv_print("semop:  rollback 0 through %d\n", i-1);
697 		for (j = 0; j < i; j++) {
698 			xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
699 #ifdef SYSV_SEMS
			sysv_mutex_lock(&xsemptr->sem_mutex);
701 #endif
702 			xsemptr->semval -= sops[j].sem_op;
703 			if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
704 				umtx_wakeup((int *)&xsemptr->semval, 0);
705 			if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
706 				umtx_wakeup((int *)&xsemptr->semval, 0); //?!
707 #ifdef SYSV_SEMS
			sysv_mutex_unlock(&xsemptr->sem_mutex);
709 #endif
710 		}
711 
712 		/*
713 		 * If the request that we couldn't satisfy has the
714 		 * NOWAIT flag set then return with EAGAIN.
715 		 */
716 		if (sopptr->sem_flg & IPC_NOWAIT) {
717 			eval = EAGAIN;
718 			goto done;
719 		}
720 
721 		/*
722 		 * Release semaptr->lock while sleeping, allowing other
723 		 * semops (like SETVAL, SETALL, etc), which require an
724 		 * exclusive lock and might wake us up.
725 		 *
726 		 * Reload and recheck the validity of semaptr on return.
727 		 * Note that semptr itself might have changed too, but
728 		 * we've already interlocked for semptr and that is what
729 		 * will be woken up if it wakes up the tsleep on a MP
730 		 * race.
731 		 *
732 		 */
733 
734 		sysv_print("semop:  good night!\n");
735 		val_to_sleep = semptr->semval;
736 		rwlock_unlock(semid, semaptr);
737 		put_shmdata(semid);
738 
739 		/* We don't sleep more than SYSV_TIMEOUT because we could
740 		 * go to sleep after another process calls wakeup and remain
741 		 * blocked.
742 		 */
743 		eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
744 		/* return code is checked below, after sem[nz]cnt-- */
745 
746 		/*
747 		 * Make sure that the semaphore still exists
748 		 */
749 
		/* Check whether another thread removed the semaphore. */
751 		auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
752 		if (!auxsemaptr) {
753 			errno = EIDRM;
754 			return (-1);
755 		}
756 
757 		if (auxsemaptr != semaptr) {
758 			errno = EIDRM;
759 			goto done;
760 		}
761 
		/* Check whether another process removed the semaphore. */
763 #ifdef SYSV_SEMS
764 		if (try_rwlock_rdlock(semid, semaptr) == -1) {
765 #else
766 		if (try_rwlock_wrlock(semid, semaptr) == -1) {
767 #endif
768 			errno = EIDRM;
769 			goto done;
770 		}
771 		sysv_print("semop:  good morning (eval=%d)!\n", eval);
772 
773 		/* The semaphore is still alive.  Readjust the count of
774 		 * waiting processes.
775 		 */
776 		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
777 #ifdef SYSV_SEMS
778 		sysv_mutex_lock(&semptr->sem_mutex);
779 #endif
780 		if (sopptr->sem_op == 0)
781 			semptr->semzcnt--;
782 		else
783 			semptr->semncnt--;
784 #ifdef SYSV_SEMS
785 		sysv_mutex_unlock(&semptr->sem_mutex);
786 #endif
787 
788 		/*
789 		 * Is it really morning, or was our sleep interrupted?
790 		 * (Delayed check of tsleep() return code because we
791 		 * need to decrement sem[nz]cnt either way.)
792 		 */
793 		if (eval) {
794 			eval = EINTR;
795 			goto done;
796 		}
797 
798 		sysv_print("semop:  good morning!\n");
799 		/* RETRY LOOP */
	}
801 
802 donex:
	/*
	 * Process any SEM_UNDO requests.
	 */
806 	if (do_undos) {
807 		for (i = 0; i < (int)nsops; i++) {
808 			/*
809 			 * We only need to deal with SEM_UNDO's for non-zero
810 			 * op's.
811 			 */
812 			int adjval;
813 
814 			if ((sops[i].sem_flg & SEM_UNDO) == 0)
815 				continue;
816 			adjval = sops[i].sem_op;
817 			if (adjval == 0)
818 				continue;
819 			eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
820 			if (eval == 0)
821 				continue;
822 
823 			/*
824 			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
825 			 * Rollback the adjustments to this point and then
826 			 * rollback the semaphore ups and down so we can return
827 			 * with an error with all structures restored.  We
828 			 * rollback the undo's in the exact reverse order that
829 			 * we applied them.  This guarantees that we won't run
830 			 * out of space as we roll things back out.
831 			 */
832 			for (j = i - 1; j >= 0; j--) {
833 				if ((sops[j].sem_flg & SEM_UNDO) == 0)
834 					continue;
835 				adjval = sops[j].sem_op;
836 				if (adjval == 0)
837 					continue;
838 				if (semundo_adjust(semid, sops[j].sem_num,
839 							adjval) != 0)
					sysv_print("semop - can't undo undos\n");
841 			}
842 
843 			for (j = 0; j < (int)nsops; j++) {
844 				xsemptr = &semaptr->ds.sem_base[
845 					sops[j].sem_num];
846 #ifdef SYSV_SEMS
				sysv_mutex_lock(&xsemptr->sem_mutex);
848 #endif
849 				xsemptr->semval -= sops[j].sem_op;
850 				if (xsemptr->semval == 0 &&
851 						xsemptr->semzcnt > 0)
852 					umtx_wakeup((int *)&xsemptr->semval, 0);
853 				if (xsemptr->semval <= 0 &&
854 						xsemptr->semncnt > 0)
855 					umtx_wakeup((int *)&xsemptr->semval, 0); //?!
856 #ifdef SYSV_SEMS
				sysv_mutex_unlock(&xsemptr->sem_mutex);
858 #endif
859 			}
860 
861 			sysv_print("eval = %d from semundo_adjust\n", eval);
862 			goto done;
863 		}
864 	}
865 
866 	/* Set sempid field for each semaphore. */
867 	for (i = 0; i < (int)nsops; i++) {
868 		sopptr = &sops[i];
869 		semptr = &semaptr->ds.sem_base[sopptr->sem_num];
870 #ifdef SYSV_SEMS
871 		sysv_mutex_lock(&semptr->sem_mutex);
872 #endif
873 		semptr->sempid = getpid();
874 #ifdef SYSV_SEMS
875 		sysv_mutex_unlock(&semptr->sem_mutex);
876 #endif
877 	}
878 
879 	sysv_print("semop:  done\n");
880 	semaptr->ds.sem_otime = time(NULL);
881 done:
882 	rwlock_unlock(semid, semaptr);
883 done2:
884 	put_shmdata(semid);
885 
886 	return (eval);
887 }
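
/*
 * Minimal usage sketch (compiled out): classic P()/V() on semaphore 0 of
 * a set obtained from sysvipc_semget().  SEM_UNDO asks for the operation
 * to be rolled back if the process exits while holding the semaphore
 * (see semundo_adjust() above).  A non-zero return indicates failure.
 */
#if 0
static void
example_semop(int semid)
{
	struct sembuf down = { 0, -1, SEM_UNDO };	/* P: wait and decrement */
	struct sembuf up   = { 0,  1, SEM_UNDO };	/* V: increment and wake */

	if (sysvipc_semop(semid, &down, 1) != 0)
		errx(1, "semop down failed");

	/* ... critical section ... */

	if (sysvipc_semop(semid, &up, 1) != 0)
		errx(1, "semop up failed");
}
#endif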
888