1 /* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
2
3 /*
4 * Implementation of SVID semaphores
5 *
6 * Author: Daniel Boulet
7 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
8 *
9 * This software is provided ``AS IS'' without any warranties of any kind.
10 */
11
12 #include "namespace.h"
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <errno.h>
16 #include <err.h>
17 #include <pthread.h>
18 #include <string.h>
19 #include <stdarg.h>
20 #include <sys/param.h>
21 #include <sys/queue.h>
22 #include <sys/mman.h>
23 #include <sys/sem.h>
24 #include "un-namespace.h"
25
26 #include "sysvipc_lock.h"
27 #include "sysvipc_ipc.h"
28 #include "sysvipc_shm.h"
29 #include "sysvipc_sem.h"
30 #include "sysvipc_hash.h"
31
32
33 #define SYSV_MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
34 #define SYSV_MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
35 #define SYSV_MUTEX_DESTROY(x) if (__isthreaded) _pthread_mutex_destroy(x)
36
37 extern struct hashtable *shmaddrs;
38 extern struct hashtable *shmres;
39 extern pthread_mutex_t lock_resources;
40
41 struct sem_undo *undos = NULL;
42 pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
43
44 static int semundo_clear(int, int);
45
46 static int
put_shmdata(int id)47 put_shmdata(int id)
48 {
49 struct shm_data *data;
50 int ret = -1;
51
52 SYSV_MUTEX_LOCK(&lock_resources);
53 data = _hash_lookup(shmres, id);
54 if (!data) {
55 sysv_print_err("something wrong put_shmdata\n");
56 goto done; /* It should not reach here. */
57 }
58
59 data->used--;
60 if (data->used == 0 && data->removed) {
61 sysv_print("really remove the sem\n");
62 SYSV_MUTEX_UNLOCK(&lock_resources);
63 /* OBS: Even if the shmctl fails (the thread doesn't
64 * have IPC_M permissions), all structures associated
65 * with it will be removed in the current process.*/
66 sysvipc_shmdt(data->internal);
67 semundo_clear(id, -1);
68 if (data->removed == SEG_ALREADY_REMOVED)
69 return 1; /* The semaphore was removed
70 by another process so there is nothing else
71 we must do. */
72 /* Else inform the daemon that the segment is removed. */
73 return (sysvipc_shmctl(id, IPC_RMID, NULL));
74 }
75
76 ret = 0;
77 done:
78 SYSV_MUTEX_UNLOCK(&lock_resources);
79 return (ret);
80 }
81
82 static struct semid_pool *
get_semaptr(int semid,int to_remove,int shm_access)83 get_semaptr(int semid, int to_remove, int shm_access)
84 {
85 struct semid_pool *semaptr;
86
87 struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
88 if (!shmdata) {
89 /* Error is set in get_shmdata. */
90 return (NULL);
91 }
92
93 semaptr = (struct semid_pool *)shmdata->internal;
94 if (!semaptr) {
95 put_shmdata(semid);
96 errno = EINVAL;
97 return (NULL);
98 }
99
100 return (semaptr);
101 }
102
103 static int
sema_exist(int semid,struct semid_pool * semaptr)104 sema_exist(int semid, struct semid_pool *semaptr)
105 {
106 /* Was it removed? */
107 if (semaptr->gen == -1 ||
108 semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
109 return (0);
110
111 return (1);
112 }
113
114 /* This is the function called when a the semaphore
115 * is descovered as removed. It marks the process
116 * internal data and munmap the */
117 static void
mark_for_removal(int shmid)118 mark_for_removal(int shmid)
119 {
120 sysv_print("Mark that the segment was removed\n");
121 get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
122 /* Setting SEG_ALREADY_REMOVED parameter, when put_shmdata
123 * is called, the internal resources will be freed.
124 */
125 /* Decrement the "usage" field. */
126 put_shmdata(shmid);
127 }
128
129 static int
try_rwlock_rdlock(int semid,struct semid_pool * semaptr)130 try_rwlock_rdlock(int semid, struct semid_pool *semaptr)
131 {
132 sysv_print(" before rd lock id = %d %p\n", semid, semaptr);
133 #ifdef SYSV_RWLOCK
134 sysv_rwlock_rdlock(&semaptr->rwlock);
135 sysv_print("rd lock id = %d\n", semid);
136 #else
137 sysv_mutex_lock(&semaptr->mutex);
138 sysv_print("lock id = %d\n", semid);
139 #endif
140 if (!sema_exist(semid, semaptr)) {
141 errno = EINVAL;
142 sysv_print("error sema %d doesn't exist\n", semid);
143 #ifdef SYSV_RWLOCK
144 sysv_rwlock_unlock(&semaptr->rwlock);
145 #else
146 sysv_mutex_unlock(&semaptr->mutex);
147 #endif
148 /* Internal resources must be freed. */
149 mark_for_removal(semid);
150 return (-1);
151 }
152 return (0);
153 }
154
155 static int
try_rwlock_wrlock(int semid,struct semid_pool * semaptr)156 try_rwlock_wrlock(int semid, struct semid_pool *semaptr)
157 {
158 #ifdef SYSV_RWLOCK
159 sysv_print("before wrlock id = %d %p\n", semid, semaptr);
160 sysv_rwlock_wrlock(&semaptr->rwlock);
161 #else
162 sysv_print("before lock id = %d %x\n", semid, semaptr);
163 sysv_mutex_lock(&semaptr->mutex);
164 #endif
165 sysv_print("lock id = %d\n", semid);
166 if (!sema_exist(semid, semaptr)) {
167 errno = EINVAL;
168 sysv_print("error sema %d doesn't exist\n", semid);
169 #ifdef SYSV_RWLOCK
170 sysv_rwlock_unlock(&semaptr->rwlock);
171 #else
172 sysv_mutex_unlock(&semaptr->mutex);
173 #endif
174 /* Internal resources must be freed. */
175 mark_for_removal(semid);
176 return (-1);
177 }
178 return (0);
179 }
180
181 static int
rwlock_unlock(int semid,struct semid_pool * semaptr)182 rwlock_unlock(int semid, struct semid_pool *semaptr)
183 {
184 sysv_print("unlock id = %d %p\n", semid, semaptr);
185 if (!sema_exist(semid, semaptr)) {
186 /* Internal resources must be freed. */
187 mark_for_removal(semid);
188 errno = EINVAL;
189 return (-1);
190 }
191 #ifdef SYSV_RWLOCK
192 sysv_rwlock_unlock(&semaptr->rwlock);
193 #else
194 sysv_mutex_unlock(&semaptr->mutex);
195 #endif
196 return (0);
197 }
198
199 int
sysvipc_semget(key_t key,int nsems,int semflg)200 sysvipc_semget(key_t key, int nsems, int semflg)
201 {
202 int semid;
203 void *shmaddr;
204 //int shm_access;
205 int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
206
207 //TODO resources limits
208 sysv_print("handle semget\n");
209
210 semid = _shmget(key, size, semflg, SEMGET);
211 if (semid == -1) {
212 /* errno already set. */
213 goto done;
214 }
215
216 /* If the semaphore is in process of being removed there are two cases:
217 * - the daemon knows that and it will handle this situation.
218 * - one of the threads from this address space remove it and the daemon
219 * wasn't announced yet; in this scenario, the semaphore is marked
220 * using "removed" field of shm_data and future calls will return
221 * EIDRM error.
222 */
223
224 #if 0
225 /* Set access type. */
226 shm_access = semflg & (IPC_W | IPC_R);
227 if(set_shmdata_access(semid, shm_access) != 0) {
228 /* errno already set. */
229 goto done;
230 }
231 #endif
232 shmaddr = sysvipc_shmat(semid, NULL, 0);
233 if (!shmaddr) {
234 semid = -1;
235 sysvipc_shmctl(semid, IPC_RMID, NULL);
236 goto done;
237 }
238
239 //TODO more semaphores in a single file
240
241 done:
242 sysv_print("end handle semget %d\n", semid);
243 return (semid);
244 }
245
246 static int
semundo_clear(int semid,int semnum)247 semundo_clear(int semid, int semnum)
248 {
249 struct undo *sunptr;
250 int i;
251
252 sysv_print("semundo clear\n");
253
254 SYSV_MUTEX_LOCK(&lock_undo);
255 if (!undos)
256 goto done;
257
258 sunptr = &undos->un_ent[0];
259 i = 0;
260
261 while (i < undos->un_cnt) {
262 if (sunptr->un_id == semid) {
263 if (semnum == -1 || sunptr->un_num == semnum) {
264 undos->un_cnt--;
265 if (i < undos->un_cnt) {
266 undos->un_ent[i] =
267 undos->un_ent[undos->un_cnt];
268 continue;
269 }
270 }
271 if (semnum != -1)
272 break;
273 }
274 ++i;
275 ++sunptr;
276 }
277
278 //TODO Shrink memory if case; not sure if necessary
279 done:
280 SYSV_MUTEX_UNLOCK(&lock_undo);
281 sysv_print("end semundo clear\n");
282 return (0);
283 }
284
285 int
sysvipc___semctl(int semid,int semnum,int cmd,union semun * arg)286 sysvipc___semctl(int semid, int semnum, int cmd, union semun *arg)
287 {
288 int i, error;
289 struct semid_pool *semaptr = NULL;
290 struct sem *semptr = NULL;
291 struct shmid_ds shmds;
292 int shm_access = 0;
293
294 /*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
295 return (ENOSYS);
296 */
297
298 sysv_print("semctl cmd = %d\n", cmd);
299
300 error = 0;
301
302 switch (cmd) {
303 case IPC_SET: /* Originally was IPC_M but this is checked
304 by daemon. */
305 case SETVAL:
306 case SETALL:
307 shm_access = IPC_W;
308 break;
309 case IPC_STAT:
310 case GETNCNT:
311 case GETPID:
312 case GETVAL:
313 case GETALL:
314 case GETZCNT:
315 shm_access = IPC_R;
316 break;
317 default:
318 break;
319 }
320
321 semaptr = get_semaptr(semid, cmd==IPC_RMID, shm_access);
322 if (!semaptr) {
323 /* errno already set. */
324 return (-1);
325 }
326
327 switch (cmd) {
328 case IPC_RMID:
329 /* Mark that the segment is removed. This is done in
330 * get_semaptr call in order to announce other processes.
331 * It will be actually removed after put_shmdata call and
332 * not other thread from this address space use shm_data
333 * structure.
334 */
335 break;
336
337 case IPC_SET:
338 if (!arg->buf) {
339 error = EFAULT;
340 break;
341 }
342
343 memset(&shmds, 0, sizeof(shmds)/sizeof(unsigned char));
344 memcpy(&shmds.shm_perm, &arg->buf->sem_perm,
345 sizeof(struct ipc_perm));
346 error = sysvipc_shmctl(semid, cmd, &shmds);
347 /* OBS: didn't update ctime and mode as in kernel implementation
348 * it is done. Those fields are already updated for shmid_ds
349 * struct when calling shmctl
350 */
351 break;
352
353 case IPC_STAT:
354 if (!arg->buf) {
355 error = EFAULT;
356 break;
357 }
358
359 error = sysvipc_shmctl(semid, cmd, &shmds);
360 if (error)
361 break;
362
363 memcpy(&arg->buf->sem_perm, &shmds.shm_perm,
364 sizeof(struct ipc_perm));
365 arg->buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
366 sizeof(struct sem);
367 arg->buf->sem_ctime = shmds.shm_ctime;
368
369 /* otime is semaphore specific so read it from
370 * semaptr
371 */
372 error = try_rwlock_rdlock(semid, semaptr);
373 if (error)
374 break;
375 arg->buf->sem_otime = semaptr->ds.sem_otime;
376 rwlock_unlock(semid, semaptr);
377 break;
378
379 case GETNCNT:
380 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
381 errno = EINVAL;
382 break;
383 }
384
385 error = try_rwlock_rdlock(semid, semaptr);
386 if (error)
387 break;
388 error = semaptr->ds.sem_base[semnum].semncnt;
389 rwlock_unlock(semid, semaptr);
390 break;
391
392 case GETPID:
393 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
394 errno = EINVAL;
395 break;
396 }
397
398 error = try_rwlock_rdlock(semid, semaptr);
399 if (error)
400 break;
401 error = semaptr->ds.sem_base[semnum].sempid;
402 rwlock_unlock(semid, semaptr);
403 break;
404
405 case GETVAL:
406 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
407 errno = EINVAL;
408 break;
409 }
410
411 error = try_rwlock_rdlock(semid, semaptr);
412 if (error)
413 break;
414 error = semaptr->ds.sem_base[semnum].semval;
415 rwlock_unlock(semid, semaptr);
416 break;
417
418 case GETALL:
419 if (!arg->array) {
420 error = EFAULT;
421 break;
422 }
423
424 error = try_rwlock_rdlock(semid, semaptr);
425 if (error)
426 break;
427 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
428 arg->array[i] = semaptr->ds.sem_base[i].semval;
429 }
430 rwlock_unlock(semid, semaptr);
431 break;
432
433 case GETZCNT:
434 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
435 errno = EINVAL;
436 break;
437 }
438
439 error = try_rwlock_rdlock(semid, semaptr);
440 if (error)
441 break;
442 error = semaptr->ds.sem_base[semnum].semzcnt;
443 rwlock_unlock(semid, semaptr);
444 break;
445
446 case SETVAL:
447 if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
448 errno = EINVAL;
449 break;
450 }
451
452 error = try_rwlock_wrlock(semid, semaptr);
453 if (error)
454 break;
455 semptr = &semaptr->ds.sem_base[semnum];
456 semptr->semval = arg->val;
457 semundo_clear(semid, semnum);
458 if (semptr->semzcnt || semptr->semncnt)
459 umtx_wakeup((int *)&semptr->semval, 0);
460 rwlock_unlock(semid, semaptr);
461 break;
462
463 case SETALL:
464 if (!arg->array) {
465 error = EFAULT;
466 break;
467 }
468
469 error = try_rwlock_wrlock(semid, semaptr);
470 if (error)
471 break;
472 for (i = 0; i < semaptr->ds.sem_nsems; i++) {
473 semptr = &semaptr->ds.sem_base[i];
474 semptr->semval = arg->array[i];
475 if (semptr->semzcnt || semptr->semncnt)
476 umtx_wakeup((int *)&semptr->semval, 0);
477 }
478 semundo_clear(semid, -1);
479 rwlock_unlock(semid, semaptr);
480 break;
481
482 default:
483 errno = EINVAL;
484 break;
485 }
486
487 put_shmdata(semid);
488
489 sysv_print("end semctl\n");
490 return (error);
491 }
492
493 /*
494 * Adjust a particular entry for a particular proc
495 */
496 static int
semundo_adjust(int semid,int semnum,int adjval)497 semundo_adjust(int semid, int semnum, int adjval)
498 {
499 struct undo *sunptr;
500 int i;
501 int error = 0;
502 size_t size;
503 int undoid;
504 void *addr;
505 struct shm_data *data;
506
507 sysv_print("semundo adjust\n");
508 if (!adjval)
509 goto done;
510
511 SYSV_MUTEX_LOCK(&lock_undo);
512 if (!undos) {
513 sysv_print("get undo segment\n");
514 undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
515 UNDOGET);
516 if (undoid == -1) {
517 sysv_print_err("no undo segment\n");
518 return (-1);
519 }
520
521 addr = sysvipc_shmat(undoid, NULL, 0);
522 if (!addr) {
523 sysv_print_err("can not map undo segment\n");
524 sysvipc_shmctl(undoid, IPC_RMID, NULL);
525 return (-1);
526 }
527
528 undos = (struct sem_undo *)addr;
529 undos->un_pages = 1;
530 undos->un_cnt = 0;
531 }
532
533 /*
534 * Look for the requested entry and adjust it (delete if adjval becomes
535 * 0).
536 */
537 sunptr = &undos->un_ent[0];
538 for (i = 0; i < undos->un_cnt; i++, sunptr++) {
539 if (sunptr->un_id != semid && sunptr->un_num != semnum)
540 continue;
541 sunptr->un_adjval += adjval;
542 if (sunptr->un_adjval == 0) {
543 undos->un_cnt--;
544 if (i < undos->un_cnt)
545 undos->un_ent[i] = undos->un_ent[undos->un_cnt];
546 }
547 goto done;
548 }
549
550 /* Didn't find the right entry - create it */
551 size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
552 sizeof(struct sem_undo);
553 if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
554 sysv_print("need more undo space\n");
555 sysvipc_shmdt(undos);
556 undos->un_pages++;
557
558 SYSV_MUTEX_LOCK(&lock_resources);
559 data = _hash_lookup(shmaddrs, (u_long)undos);
560 SYSV_MUTEX_UNLOCK(&lock_resources);
561
562 /* It is not necessary any lock on "size" because it is used
563 * only by shmat and shmdt.
564 * shmat for undoid is called only from this function and it
565 * is protected by undo_lock.
566 * shmdt for undoid is not called anywhere because the segment
567 * is destroyed by the daemon when the client dies.
568 */
569 data->size = undos->un_pages * PAGE_SIZE;
570 undos = sysvipc_shmat(data->shmid, NULL, 0);
571 }
572
573 sunptr = &undos->un_ent[undos->un_cnt];
574 undos->un_cnt++;
575 sunptr->un_adjval = adjval;
576 sunptr->un_id = semid;
577 sunptr->un_num = semnum;
578 //if (suptr->un_cnt == seminfo.semume) TODO move it in daemon
579 /*} else {
580 error = EINVAL; //se face prin notificare
581 }*/
582 done:
583 SYSV_MUTEX_UNLOCK(&lock_undo);
584
585 sysv_print("semundo adjust end\n");
586 return (error);
587 }
588
589 int
sysvipc_semop(int semid,struct sembuf * sops,unsigned nsops)590 sysvipc_semop(int semid, struct sembuf *sops, unsigned nsops)
591 {
592 struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
593 struct sembuf *sopptr;
594 struct sem *semptr = NULL;
595 struct sem *xsemptr = NULL;
596 int eval = 0;
597 int i, j;
598 int do_undos;
599 int val_to_sleep;
600
601 sysv_print("[client %d] call to semop(%d, %u)\n",
602 getpid(), semid, nsops);
603 //TODO
604 /*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
605 return (ENOSYS);
606 */
607
608 semaptr = get_semaptr(semid, 0, IPC_W);
609 if (!semaptr) {
610 errno = EINVAL;
611 return (-1);
612 }
613
614 #ifdef SYSV_SEMS
615 if (try_rwlock_rdlock(semid, semaptr) == -1) {
616 #else
617 if (try_rwlock_wrlock(semid, semaptr) == -1) {
618 #endif
619 sysv_print("sema removed\n");
620 errno = EIDRM;
621 goto done2;
622 }
623
624 if (nsops > MAX_SOPS) {
625 sysv_print("too many sops (max=%d, nsops=%u)\n",
626 MAX_SOPS, nsops);
627 eval = E2BIG;
628 goto done;
629 }
630
631 /*
632 * Loop trying to satisfy the vector of requests.
633 * If we reach a point where we must wait, any requests already
634 * performed are rolled back and we go to sleep until some other
635 * process wakes us up. At this point, we start all over again.
636 *
637 * This ensures that from the perspective of other tasks, a set
638 * of requests is atomic (never partially satisfied).
639 */
640 do_undos = 0;
641
642 for (;;) {
643
644 semptr = NULL;
645
646 for (i = 0; i < (int)nsops; i++) {
647 sopptr = &sops[i];
648
649 if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
650 eval = EFBIG;
651 goto done;
652 }
653
654 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
655 #ifdef SYSV_SEMS
656 sysv_mutex_lock(&semptr->sem_mutex);
657 #endif
658 sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
659 sopptr->sem_num, semptr->semval, sopptr->sem_op,
660 (sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
661
662 if (sopptr->sem_op < 0) {
663 if (semptr->semval + sopptr->sem_op < 0) {
664 sysv_print("semop: can't do it now\n");
665 break;
666 } else {
667 semptr->semval += sopptr->sem_op;
668 if (semptr->semval == 0 &&
669 semptr->semzcnt > 0)
670 umtx_wakeup((int *)&semptr->semval, 0);
671 }
672 if (sopptr->sem_flg & SEM_UNDO)
673 do_undos = 1;
674 } else if (sopptr->sem_op == 0) {
675 if (semptr->semval > 0) {
676 sysv_print("semop: not zero now\n");
677 break;
678 }
679 } else {
680 semptr->semval += sopptr->sem_op;
681 if (sopptr->sem_flg & SEM_UNDO)
682 do_undos = 1;
683 if (semptr->semncnt > 0)
684 umtx_wakeup((int *)&semptr->semval, 0);
685 }
686 #ifdef SYSV_SEMS
687 sysv_mutex_unlock(&semptr->sem_mutex);
688 #endif
689 }
690
691 /*
692 * Did we get through the entire vector?
693 */
694 if (i >= (int)nsops)
695 goto donex;
696
697 if (sopptr->sem_op == 0)
698 semptr->semzcnt++;
699 else
700 semptr->semncnt++;
701
702 /*
703 * Get interlock value before rleeasing sem_mutex.
704 *
705 * XXX horrible hack until we get a umtx_sleep16() (and a umtx_sleep64())
706 * system call.
707 */
708 val_to_sleep = *(int *)&semptr->semval;
709 #ifdef SYSV_SEMS
710 sysv_mutex_unlock(&semptr->sem_mutex);
711 #endif
712 /*
713 * Rollback the semaphores we had acquired.
714 */
715 sysv_print("semop: rollback 0 through %d\n", i-1);
716 for (j = 0; j < i; j++) {
717 xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
718 #ifdef SYSV_SEMS
719 sysv_mutex_lock(&xsemptr->sem_mutex);
720 #endif
721 xsemptr->semval -= sops[j].sem_op;
722 if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
723 umtx_wakeup((int *)&xsemptr->semval, 0);
724 if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
725 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
726 #ifdef SYSV_SEMS
727 sysv_mutex_unlock(&xsemptr->sem_mutex);
728 #endif
729 }
730
731 /*
732 * If the request that we couldn't satisfy has the
733 * NOWAIT flag set then return with EAGAIN.
734 */
735 if (sopptr->sem_flg & IPC_NOWAIT) {
736 eval = EAGAIN;
737 goto done;
738 }
739
740 /*
741 * Release semaptr->lock while sleeping, allowing other
742 * semops (like SETVAL, SETALL, etc), which require an
743 * exclusive lock and might wake us up.
744 *
745 * Reload and recheck the validity of semaptr on return.
746 * Note that semptr itself might have changed too, but
747 * we've already interlocked for semptr and that is what
748 * will be woken up if it wakes up the tsleep on a MP
749 * race.
750 *
751 */
752 sysv_print("semop: good night!\n");
753 rwlock_unlock(semid, semaptr);
754 put_shmdata(semid);
755
756 /* We don't sleep more than SYSV_TIMEOUT because we could
757 * go to sleep after another process calls wakeup and remain
758 * blocked.
759 */
760 eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
761 /* return code is checked below, after sem[nz]cnt-- */
762
763 /*
764 * Make sure that the semaphore still exists
765 */
766
767 /* Check if another thread didn't remove the semaphore. */
768 auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
769 if (!auxsemaptr) {
770 errno = EIDRM;
771 return (-1);
772 }
773
774 if (auxsemaptr != semaptr) {
775 errno = EIDRM;
776 goto done;
777 }
778
779 /* Check if another process didn't remove the semaphore. */
780 #ifdef SYSV_SEMS
781 if (try_rwlock_rdlock(semid, semaptr) == -1) {
782 #else
783 if (try_rwlock_wrlock(semid, semaptr) == -1) {
784 #endif
785 errno = EIDRM;
786 goto done;
787 }
788 sysv_print("semop: good morning (eval=%d)!\n", eval);
789
790 /* The semaphore is still alive. Readjust the count of
791 * waiting processes.
792 */
793 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
794 #ifdef SYSV_SEMS
795 sysv_mutex_lock(&semptr->sem_mutex);
796 #endif
797 if (sopptr->sem_op == 0)
798 semptr->semzcnt--;
799 else
800 semptr->semncnt--;
801 #ifdef SYSV_SEMS
802 sysv_mutex_unlock(&semptr->sem_mutex);
803 #endif
804
805 /*
806 * Is it really morning, or was our sleep interrupted?
807 * (Delayed check of tsleep() return code because we
808 * need to decrement sem[nz]cnt either way.)
809 *
810 * Always retry on EBUSY
811 */
812 if (eval == EAGAIN) {
813 eval = EINTR;
814 goto done;
815 }
816
817 sysv_print("semop: good morning!\n");
818 /* RETRY LOOP */
819 }
820
821 donex:
822 /*
823 * Process any SEM_UNDO requests.
824 */
825 if (do_undos) {
826 for (i = 0; i < (int)nsops; i++) {
827 /*
828 * We only need to deal with SEM_UNDO's for non-zero
829 * op's.
830 */
831 int adjval;
832
833 if ((sops[i].sem_flg & SEM_UNDO) == 0)
834 continue;
835 adjval = sops[i].sem_op;
836 if (adjval == 0)
837 continue;
838 eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
839 if (eval == 0)
840 continue;
841
842 /*
843 * Oh-Oh! We ran out of either sem_undo's or undo's.
844 * Rollback the adjustments to this point and then
845 * rollback the semaphore ups and down so we can return
846 * with an error with all structures restored. We
847 * rollback the undo's in the exact reverse order that
848 * we applied them. This guarantees that we won't run
849 * out of space as we roll things back out.
850 */
851 for (j = i - 1; j >= 0; j--) {
852 if ((sops[j].sem_flg & SEM_UNDO) == 0)
853 continue;
854 adjval = sops[j].sem_op;
855 if (adjval == 0)
856 continue;
857 if (semundo_adjust(semid, sops[j].sem_num,
858 adjval) != 0)
859 sysv_print("semop - can't undo undos");
860 }
861
862 for (j = 0; j < (int)nsops; j++) {
863 xsemptr = &semaptr->ds.sem_base[
864 sops[j].sem_num];
865 #ifdef SYSV_SEMS
866 sysv_mutex_lock(&semptr->sem_mutex);
867 #endif
868 xsemptr->semval -= sops[j].sem_op;
869 if (xsemptr->semval == 0 &&
870 xsemptr->semzcnt > 0)
871 umtx_wakeup((int *)&xsemptr->semval, 0);
872 if (xsemptr->semval <= 0 &&
873 xsemptr->semncnt > 0)
874 umtx_wakeup((int *)&xsemptr->semval, 0); //?!
875 #ifdef SYSV_SEMS
876 sysv_mutex_unlock(&semptr->sem_mutex);
877 #endif
878 }
879
880 sysv_print("eval = %d from semundo_adjust\n", eval);
881 goto done;
882 }
883 }
884
885 /* Set sempid field for each semaphore. */
886 for (i = 0; i < (int)nsops; i++) {
887 sopptr = &sops[i];
888 semptr = &semaptr->ds.sem_base[sopptr->sem_num];
889 #ifdef SYSV_SEMS
890 sysv_mutex_lock(&semptr->sem_mutex);
891 #endif
892 semptr->sempid = getpid();
893 #ifdef SYSV_SEMS
894 sysv_mutex_unlock(&semptr->sem_mutex);
895 #endif
896 }
897
898 sysv_print("semop: done\n");
899 semaptr->ds.sem_otime = time(NULL);
900 done:
901 rwlock_unlock(semid, semaptr);
902 done2:
903 put_shmdata(semid);
904
905 return (eval);
906 }
907