xref: /openbsd/sys/kern/subr_log.c (revision 3f55e363)
1 /*	$OpenBSD: subr_log.c,v 1.78 2023/09/22 20:03:05 mvs Exp $	*/
2 /*	$NetBSD: subr_log.c,v 1.11 1996/03/30 22:24:44 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)subr_log.c	8.1 (Berkeley) 6/10/93
33  */
34 
35 /*
36  * Error log buffer for kernel printf's.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <sys/ioctl.h>
44 #include <sys/msgbuf.h>
45 #include <sys/file.h>
46 #include <sys/tty.h>
47 #include <sys/signalvar.h>
48 #include <sys/syslog.h>
49 #include <sys/malloc.h>
50 #include <sys/filedesc.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/event.h>
54 #include <sys/fcntl.h>
55 #include <sys/mutex.h>
56 #include <sys/timeout.h>
57 
58 #ifdef KTRACE
59 #include <sys/ktrace.h>
60 #endif
61 
62 #include <sys/mount.h>
63 #include <sys/syscallargs.h>
64 
65 #include <dev/cons.h>
66 
67 #define LOG_RDPRI	(PZERO + 1)
68 #define LOG_TICK	50		/* log tick interval in msec */
69 
70 #define LOG_ASYNC	0x04
71 #define LOG_RDWAIT	0x08
72 
/*
 * Per-open state of /dev/klog.  Only one open at a time is allowed
 * (see logopen()), so a single global instance suffices.
 *
 * Locking:
 *	L	log_mtx
 */
struct logsoftc {
	int	sc_state;		/* [L] LOG_ASYNC | LOG_RDWAIT flags */
	struct	klist sc_klist;		/* [L] knotes of kevent(2) waiters */
	struct	sigio_ref sc_sigio;	/* async I/O registration */
	int	sc_need_wakeup;		/* set lock-free by logwakeup(),
					   cleared by logtick() */
	struct timeout sc_tick;		/* periodic deferred-wakeup timeout */
} logsoftc;
84 
int	log_open;			/* also used in log() */
int	msgbufmapped;			/* is the message buffer mapped */
struct	msgbuf *msgbufp;		/* the mapped buffer, itself. */
struct	msgbuf *consbufp;		/* console message buffer. */

/* Socket to syslogd(8), registered via LIOCSFD; protected by the rwlock. */
struct	file *syslogf;
struct	rwlock syslogf_rwlock = RWLOCK_INITIALIZER("syslogf");

/*
 * Lock that serializes access to log message buffers.
 * This should be kept as a leaf lock in order not to constrain where
 * printf(9) can be used.
 */
struct	mutex log_mtx =
    MUTEX_INITIALIZER_FLAGS(IPL_HIGH, "logmtx", MTX_NOWITNESS);
100 
void filt_logrdetach(struct knote *kn);
int filt_logread(struct knote *kn, long hint);
int filt_logmodify(struct kevent *, struct knote *);
int filt_logprocess(struct knote *, struct kevent *);

/*
 * kevent(2) EVFILT_READ filter for /dev/klog.  Marked MP safe; the
 * klist is protected by log_mtx (see logopen()), hence the modify and
 * process callbacks that take the mutex themselves.
 */
const struct filterops logread_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_logrdetach,
	.f_event	= filt_logread,
	.f_modify	= filt_logmodify,
	.f_process	= filt_logprocess,
};

int dosendsyslog(struct proc *, const char *, size_t, int, enum uio_seg);
void logtick(void *);
size_t msgbuf_getlen(struct msgbuf *);
void msgbuf_putchar_locked(struct msgbuf *, const char);
119 
/*
 * Attach the kernel message buffer at "buf" (bufsize bytes total).
 * The memory may have survived a reboot: if the existing header passes
 * the consistency checks the old contents are preserved, otherwise the
 * buffer is reinitialized.  Runs during early boot, so no locking.
 */
void
initmsgbuf(caddr_t buf, size_t bufsize)
{
	struct msgbuf *mbp;
	long new_bufs;

	/* Sanity-check the given size. */
	if (bufsize < sizeof(struct msgbuf))
		return;

	mbp = msgbufp = (struct msgbuf *)buf;

	/* Usable data bytes: total size minus the msgbuf header. */
	new_bufs = bufsize - offsetof(struct msgbuf, msg_bufc);
	if ((mbp->msg_magic != MSG_MAGIC) || (mbp->msg_bufs != new_bufs) ||
	    (mbp->msg_bufr < 0) || (mbp->msg_bufr >= mbp->msg_bufs) ||
	    (mbp->msg_bufx < 0) || (mbp->msg_bufx >= mbp->msg_bufs)) {
		/*
		 * If the buffer magic number is wrong, has changed
		 * size (which shouldn't happen often), or is
		 * internally inconsistent, initialize it.
		 */

		memset(buf, 0, bufsize);
		mbp->msg_magic = MSG_MAGIC;
		mbp->msg_bufs = new_bufs;
	}

	/*
	 * Always start new buffer data on a new line.
	 * Avoid using log_mtx because mutexes do not work during early boot
	 * on some architectures.
	 */
	if (mbp->msg_bufx > 0 && mbp->msg_bufc[mbp->msg_bufx - 1] != '\n')
		msgbuf_putchar_locked(mbp, '\n');

	/* mark it as ready for use. */
	msgbufmapped = 1;
}
158 
159 void
initconsbuf(void)160 initconsbuf(void)
161 {
162 	/* Set up a buffer to collect /dev/console output */
163 	consbufp = malloc(CONSBUFSIZE, M_TTYS, M_WAITOK | M_ZERO);
164 	consbufp->msg_magic = MSG_MAGIC;
165 	consbufp->msg_bufs = CONSBUFSIZE - offsetof(struct msgbuf, msg_bufc);
166 }
167 
168 void
msgbuf_putchar(struct msgbuf * mbp,const char c)169 msgbuf_putchar(struct msgbuf *mbp, const char c)
170 {
171 	if (mbp->msg_magic != MSG_MAGIC)
172 		/* Nothing we can do */
173 		return;
174 
175 	mtx_enter(&log_mtx);
176 	msgbuf_putchar_locked(mbp, c);
177 	mtx_leave(&log_mtx);
178 }
179 
/*
 * Append one character to the ring buffer.  Caller holds log_mtx or
 * guarantees single-threaded access (early boot).  When the write
 * index catches up with the read index the oldest byte is overwritten
 * and msg_bufd is bumped so readers can report the loss.
 */
void
msgbuf_putchar_locked(struct msgbuf *mbp, const char c)
{
	mbp->msg_bufc[mbp->msg_bufx++] = c;
	/* Wrap the write index; the < 0 test guards corrupted state. */
	if (mbp->msg_bufx < 0 || mbp->msg_bufx >= mbp->msg_bufs)
		mbp->msg_bufx = 0;
	/* If the buffer is full, keep the most recent data. */
	if (mbp->msg_bufr == mbp->msg_bufx) {
		if (++mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
		mbp->msg_bufd++;
	}
}
193 
194 size_t
msgbuf_getlen(struct msgbuf * mbp)195 msgbuf_getlen(struct msgbuf *mbp)
196 {
197 	long len;
198 
199 	len = mbp->msg_bufx - mbp->msg_bufr;
200 	if (len < 0)
201 		len += mbp->msg_bufs;
202 	return (len);
203 }
204 
/*
 * Open /dev/klog.  Only a single open at a time is allowed; the state
 * lives in the global logsoftc.  Arms the periodic logtick() timeout
 * that performs the deferred wakeups.
 */
int
logopen(dev_t dev, int flags, int mode, struct proc *p)
{
	if (log_open)
		return (EBUSY);
	log_open = 1;
	klist_init_mutex(&logsoftc.sc_klist, &log_mtx);
	sigio_init(&logsoftc.sc_sigio);
	/* The timeout rearms itself in logtick() while log_open is set. */
	timeout_set(&logsoftc.sc_tick, logtick, NULL);
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
	return (0);
}
217 
/*
 * Close /dev/klog.  Drops the reference to the syslogd(8) socket,
 * stops the wakeup timeout and tears down kevent/sigio state.
 */
int
logclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct file *fp;

	/* Detach the syslogd socket under the rwlock, release it after. */
	rw_enter_write(&syslogf_rwlock);
	fp = syslogf;
	syslogf = NULL;
	rw_exit(&syslogf_rwlock);

	if (fp)
		FRELE(fp, p);
	log_open = 0;
	timeout_del(&logsoftc.sc_tick);

	/* Wake and detach any remaining knotes before freeing the klist. */
	klist_invalidate(&logsoftc.sc_klist);
	klist_free(&logsoftc.sc_klist);

	logsoftc.sc_state = 0;
	sigio_free(&logsoftc.sc_sigio);
	return (0);
}
240 
/*
 * Read from /dev/klog.  Blocks (unless IO_NDELAY) until the kernel
 * message buffer has data, first reports any bytes lost to overruns,
 * then copies out as much contiguous data as fits into the request.
 */
int
logread(dev_t dev, struct uio *uio, int flag)
{
	struct msgbuf *mbp = msgbufp;
	size_t l, rpos;
	int error = 0;

	mtx_enter(&log_mtx);
	/* The ring buffer is empty while read index equals write index. */
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (flag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto out;
		}
		logsoftc.sc_state |= LOG_RDWAIT;
		mtx_leave(&log_mtx);
		/*
		 * Set up and enter sleep manually instead of using msleep()
		 * to keep log_mtx as a leaf lock.
		 */
		sleep_setup(mbp, LOG_RDPRI | PCATCH, "klog");
		error = sleep_finish(0, logsoftc.sc_state & LOG_RDWAIT);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
	}

	/* Tell the reader how many bytes were lost to buffer overruns. */
	if (mbp->msg_bufd > 0) {
		char buf[64];
		long ndropped;

		/* Snapshot the counter; log_mtx is dropped around uiomove. */
		ndropped = mbp->msg_bufd;
		mtx_leave(&log_mtx);
		l = snprintf(buf, sizeof(buf),
		    "<%d>klog: dropped %ld byte%s, message buffer full\n",
		    LOG_KERN|LOG_WARNING, ndropped,
		    ndropped == 1 ? "" : "s");
		error = uiomove(buf, ulmin(l, sizeof(buf) - 1), uio);
		mtx_enter(&log_mtx);
		if (error)
			goto out;
		/* Subtract only what was reported; more may have dropped. */
		mbp->msg_bufd -= ndropped;
	}

	while (uio->uio_resid > 0) {
		/* Copy at most up to the wrap point in one chunk. */
		if (mbp->msg_bufx >= mbp->msg_bufr)
			l = mbp->msg_bufx - mbp->msg_bufr;
		else
			l = mbp->msg_bufs - mbp->msg_bufr;
		l = ulmin(l, uio->uio_resid);
		if (l == 0)
			break;
		rpos = mbp->msg_bufr;
		mtx_leave(&log_mtx);
		/* Ignore that concurrent readers may consume the same data. */
		error = uiomove(&mbp->msg_bufc[rpos], l, uio);
		mtx_enter(&log_mtx);
		if (error)
			break;
		mbp->msg_bufr += l;
		if (mbp->msg_bufr < 0 || mbp->msg_bufr >= mbp->msg_bufs)
			mbp->msg_bufr = 0;
	}
 out:
	mtx_leave(&log_mtx);
	return (error);
}
307 
308 int
logkqfilter(dev_t dev,struct knote * kn)309 logkqfilter(dev_t dev, struct knote *kn)
310 {
311 	struct klist *klist;
312 
313 	switch (kn->kn_filter) {
314 	case EVFILT_READ:
315 		klist = &logsoftc.sc_klist;
316 		kn->kn_fop = &logread_filtops;
317 		break;
318 	default:
319 		return (EINVAL);
320 	}
321 
322 	kn->kn_hook = (void *)msgbufp;
323 	klist_insert(klist, kn);
324 
325 	return (0);
326 }
327 
328 void
filt_logrdetach(struct knote * kn)329 filt_logrdetach(struct knote *kn)
330 {
331 	klist_remove(&logsoftc.sc_klist, kn);
332 }
333 
334 int
filt_logread(struct knote * kn,long hint)335 filt_logread(struct knote *kn, long hint)
336 {
337 	struct msgbuf *mbp = kn->kn_hook;
338 
339 	kn->kn_data = msgbuf_getlen(mbp);
340 	return (kn->kn_data != 0);
341 }
342 
343 int
filt_logmodify(struct kevent * kev,struct knote * kn)344 filt_logmodify(struct kevent *kev, struct knote *kn)
345 {
346 	int active;
347 
348 	mtx_enter(&log_mtx);
349 	active = knote_modify(kev, kn);
350 	mtx_leave(&log_mtx);
351 
352 	return (active);
353 }
354 
355 int
filt_logprocess(struct knote * kn,struct kevent * kev)356 filt_logprocess(struct knote *kn, struct kevent *kev)
357 {
358 	int active;
359 
360 	mtx_enter(&log_mtx);
361 	active = knote_process(kn, kev);
362 	mtx_leave(&log_mtx);
363 
364 	return (active);
365 }
366 
/*
 * Request a wakeup of /dev/klog readers.  Safe to call from any
 * context, including console output paths; the actual wakeup is
 * performed later by logtick().
 */
void
logwakeup(void)
{
	/*
	 * The actual wakeup has to be deferred because logwakeup() can be
	 * called in very varied contexts.
	 * Keep the print routines usable in as many situations as possible
	 * by not using locking here.
	 */

	/*
	 * Ensure that preceding stores become visible to other CPUs
	 * before the flag.
	 */
	membar_producer();

	logsoftc.sc_need_wakeup = 1;
}
385 
/*
 * Periodic timeout, fired every LOG_TICK ms while /dev/klog is open.
 * Performs the wakeups requested by logwakeup(): kevent activation,
 * SIGIO for async readers and wakeup(9) for sleeping logread() calls.
 */
void
logtick(void *arg)
{
	int state;

	if (!log_open)
		return;

	if (!logsoftc.sc_need_wakeup)
		goto out;
	logsoftc.sc_need_wakeup = 0;

	/*
	 * sc_need_wakeup has to be cleared before handling the wakeup.
	 * Visiting log_mtx ensures the proper order.
	 */

	mtx_enter(&log_mtx);
	/* Snapshot the state; sigio and wakeup run outside the mutex. */
	state = logsoftc.sc_state;
	if (logsoftc.sc_state & LOG_RDWAIT)
		logsoftc.sc_state &= ~LOG_RDWAIT;
	knote_locked(&logsoftc.sc_klist, 0);
	mtx_leave(&log_mtx);

	if (state & LOG_ASYNC)
		pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
	if (state & LOG_RDWAIT)
		wakeup(msgbufp);
out:
	/* Rearm; the timeout runs for the whole lifetime of the open. */
	timeout_add_msec(&logsoftc.sc_tick, LOG_TICK);
}
417 
/*
 * ioctl(2) handler for /dev/klog.  FIONREAD/FIONBIO/FIOASYNC and the
 * sigio owner calls behave as for other character devices; LIOCSFD
 * (root only) registers the syslogd(8) socket used by sendsyslog(2).
 */
int
logioctl(dev_t dev, u_long com, caddr_t data, int flag, struct proc *p)
{
	struct file *fp, *newfp;
	int error;

	switch (com) {

	/* return number of characters immediately available */
	case FIONREAD:
		mtx_enter(&log_mtx);
		*(int *)data = (int)msgbuf_getlen(msgbufp);
		mtx_leave(&log_mtx);
		break;

	case FIONBIO:
		break;

	case FIOASYNC:
		mtx_enter(&log_mtx);
		if (*(int *)data)
			logsoftc.sc_state |= LOG_ASYNC;
		else
			logsoftc.sc_state &= ~LOG_ASYNC;
		mtx_leave(&log_mtx);
		break;

	case FIOSETOWN:
	case TIOCSPGRP:
		return (sigio_setown(&logsoftc.sc_sigio, com, data));

	case FIOGETOWN:
	case TIOCGPGRP:
		sigio_getown(&logsoftc.sc_sigio, com, data);
		break;

	case LIOCSFD:
		if ((error = suser(p)) != 0)
			return (error);
		if ((error = getsock(p, *(int *)data, &newfp)) != 0)
			return (error);

		/* Swap in the new socket; release the old one unlocked. */
		rw_enter_write(&syslogf_rwlock);
		fp = syslogf;
		syslogf = newfp;
		rw_exit(&syslogf_rwlock);

		if (fp)
			FRELE(fp, p);
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}
474 
475 /*
476  * If syslogd is not running, temporarily store a limited amount of messages
477  * in kernel.  After log stash is full, drop messages and count them.  When
478  * syslogd is available again, next log message will flush the stashed
479  * messages and insert a message with drop count.  Calls to malloc(9) and
480  * copyin(9) may sleep, protect data structures with rwlock.
481  */
482 
#define LOGSTASH_SIZE	100
/* One stashed message: malloc(9)ed copy of the user buffer and its size. */
struct logstash_message {
	char	*lgs_buffer;		/* NULL when the slot is free */
	size_t	 lgs_size;
} logstash_messages[LOGSTASH_SIZE];

/* Ring of stash slots: in = next write position, out = next read position. */
struct	logstash_message *logstash_in = &logstash_messages[0];
struct	logstash_message *logstash_out = &logstash_messages[0];

struct	rwlock logstash_rwlock = RWLOCK_INITIALIZER("logstash");

/* Bookkeeping for messages dropped while the stash was full. */
int	logstash_dropped, logstash_error, logstash_pid;

int	logstash_insert(const char *, size_t, int, pid_t);
void	logstash_remove(void);
int	logstash_sendsyslog(struct proc *);
499 
500 static inline int
logstash_full(void)501 logstash_full(void)
502 {
503 	rw_assert_anylock(&logstash_rwlock);
504 
505 	return logstash_out->lgs_buffer != NULL &&
506 	    logstash_in == logstash_out;
507 }
508 
509 static inline void
logstash_increment(struct logstash_message ** msg)510 logstash_increment(struct logstash_message **msg)
511 {
512 	rw_assert_wrlock(&logstash_rwlock);
513 
514 	KASSERT((*msg) >= &logstash_messages[0]);
515 	KASSERT((*msg) < &logstash_messages[LOGSTASH_SIZE]);
516 	if ((*msg) == &logstash_messages[LOGSTASH_SIZE - 1])
517 		(*msg) = &logstash_messages[0];
518 	else
519 		(*msg)++;
520 }
521 
/*
 * Copy a user message into the stash for later delivery to syslogd(8).
 * When the stash is full the message is dropped and only counted; the
 * first drop's error and pid are remembered for the summary message.
 * May sleep in malloc(9) and copyin(9); serialized by logstash_rwlock.
 */
int
logstash_insert(const char *buf, size_t nbyte, int logerror, pid_t pid)
{
	int error;

	rw_enter_write(&logstash_rwlock);

	if (logstash_full()) {
		if (logstash_dropped == 0) {
			logstash_error = logerror;
			logstash_pid = pid;
		}
		logstash_dropped++;

		rw_exit(&logstash_rwlock);
		return (0);
	}

	logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
	error = copyin(buf, logstash_in->lgs_buffer, nbyte);
	if (error) {
		/* Bad user pointer: free the slot again and report. */
		free(logstash_in->lgs_buffer, M_LOG, nbyte);
		logstash_in->lgs_buffer = NULL;

		rw_exit(&logstash_rwlock);
		return (error);
	}
	logstash_in->lgs_size = nbyte;
	logstash_increment(&logstash_in);

	rw_exit(&logstash_rwlock);
	return (0);
}
555 
/*
 * Free the oldest stash slot after it has been sent to syslogd(8).
 * If messages were dropped while the stash was full, queue a summary
 * message into the just-freed slot so it appears in sequence.
 */
void
logstash_remove(void)
{
	rw_assert_wrlock(&logstash_rwlock);

	KASSERT(logstash_out->lgs_buffer != NULL);
	free(logstash_out->lgs_buffer, M_LOG, logstash_out->lgs_size);
	logstash_out->lgs_buffer = NULL;
	logstash_increment(&logstash_out);

	/* Insert dropped message in sequence where messages were dropped. */
	if (logstash_dropped) {
		size_t l, nbyte;
		char buf[80];

		l = snprintf(buf, sizeof(buf),
		    "<%d>sendsyslog: dropped %d message%s, error %d, pid %d",
		    LOG_KERN|LOG_WARNING, logstash_dropped,
		    logstash_dropped == 1 ? "" : "s",
		    logstash_error, logstash_pid);
		logstash_dropped = 0;
		logstash_error = 0;
		logstash_pid = 0;

		/* Cannot fail, we have just freed a slot. */
		KASSERT(!logstash_full());
		nbyte = ulmin(l, sizeof(buf) - 1);
		logstash_in->lgs_buffer = malloc(nbyte, M_LOG, M_WAITOK);
		memcpy(logstash_in->lgs_buffer, buf, nbyte);
		logstash_in->lgs_size = nbyte;
		logstash_increment(&logstash_in);
	}
}
589 
590 int
logstash_sendsyslog(struct proc * p)591 logstash_sendsyslog(struct proc *p)
592 {
593 	int error;
594 
595 	rw_enter_write(&logstash_rwlock);
596 
597 	while (logstash_out->lgs_buffer != NULL) {
598 		error = dosendsyslog(p, logstash_out->lgs_buffer,
599 		    logstash_out->lgs_size, 0, UIO_SYSSPACE);
600 		if (error) {
601 			rw_exit(&logstash_rwlock);
602 			return (error);
603 		}
604 		logstash_remove();
605 	}
606 
607 	rw_exit(&logstash_rwlock);
608 	return (0);
609 }
610 
611 /*
612  * Send syslog(3) message from userland to socketpair(2) created by syslogd(8).
613  * Store message in kernel log stash for later if syslogd(8) is not available
614  * or sending fails.  Send to console if LOG_CONS is set and syslogd(8) socket
615  * does not exist.
616  */
617 
618 int
sys_sendsyslog(struct proc * p,void * v,register_t * retval)619 sys_sendsyslog(struct proc *p, void *v, register_t *retval)
620 {
621 	struct sys_sendsyslog_args /* {
622 		syscallarg(const char *) buf;
623 		syscallarg(size_t) nbyte;
624 		syscallarg(int) flags;
625 	} */ *uap = v;
626 	size_t nbyte;
627 	int error;
628 
629 	nbyte = SCARG(uap, nbyte);
630 	if (nbyte > LOG_MAXLINE)
631 		nbyte = LOG_MAXLINE;
632 
633 	logstash_sendsyslog(p);
634 	error = dosendsyslog(p, SCARG(uap, buf), nbyte, SCARG(uap, flags),
635 	    UIO_USERSPACE);
636 	if (error && error != EFAULT)
637 		logstash_insert(SCARG(uap, buf), nbyte, error, p->p_p->ps_pid);
638 	return (error);
639 }
640 
641 int
dosendsyslog(struct proc * p,const char * buf,size_t nbyte,int flags,enum uio_seg sflg)642 dosendsyslog(struct proc *p, const char *buf, size_t nbyte, int flags,
643     enum uio_seg sflg)
644 {
645 #ifdef KTRACE
646 	struct iovec ktriov;
647 #endif
648 	struct file *fp;
649 	char pri[6], *kbuf;
650 	struct iovec aiov;
651 	struct uio auio;
652 	size_t i, len;
653 	int error;
654 
655 	/* Global variable syslogf may change during sleep, use local copy. */
656 	rw_enter_read(&syslogf_rwlock);
657 	fp = syslogf;
658 	if (fp)
659 		FREF(fp);
660 	rw_exit(&syslogf_rwlock);
661 
662 	if (fp == NULL) {
663 		if (!ISSET(flags, LOG_CONS))
664 			return (ENOTCONN);
665 		/*
666 		 * Strip off syslog priority when logging to console.
667 		 * LOG_PRIMASK | LOG_FACMASK is 0x03ff, so at most 4
668 		 * decimal digits may appear in priority as <1023>.
669 		 */
670 		len = MIN(nbyte, sizeof(pri));
671 		if (sflg == UIO_USERSPACE) {
672 			if ((error = copyin(buf, pri, len)))
673 				return (error);
674 		} else
675 			memcpy(pri, buf, len);
676 		if (0 < len && pri[0] == '<') {
677 			for (i = 1; i < len; i++) {
678 				if (pri[i] < '0' || pri[i] > '9')
679 					break;
680 			}
681 			if (i < len && pri[i] == '>') {
682 				i++;
683 				/* There must be at least one digit <0>. */
684 				if (i >= 3) {
685 					buf += i;
686 					nbyte -= i;
687 				}
688 			}
689 		}
690 	}
691 
692 	aiov.iov_base = (char *)buf;
693 	aiov.iov_len = nbyte;
694 	auio.uio_iov = &aiov;
695 	auio.uio_iovcnt = 1;
696 	auio.uio_segflg = sflg;
697 	auio.uio_rw = UIO_WRITE;
698 	auio.uio_procp = p;
699 	auio.uio_offset = 0;
700 	auio.uio_resid = aiov.iov_len;
701 #ifdef KTRACE
702 	if (sflg == UIO_USERSPACE && KTRPOINT(p, KTR_GENIO))
703 		ktriov = aiov;
704 	else
705 		ktriov.iov_len = 0;
706 #endif
707 
708 	len = auio.uio_resid;
709 	if (fp) {
710 		int flags = (fp->f_flag & FNONBLOCK) ? MSG_DONTWAIT : 0;
711 		error = sosend(fp->f_data, NULL, &auio, NULL, NULL, flags);
712 		if (error == 0)
713 			len -= auio.uio_resid;
714 	} else {
715 		KERNEL_LOCK();
716 		if (constty || cn_devvp) {
717 			error = cnwrite(0, &auio, 0);
718 			if (error == 0)
719 				len -= auio.uio_resid;
720 			aiov.iov_base = "\r\n";
721 			aiov.iov_len = 2;
722 			auio.uio_iov = &aiov;
723 			auio.uio_iovcnt = 1;
724 			auio.uio_segflg = UIO_SYSSPACE;
725 			auio.uio_rw = UIO_WRITE;
726 			auio.uio_procp = p;
727 			auio.uio_offset = 0;
728 			auio.uio_resid = aiov.iov_len;
729 			cnwrite(0, &auio, 0);
730 		} else {
731 			/* XXX console redirection breaks down... */
732 			if (sflg == UIO_USERSPACE) {
733 				kbuf = malloc(len, M_TEMP, M_WAITOK);
734 				error = copyin(aiov.iov_base, kbuf, len);
735 			} else {
736 				kbuf = aiov.iov_base;
737 				error = 0;
738 			}
739 			if (error == 0)
740 				for (i = 0; i < len; i++) {
741 					if (kbuf[i] == '\0')
742 						break;
743 					cnputc(kbuf[i]);
744 					auio.uio_resid--;
745 				}
746 			if (sflg == UIO_USERSPACE)
747 				free(kbuf, M_TEMP, len);
748 			if (error == 0)
749 				len -= auio.uio_resid;
750 			cnputc('\n');
751 		}
752 		KERNEL_UNLOCK();
753 	}
754 
755 #ifdef KTRACE
756 	if (error == 0 && ktriov.iov_len != 0)
757 		ktrgenio(p, -1, UIO_WRITE, &ktriov, len);
758 #endif
759 	if (fp)
760 		FRELE(fp, p);
761 	else if (error != EFAULT)
762 		error = ENOTCONN;
763 	return (error);
764 }
765