/*	$Id$ */
/*
 * Copyright (c) 2019 Kristaps Dzonsons <kristaps@bsd.lv>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "config.h"

#include <sys/stat.h>
#include COMPAT_ENDIAN_H

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "extern.h"

/*
 * A non-blocking check to see whether there's POLLIN data in fd.
 * Returns <0 on failure, 0 if there's no data, >0 if there is.
 */
int
io_read_check(int fd)
{
	struct pollfd	pfd;

	pfd.fd = fd;
	pfd.events = POLLIN;

	if (poll(&pfd, 1, 0) == -1) {
		ERR("poll");
		return -1;
	}
	return (pfd.revents & POLLIN);
}

/*
 * Write buffer to non-blocking descriptor.
 * Returns zero on failure, non-zero on success (zero or more bytes).
 * On success, fills in "sz" with the amount written.
 */
static int
io_write_nonblocking(int fd, const void *buf, size_t bsz,
    size_t *sz)
{
	struct pollfd	pfd;
	ssize_t		wsz;
	int		c;

	*sz = 0;

	if (bsz == 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLOUT;

	/* Poll and check for all possible errors. */

	if ((c = poll(&pfd, 1, POLL_TIMEOUT)) == -1) {
		ERR("poll");
		return 0;
	} else if (c == 0) {
		ERRX("poll: timeout");
		return 0;
	} else if ((pfd.revents & (POLLERR|POLLNVAL))) {
		ERRX("poll: bad fd");
		return 0;
	} else if ((pfd.revents & POLLHUP)) {
		ERRX("poll: hangup");
		return 0;
	} else if (!(pfd.revents & POLLOUT)) {
		ERRX("poll: unknown event");
		return 0;
	}

	/* Now the non-blocking write. */
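	/*
	 * A short write is fine here: the number of bytes actually
	 * written is reported in "sz" and io_write_blocking() loops
	 * until the whole buffer has been sent.
	 */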

	if ((wsz = write(fd, buf, bsz)) == -1) {
		ERR("write");
		return 0;
	}

	*sz = wsz;
	return 1;
}

/*
 * Blocking write of the full size of the buffer.
 * Returns 0 on failure, non-zero on success (all bytes written).
 */
static int
io_write_blocking(int fd, const void *buf, size_t sz)
{
	size_t		wsz;
	int		c;

	while (sz > 0) {
		c = io_write_nonblocking(fd, buf, sz, &wsz);
		if (!c) {
			ERRX1("io_write_nonblocking");
			return 0;
		} else if (wsz == 0) {
			ERRX("io_write_nonblocking: short write");
			return 0;
		}
		buf += wsz;
		sz -= wsz;
	}

	return 1;
}

/*
 * Write "buf" of size "sz" to non-blocking descriptor.
 * Returns zero on failure, non-zero on success (all bytes written to
 * the descriptor).
 */
int
io_write_buf(struct sess *sess, int fd, const void *buf, size_t sz)
{
	int32_t	 tag, tagbuf;
	size_t	 wsz;
	int	 c;

	if (!sess->mplex_writes) {
		c = io_write_blocking(fd, buf, sz);
		sess->total_write += sz;
		return c;
	}

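	/*
	 * Multiplexed writes are framed: each chunk is preceded by a
	 * 4-byte little-endian header whose low 24 bits hold the
	 * payload length and whose top byte holds the channel tag
	 * (seven for normal data).
	 * For example, a 5-byte payload goes out as the header bytes
	 * 05 00 00 07 followed by the 5 payload bytes.
	 */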
	while (sz > 0) {
		wsz = sz & 0xFFFFFF;
		tag = (7 << 24) + wsz;
		tagbuf = htole32(tag);
		if (!io_write_blocking(fd, &tagbuf, sizeof(tagbuf))) {
			ERRX1("io_write_blocking");
			return 0;
		}
		if (!io_write_blocking(fd, buf, wsz)) {
			ERRX1("io_write_blocking");
			return 0;
		}
		sess->total_write += wsz;
		sz -= wsz;
		buf += wsz;
	}

	return 1;
}

/*
 * Write "line" (NUL-terminated) followed by a newline.
 * Returns zero on failure, non-zero on success.
 */
int
io_write_line(struct sess *sess, int fd, const char *line)
{

	if (!io_write_buf(sess, fd, line, strlen(line)))
		ERRX1("io_write_buf");
	else if (!io_write_byte(sess, fd, '\n'))
		ERRX1("io_write_byte");
	else
		return 1;

	return 0;
}

/*
 * Read buffer from non-blocking descriptor.
 * Returns zero on failure, non-zero on success (zero or more bytes).
 */
static int
io_read_nonblocking(int fd, void *buf, size_t bsz, size_t *sz)
{
	struct pollfd	pfd;
	ssize_t		rsz;
	int		c;

	*sz = 0;

	if (bsz == 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Poll and check for all possible errors. */

	if ((c = poll(&pfd, 1, POLL_TIMEOUT)) == -1) {
		ERR("poll");
		return 0;
	} else if (c == 0) {
		ERRX("poll: timeout");
		return 0;
	} else if ((pfd.revents & (POLLERR|POLLNVAL))) {
		ERRX("poll: bad fd");
		return 0;
	} else if (!(pfd.revents & (POLLIN|POLLHUP))) {
		ERRX("poll: unknown event");
		return 0;
	}

	/* Now the non-blocking read, checking for EOF. */

	if ((rsz = read(fd, buf, bsz)) == -1) {
		ERR("read");
		return 0;
	} else if (rsz == 0) {
		ERRX("unexpected end of file");
		return 0;
	}

	*sz = rsz;
	return 1;
}

/*
 * Blocking read of the full size of the buffer.
 * This can be called for either an error-type message or a regular
 * message---and, for that matter, whether multiplexed or not.
 * Returns 0 on failure, non-zero on success (all bytes read).
 */
static int
io_read_blocking(int fd, void *buf, size_t sz)
{
	size_t	 rsz;
	int	 c;

	while (sz > 0) {
		c = io_read_nonblocking(fd, buf, sz, &rsz);
		if (!c) {
			ERRX1("io_read_nonblocking");
			return 0;
		} else if (rsz == 0) {
			ERRX("io_read_nonblocking: short read");
			return 0;
		}
		buf += rsz;
		sz -= rsz;
	}

	return 1;
}

/*
 * When we do a lot of writes in a row (such as when the sender emits
 * the file list), the server might be sending us multiplexed log
 * messages.
 * If it sends too many, it clogs the socket.
 * This function looks into the read buffer and clears out any log
 * messages pending.
 * If called when there are valid data reads available, this function
 * does nothing.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_flush(struct sess *sess, int fd)
{
	int32_t	 tagbuf, tag;
	char	 mpbuf[1024];

	if (sess->mplex_read_remain)
		return 1;

	/*
	 * First, read the 4-byte little-endian multiplex tag.
	 * The top byte is the tag identifier (7 for normal
	 * data, !7 for out-of-band data); the low three bytes
	 * hold the remaining data size.
	 */

	if (!io_read_blocking(fd, &tagbuf, sizeof(tagbuf))) {
		ERRX1("io_read_blocking");
		return 0;
	}
	tag = le32toh(tagbuf);
	sess->mplex_read_remain = tag & 0xFFFFFF;
	tag >>= 24;
	if (tag == 7)
		return 1;

	tag -= 7;
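	/*
	 * The tag now holds the out-of-band message code relative to
	 * the multiplex base of seven.
	 */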

	if (sess->mplex_read_remain > sizeof(mpbuf)) {
		ERRX("multiplex buffer overflow");
		return 0;
	} else if (sess->mplex_read_remain == 0)
		return 1;

	if (!io_read_blocking(fd, mpbuf, sess->mplex_read_remain)) {
		ERRX1("io_read_blocking");
		return 0;
	}
	if (mpbuf[sess->mplex_read_remain - 1] == '\n')
		mpbuf[--sess->mplex_read_remain] = '\0';

	/*
	 * Always print the server's messages, as the server
	 * will control its own log levelling.
	 */

	LOG0("%.*s", (int)sess->mplex_read_remain, mpbuf);
	sess->mplex_read_remain = 0;

	/*
	 * I only know that a tag of one means an error.
	 * This means that we should exit.
	 */

	if (tag == 1) {
		ERRX1("error from remote host");
		return 0;
	}
	return 1;
}

/*
 * Read buffer from non-blocking descriptor, possibly in multiplex read
 * mode.
 * Returns zero on failure, non-zero on success (all bytes read from
 * the descriptor).
 */
int
io_read_buf(struct sess *sess, int fd, void *buf, size_t sz)
{
	size_t	 rsz;
	int	 c;

	/* If we're not multiplexing, read directly. */

	if (!sess->mplex_reads) {
		assert(sess->mplex_read_remain == 0);
		c = io_read_blocking(fd, buf, sz);
		sess->total_read += sz;
		return c;
	}

	while (sz > 0) {
		/*
		 * First, check to see if we have any regular data
		 * hanging around waiting to be read.
		 * If so, read the lesser of that data and whatever
		 * amount we currently want.
		 */

		if (sess->mplex_read_remain) {
			rsz = sess->mplex_read_remain < sz ?
				sess->mplex_read_remain : sz;
			if (!io_read_blocking(fd, buf, rsz)) {
				ERRX1("io_read_blocking");
				return 0;
			}
			sz -= rsz;
			sess->mplex_read_remain -= rsz;
			buf += rsz;
			sess->total_read += rsz;
			continue;
		}

		assert(sess->mplex_read_remain == 0);
		if (!io_read_flush(sess, fd)) {
			ERRX1("io_read_flush");
			return 0;
		}
	}

	return 1;
}

/*
 * Like io_write_buf(), but for a long (which has a composite wire
 * encoding).
 * Returns zero on failure, non-zero on success.
 */
int
io_write_ulong(struct sess *sess, int fd, uint64_t val)
{
	uint64_t	nv;
	int64_t		sval = (int64_t)val;

	/* Short-circuit: send as an integer if possible. */

	if (sval <= INT32_MAX && sval >= 0) {
		if (!io_write_int(sess, fd, (int32_t)val)) {
			ERRX1("io_write_int");
			return 0;
		}
		return 1;
	}

	/* Otherwise, pad with -1 32-bit, then send 64-bit. */
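	/*
	 * For example, the value 5000000000 (0x12a05f200) does not fit
	 * in a non-negative int32_t, so it goes out as the 32-bit
	 * sentinel ff ff ff ff followed by the 64-bit little-endian
	 * bytes 00 f2 05 2a 01 00 00 00.
	 */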

	nv = htole64(val);

	if (!io_write_int(sess, fd, -1))
		ERRX1("io_write_int");
	else if (!io_write_buf(sess, fd, &nv, sizeof(int64_t)))
		ERRX1("io_write_buf");
	else
		return 1;

	return 0;
}

int
io_write_long(struct sess *sess, int fd, int64_t val)
{
	return io_write_ulong(sess, fd, (uint64_t)val);
}

/*
 * Like io_write_buf(), but for an unsigned integer.
 * Returns zero on failure, non-zero on success.
 */
int
io_write_uint(struct sess *sess, int fd, uint32_t val)
{
	uint32_t	nv;

	nv = htole32(val);

	if (!io_write_buf(sess, fd, &nv, sizeof(uint32_t))) {
		ERRX1("io_write_buf");
		return 0;
	}
	return 1;
}

/*
 * Like io_write_buf(), but for an integer.
 * Returns zero on failure, non-zero on success.
 */
int
io_write_int(struct sess *sess, int fd, int32_t val)
{
	return io_write_uint(sess, fd, (uint32_t)val);
}

/*
 * A simple assertion-protected memory copy from the input "val" of size
 * "valsz" into our buffer "buf", full size "buflen", position "bufpos".
 * Increases our "bufpos" appropriately.
 * This has no return value, but will assert() if the size of the buffer
 * is insufficient for the new data.
 */
void
io_buffer_buf(void *buf, size_t *bufpos, size_t buflen, const void *val,
    size_t valsz)
{

	assert(*bufpos + valsz <= buflen);
	memcpy(buf + *bufpos, val, valsz);
	*bufpos += valsz;
}

/*
 * Like io_buffer_buf(), but also accommodating multiplexing codes.
 * This should NEVER be passed to io_write_buf(), but instead passed
 * directly to a write operation.
 */
void
io_lowbuffer_buf(struct sess *sess, void *buf,
	size_t *bufpos, size_t buflen, const void *val, size_t valsz)
{
	int32_t	tagbuf;

	if (valsz == 0)
		return;

	if (!sess->mplex_writes) {
		io_buffer_buf(buf, bufpos, buflen, val, valsz);
		return;
	}

	assert(*bufpos + valsz + sizeof(int32_t) <= buflen);
	assert(valsz == (valsz & 0xFFFFFF));
	tagbuf = (7 << 24) + valsz;
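	/*
	 * This is the same framing as io_write_buf(): the payload
	 * length sits in the low 24 bits, the channel tag of seven in
	 * the top byte.
	 * io_buffer_int() below converts the header to little-endian.
	 */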

	io_buffer_int(buf, bufpos, buflen, tagbuf);
	io_buffer_buf(buf, bufpos, buflen, val, valsz);
}

/*
 * Allocate the space needed for io_lowbuffer_buf() and friends.
 * This should be called for *each* lowbuffer operation, so:
 *   io_lowbuffer_alloc(... sizeof(int32_t));
 *   io_lowbuffer_int(...);
 *   io_lowbuffer_alloc(... sizeof(int32_t));
 *   io_lowbuffer_int(...);
 * And not sizeof(int32_t) * 2 or whatnot.
 * Returns zero on failure, non-zero on success.
 */
int
io_lowbuffer_alloc(struct sess *sess, void **buf,
	size_t *bufsz, size_t *bufmax, size_t sz)
{
	void	*pp;
	size_t	 extra;

	extra = sess->mplex_writes ? sizeof(int32_t) : 0;

	if (*bufsz + sz + extra > *bufmax) {
		pp = realloc(*buf, *bufsz + sz + extra);
		if (pp == NULL) {
			ERR("realloc");
			return 0;
		}
		*buf = pp;
		*bufmax = *bufsz + sz + extra;
	}
	*bufsz += sz + extra;
	return 1;
}

/*
 * Like io_lowbuffer_buf(), but for a single integer.
 */
void
io_lowbuffer_int(struct sess *sess, void *buf,
	size_t *bufpos, size_t buflen, int32_t val)
{
	int32_t	nv = htole32(val);

	io_lowbuffer_buf(sess, buf, bufpos, buflen, &nv, sizeof(int32_t));
}

/*
 * Like io_buffer_buf(), but for a single integer.
 */
void
io_buffer_int(void *buf, size_t *bufpos, size_t buflen, int32_t val)
{
	int32_t	nv = htole32(val);

	io_buffer_buf(buf, bufpos, buflen, &nv, sizeof(int32_t));
}

/*
 * Like io_read_buf(), but for a long >=0.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_long(struct sess *sess, int fd, int64_t *val)
{
	uint64_t	uoval;

	if (!io_read_ulong(sess, fd, &uoval)) {
		ERRX1("io_read_ulong");
		return 0;
	}
	*val = (int64_t)uoval;
	if (*val < 0) {
		ERRX1("io_read_long negative");
		return 0;
	}
	return 1;
}

/*
 * Like io_read_buf(), but for a long.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_ulong(struct sess *sess, int fd, uint64_t *val)
{
	uint64_t	 oval;
	int32_t		 sval;

	/* Start with the short-circuit: read as an int. */

	if (!io_read_int(sess, fd, &sval)) {
		ERRX1("io_read_int");
		return 0;
	} else if (sval != -1) {
		/* io_read_int() has already converted to host order. */
		*val = (uint32_t)sval;
		return 1;
	}

	/* If the int is -1, read as 64 bits. */
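	/*
	 * This is unambiguous: on the write side, io_write_ulong()
	 * only uses the short form for values in [0, INT32_MAX], so a
	 * leading -1 can never be a legitimate short value.
	 */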

	if (!io_read_buf(sess, fd, &oval, sizeof(uint64_t))) {
		ERRX1("io_read_buf");
		return 0;
	}

	*val = le64toh(oval);
	return 1;
}

/*
 * One thing we often need to do is read a size_t.
 * These are transmitted as int32_t, so make sure that the value
 * transmitted is not out of range.
 * FIXME: I assume that size_t can handle int32_t's max.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_size(struct sess *sess, int fd, size_t *val)
{
	int32_t	oval;

	if (!io_read_int(sess, fd, &oval)) {
		ERRX1("io_read_int");
		return 0;
	} else if (oval < 0) {
		ERRX("io_read_size: negative value");
		return 0;
	}

	*val = oval;
	return 1;
}

/*
 * Like io_read_buf(), but for an unsigned integer.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_uint(struct sess *sess, int fd, uint32_t *val)
{
	uint32_t	oval;

	if (!io_read_buf(sess, fd, &oval, sizeof(uint32_t))) {
		ERRX1("io_read_buf");
		return 0;
	}

	*val = le32toh(oval);
	return 1;
}

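/*
 * Like io_read_buf(), but for a signed integer.
 * Returns zero on failure, non-zero on success.
 */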
int
io_read_int(struct sess *sess, int fd, int32_t *val)
{
	return io_read_uint(sess, fd, (uint32_t *)val);
}

/*
 * Copies "valsz" bytes from "buf", full size "bufsz", at position
 * "bufpos", into "val".
 * Calls assert() if the source doesn't have enough data.
 * Increases "bufpos" to the new position.
 */
void
io_unbuffer_buf(const void *buf, size_t *bufpos, size_t bufsz, void *val,
    size_t valsz)
{

	assert(*bufpos + valsz <= bufsz);
	memcpy(val, buf + *bufpos, valsz);
	*bufpos += valsz;
}

/*
 * Calls io_unbuffer_buf() and converts.
 */
void
io_unbuffer_int(const void *buf, size_t *bufpos, size_t bufsz, int32_t *val)
{
	int32_t	oval;

	io_unbuffer_buf(buf, bufpos, bufsz, &oval, sizeof(int32_t));
	*val = le32toh(oval);
}

/*
 * Calls io_unbuffer_int() and checks that the value is non-negative.
 * Returns zero on failure, non-zero on success.
 */
int
io_unbuffer_size(const void *buf, size_t *bufpos, size_t bufsz, size_t *val)
{
	int32_t	oval;

	io_unbuffer_int(buf, bufpos, bufsz, &oval);
	if (oval < 0) {
		ERRX("io_unbuffer_size: negative value");
		return 0;
	}
	*val = oval;
	return 1;
}

/*
 * Like io_read_buf(), but for a single byte >=0.
 * Returns zero on failure, non-zero on success.
 */
int
io_read_byte(struct sess *sess, int fd, uint8_t *val)
{

	if (!io_read_buf(sess, fd, val, sizeof(uint8_t))) {
		ERRX1("io_read_buf");
		return 0;
	}
	return 1;
}

/*
 * Like io_write_buf(), but for a single byte.
 * Returns zero on failure, non-zero on success.
 */
int
io_write_byte(struct sess *sess, int fd, uint8_t val)
{

	if (!io_write_buf(sess, fd, &val, sizeof(uint8_t))) {
		ERRX1("io_write_buf");
		return 0;
	}
	return 1;
}