/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING	RTF_PROTO3	/* set on routes we manage */

struct in_rtq_pcpu {
	struct radix_node_head	*rnh;

	struct callout		timo_ch;
	struct netmsg_base	timo_nmsg;

	time_t			lastdrain;
	int			draining;
	struct netmsg_base	drain_nmsg;
} __cachealign;

static void	in_rtqtimo(void *);

static struct in_rtq_pcpu	in_rtq_pcpu[MAXCPU];

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(const void *key, const void *mask, struct radix_node_head *head,
	    struct radix_node *nodes)
{
	struct rtentry *rt = (struct rtentry *)nodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
	struct radix_node *ret;
	struct in_ifaddr_container *iac;
	struct in_ifaddr *ia;

	/*
	 * For IP, mark routes to multicast addresses as such, because
	 * it's easy to do and might be useful (but this is much more
	 * dubious since it's so easy to inspect the address).
	 *
	 * For IP, all unicast non-host routes are automatically cloning.
	 */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
		rt->rt_flags |= RTF_MULTICAST;

	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
		rt->rt_flags |= RTF_PRCLONING;

	/*
	 * Try to set RTF_BROADCAST or RTF_LOCAL for a host route.
	 *
	 * Skip this process if a host route already has RTF_LOCAL set,
	 * for example by ifa_maintain_loopback_route().
	 *
	 * For host routes, we make sure that RTF_BROADCAST is set for
	 * anything that looks like a broadcast address.  This way, we can
	 * avoid an expensive call to in_broadcast() in ip_output() most of
	 * the time (because the route passed to ip_output() is almost always
	 * a host route).
	 *
	 * For local routes, we set RTF_LOCAL to allow various shortcuts.
	 *
	 * A cloned network route will point to one of several possible
	 * addresses if an interface has aliases and must be repointed back to
	 * the correct address or arp_rtrequest() will not properly detect the
	 * local IP.
	 */
	if ((rt->rt_flags & (RTF_HOST | RTF_LOCAL)) == RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			   sin->sin_addr.s_addr) {
			rt->rt_flags |= RTF_LOCAL;
		} else {
			LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
				     ia_hash) {
				ia = iac->ia;
				if (sin->sin_addr.s_addr ==
				    ia->ia_addr.sin_addr.s_addr) {
					rt->rt_flags |= RTF_LOCAL;
					IFAREF(&ia->ia_ifa);
					IFAFREE(rt->rt_ifa);
					rt->rt_ifa = &ia->ia_ifa;
					rt->rt_ifp = rt->rt_ifa->ifa_ifp;
					break;
				}
			}
		}
	}

	if (rt->rt_rmx.rmx_mtu == 0 &&
	    !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp != NULL)
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

	ret = rn_addroute(key, mask, head, nodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *oldrt;

		/*
		 * We are trying to add a host route, but can't.
		 * Find out if it is because of an ARP entry and
		 * delete it if so.
		 */
		oldrt = rtpurelookup((struct sockaddr *)sin);
		if (oldrt != NULL) {
			--oldrt->rt_refcnt;
			if ((oldrt->rt_flags & RTF_LLINFO) &&
			    (oldrt->rt_flags & RTF_HOST) &&
			    oldrt->rt_gateway &&
			    oldrt->rt_gateway->sa_family == AF_LINK) {
				rtrequest(RTM_DELETE, rt_key(oldrt),
					  oldrt->rt_gateway, rt_mask(oldrt),
					  oldrt->rt_flags, NULL);
				ret = rn_addroute(key, mask, head, nodes);
			}
		}
	}

	/*
	 * If the new route has been created successfully, and it is
	 * not a multicast/broadcast or cloned route, then we will
	 * have to flush the ipflow.  Otherwise, we may end up using
	 * the wrong route.
	 */
	if (ret != NULL &&
	    (rt->rt_flags &
	     (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0)
		ipflow_flush_oncpu();
	return ret;
}

/*
 * This code is the inverse of in_closeroute(): on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(const void *key, struct radix_node_head *head)
{
	struct radix_node *rn = rn_match(key, head);
	struct rtentry *rt = (struct rtentry *)rn;

	if (rt != NULL && rt->rt_refcnt == 0) {	/* this is first reference */
		if (rt->rt_flags & RTPRF_EXPIRING) {
			rt->rt_flags &= ~RTPRF_EXPIRING;
			rt->rt_rmx.rmx_expire = 0;
		}
	}
	return rn;
}

static int rtq_reallyold = 60*60;	/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;	/* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;		/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
		return;

	/*
	 * As requested by David Greenman:
	 * If rtq_reallyold is 0, just delete the route without
	 * waiting for a timeout cycle to kill it.
	 */
	if (rtq_reallyold != 0) {
		rt->rt_flags |= RTPRF_EXPIRING;
		rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
	} else {
		/*
		 * Remove the route from the radix tree, but defer
		 * deallocation until we return to rtfree().
		 */
		rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
			  rt->rt_flags, &rt);
	}
}

struct rtqk_arg {
	struct radix_node_head *rnh;
	int draining;
	int killed;
	int found;
	int updating;
	time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_flags & RTPRF_EXPIRING) {
		ap->found++;
		if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
					rt_mask(rt), rt->rt_flags, NULL);
			if (err)
				log(LOG_WARNING, "in_rtqkill: error %d\n", err);
			else
				ap->killed++;
		} else {
			if (ap->updating &&
			    (int)(rt->rt_rmx.rmx_expire - time_uptime) >
			    rtq_reallyold) {
				rt->rt_rmx.rmx_expire = time_uptime +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
					    rt->rt_rmx.rmx_expire);
		}
	}

	return 0;
}

#define RTQ_TIMEOUT	60*10	/* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only, and
 * could be changed by all CPUs.  However, they are changed at such a low
 * frequency that we could ignore the cache thrashing issue and treat them
 * as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];
	struct radix_node_head *rnh = pcpu->rnh;

	ASSERT_NETISR_NCPUS(mycpuid);

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold)
			rtq_reallyold = rtq_minreallyold;

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_uptime;
	if ((int)atv.tv_sec < 1) {		/* time shift safety */
		atv.tv_sec = 1;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	if ((int)atv.tv_sec > rtq_timeout) {	/* time shift safety */
		atv.tv_sec = rtq_timeout;
		arg.nextstop = time_uptime + atv.tv_sec;
	}
	callout_reset(&pcpu->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

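/*
 * Callout handler.  It runs in callout context, so bounce the actual
 * tree walk into the owning netisr by (re)sending the per-cpu timeout
 * netmsg, unless a previous send is still pending.
 */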
static void
in_rtqtimo(void *arg __unused)
{
	int cpuid = mycpuid;
	struct lwkt_msg *lmsg = &in_rtq_pcpu[cpuid].timo_nmsg.lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
	crit_exit();
}

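/*
 * Walk this cpu's inet routing table and delete every route that we
 * manage (RTPRF_EXPIRING), whether or not its timeout has expired.
 * Must run in the netisr that owns the table.
 */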
static void
in_rtqdrain_oncpu(struct in_rtq_pcpu *pcpu)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
	struct rtqk_arg arg;

	ASSERT_NETISR_NCPUS(mycpuid);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);

	pcpu->lastdrain = time_uptime;
}

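/*
 * Netmsg handler for an asynchronous drain request: reply first so the
 * message can be reused, then drain and clear the per-cpu draining flag.
 */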
static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
	struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];

	/* Reply ASAP */
	crit_enter();
	lwkt_replymsg(&nmsg->lmsg, 0);
	crit_exit();

	in_rtqdrain_oncpu(pcpu);
	pcpu->draining = 0;
}

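/*
 * IPI callback.  It runs on the target cpu and queues that cpu's drain
 * netmsg to its netisr, unless the message is already in flight.
 */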
static void
in_rtqdrain_ipi(void *arg __unused)
{
	int cpu = mycpuid;
	struct lwkt_msg *msg = &in_rtq_pcpu[cpu].drain_nmsg.lmsg;

	crit_enter();
	if (msg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
	crit_exit();
}

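/*
 * Drain the cloned-route caches on all netisr cpus.  The local cpu, if
 * it is one of the netisr cpus, is drained directly; the rest are poked
 * with an IPI, skipping any cpu that is already draining or was just
 * drained during the current second.
 */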
void
in_rtqdrain(void)
{
	cpumask_t mask;
	int cpu;

	CPUMASK_ASSBMASK(mask, netisr_ncpus);
	CPUMASK_ANDMASK(mask, smp_active_mask);

	cpu = mycpuid;
	if (IN_NETISR_NCPUS(cpu)) {
		in_rtqdrain_oncpu(&in_rtq_pcpu[cpu]);
		CPUMASK_NANDBIT(mask, cpu);
	}

	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
		struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[cpu];

		if (!CPUMASK_TESTBIT(mask, cpu))
			continue;

		if (pcpu->draining || pcpu->lastdrain == time_uptime) {
			/* Just drained or is draining; skip this cpu. */
			CPUMASK_NANDBIT(mask, cpu);
			continue;
		}
		pcpu->draining = 1;
	}

	if (CPUMASK_TESTNZERO(mask))
		lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *rnh;
	struct in_rtq_pcpu *pcpu;
	int cpuid = mycpuid;

	rnh = *head;
	KKASSERT(rnh == rt_tables[cpuid][AF_INET]);

	if (!rn_inithead(&rnh, rn_cpumaskhead(cpuid), off))
		return 0;

	*head = rnh;
	rnh->rnh_addaddr = in_addroute;
	rnh->rnh_matchaddr = in_matchroute;
	rnh->rnh_close = in_closeroute;

	pcpu = &in_rtq_pcpu[cpuid];
	pcpu->rnh = rnh;
	callout_init_mp(&pcpu->timo_ch);
	netmsg_init(&pcpu->timo_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqtimo_dispatch);
	netmsg_init(&pcpu->drain_nmsg, NULL, &netisr_adone_rport, MSGF_PRIORITY,
	    in_rtqdrain_dispatch);

	in_rtqtimo(NULL);	/* kick off timeout first time */
	return 1;
}

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int del;
};

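/*
 * rnh_walktree() callback: delete any route bound to the given ifaddr,
 * sparing static routes unless 'del' is set.
 */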
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;

	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		/*
		 * We need to disable the automatic prune that happens
		 * in this case in rtrequest() because it will blow
		 * away the pointers that rn_walktree() needs in order
		 * to continue our descent.  We will end up deleting all
		 * the routes that rtrequest() would have in any case,
		 * so that behavior is not needed there.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
	}
	return 0;
}

struct netmsg_ifadown {
	struct netmsg_base	base;
	struct ifaddr		*ifa;
	int			del;
};

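/*
 * Per-cpu dispatch: clean the local inet routing table, clear
 * IFA_ROUTE, then forward the message to the next netisr cpu.
 */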
static void
in_ifadown_dispatch(netmsg_t msg)
{
	struct netmsg_ifadown *rmsg = (void *)msg;
	struct radix_node_head *rnh;
	struct ifaddr *ifa = rmsg->ifa;
	struct in_ifadown_arg arg;
	int cpu;

	cpu = mycpuid;
	ASSERT_NETISR_NCPUS(cpu);

	arg.rnh = rnh = rt_tables[cpu][AF_INET];
	arg.ifa = ifa;
	arg.del = rmsg->del;
	rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
	ifa->ifa_flags &= ~IFA_ROUTE;

	netisr_forwardmsg(&msg->base, cpu + 1);
}

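/*
 * Remove routes referencing the address from every per-cpu routing
 * table.  Returns 0 on success, 1 if the address is not AF_INET.
 */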
int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
	struct netmsg_ifadown msg;

	if (ifa->ifa_addr->sa_family != AF_INET)
		return 1;

	/*
	 * XXX individual requests are not independently chained,
	 * which means that the per-cpu route tables will not be
	 * consistent in the middle of the operation.  If routes
	 * related to the interface are manipulated while we are
	 * doing this the inconsistency could trigger a panic.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    in_ifadown_dispatch);
	msg.ifa = ifa;
	msg.del = delete;
	netisr_domsg_global(&msg.base);

	return 0;
}

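/*
 * As in_ifadown_force(), except that addresses on CARP interfaces
 * are left alone (when CARP support is compiled in).
 */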
int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
	if (ifa->ifa_ifp->if_type == IFT_CARP)
		return 0;
#endif
	return in_ifadown_force(ifa, delete);
}