/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_l2t.c,v 1.3 2014/03/25 16:19:14 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_inarp.h>
#include <net/if_dl.h>
#include <net/route.h>

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include "cxgb_include.h"
#endif

#define VLAN_NONE 0xfff
#define SDL(s) ((struct sockaddr_dl *)s)
#define RT_ENADDR(rt)  ((u_char *)LLADDR(SDL((rt))))
#define rt_expire rt_rmx.rmx_expire

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry is dropped by decrementing its reference count,
 * which can therefore happen in parallel with entry allocation, but no
 * entry can change state or increment its ref count during allocation as
 * both of these perform lookups.
 */

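/*
 * The 802.1Q priority (PCP) field occupies the top 3 bits of the
 * 16-bit tag stored in e->vlan, hence the shift by 13.
 */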
static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
    return e->vlan >> 13;
}

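/*
 * Hash an (IP address, interface index) pair to an L2 table bucket.
 * The mask assumes d->nentries is a power of 2; e.g. with 256 entries
 * the bucket is jhash_2words(key, ifindex, 0) & 0xff.
 */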
static inline unsigned int
arp_hash(u32 key, int ifindex, const struct l2t_data *d)
{
    return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

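/*
 * Point an L2T entry at a new rtentry: a reference is taken on the new
 * route before the old one is dropped, so the entry never holds an
 * unreferenced route.
 */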
static inline void
neigh_replace(struct l2t_entry *e, struct rtentry *rt)
{
    RT_LOCK(rt);
    RT_ADDREF(rt);
    RT_UNLOCK(rt);

    if (e->neigh) {
        RT_LOCK(e->neigh);
        RT_REMREF(e->neigh);
        RT_UNLOCK(e->neigh);
    }
    e->neigh = rt;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied mbuf is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int
setup_l2e_send_pending(struct toedev *dev, struct mbuf *m,
            struct l2t_entry *e)
{
    struct cpl_l2t_write_req *req;

    if (!m) {
        if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
            return (ENOMEM);
    }
    /*
     * XXX MH_ALIGN
     */
    req = mtod(m, struct cpl_l2t_write_req *);
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
    req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
                V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
                V_L2T_W_PRIO(vlan_prio(e)));

    memcpy(e->dmac, RT_ENADDR(e->neigh), sizeof(e->dmac));
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
    m_set_priority(m, CPL_PRIORITY_CONTROL);
    cxgb_ofld_send(dev, m);
    /* drain the arp queue, handing each pending packet to the TOE */
    while (e->arpq_head) {
        m = e->arpq_head;
        e->arpq_head = m->m_next;
        m->m_next = NULL;
        cxgb_ofld_send(dev, m);
    }
    e->arpq_tail = NULL;
    e->state = L2T_STATE_VALID;

    return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void
arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
{
    m->m_next = NULL;
    if (e->arpq_head)
        e->arpq_tail->m_next = m;
    else
        e->arpq_head = m;
    e->arpq_tail = m;
}

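/*
 * Transmit a packet through an L2T entry.  Valid entries are sent
 * directly, stale entries are revalidated first, and packets for
 * entries that are still resolving are queued on the entry's arpq
 * until ARP completes.
 */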
int
t3_l2t_send_slow(struct toedev *dev, struct mbuf *m,
             struct l2t_entry *e)
{
    struct rtentry *rt;
    struct mbuf *m0;

    if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
        return (ENOMEM);

    rt = e->neigh;

again:
    switch (e->state) {
    case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
        arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
        mtx_lock(&e->lock);
        if (e->state == L2T_STATE_STALE)
            e->state = L2T_STATE_VALID;
        mtx_unlock(&e->lock);
        /* FALLTHROUGH */
    case L2T_STATE_VALID:     /* fast-path, send the packet on */
        return cxgb_ofld_send(dev, m);
    case L2T_STATE_RESOLVING:
        mtx_lock(&e->lock);
        if (e->state != L2T_STATE_RESOLVING) { /* ARP already completed */
            mtx_unlock(&e->lock);
            goto again;
        }
        arpq_enqueue(e, m);
        mtx_unlock(&e->lock);

        if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
            return (ENOMEM);
        /*
         * Only the first packet added to the arpq should kick off
         * resolution.  However, because the m_gethdr above can fail,
         * we allow each packet added to the arpq to retry resolution
         * as a way of recovering from transient memory exhaustion.
         * A better way would be to use a work request to retry L2T
         * entries when there's no memory.
         */
        if (arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt)) == 0) {
            mtx_lock(&e->lock);
            if (e->arpq_head)
                setup_l2e_send_pending(dev, m, e);
            else
                m_freem(m);
            mtx_unlock(&e->lock);
        }
    }
    return 0;
}

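/*
 * Like t3_l2t_send_slow(), but only drives resolution/revalidation of
 * the entry; there is no payload packet to transmit.
 */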
void
t3_l2t_send_event(struct toedev *dev, struct l2t_entry *e)
{
    struct rtentry *rt;
    struct mbuf *m0;

    if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
        return;

    rt = e->neigh;
again:
    switch (e->state) {
    case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
        arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));
        mtx_lock(&e->lock);
        if (e->state == L2T_STATE_STALE) {
            e->state = L2T_STATE_VALID;
        }
        mtx_unlock(&e->lock);
        return;
    case L2T_STATE_VALID:     /* entry already valid, nothing to do */
        return;
    case L2T_STATE_RESOLVING:
        mtx_lock(&e->lock);
        if (e->state != L2T_STATE_RESOLVING) { /* ARP already completed */
            mtx_unlock(&e->lock);
            goto again;
        }
        mtx_unlock(&e->lock);

        if ((m0 = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
            return;
        /*
         * Only the first packet added to the arpq should kick off
         * resolution.  However, because the m_gethdr above can fail,
         * we allow each packet added to the arpq to retry resolution
         * as a way of recovering from transient memory exhaustion.
         * A better way would be to use a work request to retry L2T
         * entries when there's no memory.
         */
        arpresolve(rt->rt_ifp, rt, m0, rt->rt_gateway, RT_ENADDR(rt));

    }
    return;
}
/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *
alloc_l2e(struct l2t_data *d)
{
    struct l2t_entry *end, *e, **p;

    if (!atomic_load_acq_int(&d->nfree))
        return NULL;

    /* there's definitely a free entry */
    for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
        if (atomic_load_acq_int(&e->refcnt) == 0)
            goto found;

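    /* wrapped past the end: resume from the start of the table
     * (entry 0 is reserved, so begin the scan at index 1) */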
    for (e = &d->l2tab[1]; atomic_load_acq_int(&e->refcnt); ++e)
        ;
found:
    d->rover = e + 1;
    atomic_add_int(&d->nfree, -1);

    /*
     * The entry we found may be an inactive entry that is
     * presently in the hash table.  We need to remove it.
     */
    if (e->state != L2T_STATE_UNUSED) {
        int hash = arp_hash(e->addr, e->ifindex, d);

        for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
            if (*p == e) {
                *p = e->next;
                break;
            }
        e->state = L2T_STATE_UNUSED;
    }
    return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused, but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0, we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void
t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
    mtx_lock(&e->lock);
    if (atomic_load_acq_int(&e->refcnt) == 0) {  /* hasn't been recycled */
        if (e->neigh) {
            RT_LOCK(e->neigh);
            RT_REMREF(e->neigh);
            RT_UNLOCK(e->neigh);
            e->neigh = NULL;
        }
    }
    mtx_unlock(&e->lock);
    atomic_add_int(&d->nfree, 1);
}

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void
reuse_entry(struct l2t_entry *e, struct rtentry *neigh)
{
    struct llinfo_arp *la;

    la = (struct llinfo_arp *)neigh->rt_llinfo;

    mtx_lock(&e->lock);                /* avoid race with t3_l2e_free */
    if (neigh != e->neigh)
        neigh_replace(e, neigh);

    if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)) ||
        (neigh->rt_expire > time_uptime))
        e->state = L2T_STATE_RESOLVING;
    else if (la->la_hold == NULL)
        e->state = L2T_STATE_VALID;
    else
        e->state = L2T_STATE_STALE;
    mtx_unlock(&e->lock);
}

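/*
 * Look up the L2T entry for (address, interface, SMT index), taking a
 * reference on it, or allocate and initialise a new entry under the
 * table write lock.  Returns NULL if the table is full.
 */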
struct l2t_entry *
t3_l2t_get(struct toedev *dev, struct rtentry *neigh,
                 unsigned int smt_idx)
{
    struct l2t_entry *e;
    struct l2t_data *d = L2DATA(dev);
    u32 addr = ((struct sockaddr_in *)rt_getkey(neigh))->sin_addr.s_addr;
    int ifidx = neigh->rt_ifp->if_index;
    int hash = arp_hash(addr, ifidx, d);

    rw_wlock(&d->lock);
    for (e = d->l2tab[hash].first; e; e = e->next)
        if (e->addr == addr && e->ifindex == ifidx &&
            e->smt_idx == smt_idx) {
            l2t_hold(d, e);
            if (atomic_load_acq_int(&e->refcnt) == 1)
                reuse_entry(e, neigh);
            goto done;
        }

    /* Need to allocate a new entry */
    e = alloc_l2e(d);
    if (e) {
        mtx_lock(&e->lock);          /* avoid race with t3_l2e_free */
        e->next = d->l2tab[hash].first;
        d->l2tab[hash].first = e;
        e->state = L2T_STATE_RESOLVING;
        e->addr = addr;
        e->ifindex = ifidx;
        e->smt_idx = smt_idx;
        atomic_store_rel_int(&e->refcnt, 1);
        neigh_replace(e, neigh);
#ifdef notyet
        /*
         * XXX need to add accessor function for vlan tag
         */
        if (neigh->rt_ifp->if_vlantrunk)
            e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
        else
#endif
            e->vlan = VLAN_NONE;
        mtx_unlock(&e->lock);
    }
done:
    rw_wunlock(&d->lock);
    return e;
}

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the TOE.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void
handle_failed_resolution(struct toedev *dev, struct mbuf *arpq)
{

    while (arpq) {
        struct mbuf *m = arpq;
#ifdef notyet
        struct l2t_mbuf_cb *cb = L2T_MBUF_CB(m);
#endif
        arpq = m->m_next;
        m->m_next = NULL;
#ifdef notyet
        if (cb->arp_failure_handler)
            cb->arp_failure_handler(dev, m);
        else
#endif
            cxgb_ofld_send(dev, m);
    }

}

#if defined(NETEVENT) || !defined(CONFIG_CHELSIO_T3_MODULE)
/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
    struct l2t_entry *e;
    struct mbuf *arpq = NULL;
    struct l2t_data *d = L2DATA(dev);
    u32 addr = ((struct sockaddr_in *)rt_getkey(neigh))->sin_addr.s_addr;
    int ifidx = neigh->rt_ifp->if_index;
    int hash = arp_hash(addr, ifidx, d);
    struct llinfo_arp *la;

    rw_rlock(&d->lock);
    for (e = d->l2tab[hash].first; e; e = e->next)
        if (e->addr == addr && e->ifindex == ifidx) {
            mtx_lock(&e->lock);
            goto found;
        }
    rw_runlock(&d->lock);
    return;

found:
    rw_runlock(&d->lock);
    if (atomic_load_acq_int(&e->refcnt)) {
        if (neigh != e->neigh)
            neigh_replace(e, neigh);

        la = (struct llinfo_arp *)neigh->rt_llinfo;
        if (e->state == L2T_STATE_RESOLVING) {
            if (la->la_asked >= 5 /* arp_maxtries */) {
                arpq = e->arpq_head;
                e->arpq_head = e->arpq_tail = NULL;
            } else if (la->la_hold == NULL)
                setup_l2e_send_pending(dev, NULL, e);
        } else {
            e->state = (la->la_hold == NULL) ?
                L2T_STATE_VALID : L2T_STATE_STALE;
            if (memcmp(e->dmac, RT_ENADDR(neigh), 6))
                setup_l2e_send_pending(dev, NULL, e);
        }
    }
    mtx_unlock(&e->lock);

    if (arpq)
        handle_failed_resolution(dev, arpq);
}
#else
/*
 * Called from a kprobe, interrupts are off.
 */
void
t3_l2t_update(struct toedev *dev, struct rtentry *neigh)
{
    struct l2t_entry *e;
    struct l2t_data *d = L2DATA(dev);
    u32 addr = *(u32 *) rt_key(neigh);
    int ifidx = neigh->dev->ifindex;
    int hash = arp_hash(addr, ifidx, d);

    rw_rlock(&d->lock);
    for (e = d->l2tab[hash].first; e; e = e->next)
        if (e->addr == addr && e->ifindex == ifidx) {
            mtx_lock(&e->lock);
            if (atomic_load_acq_int(&e->refcnt)) {
                if (neigh != e->neigh)
                    neigh_replace(e, neigh);
                e->tdev = dev;
                mod_timer(&e->update_timer, jiffies + 1);
            }
            mtx_unlock(&e->lock);
            break;
        }
    rw_runlock(&d->lock);
}

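/*
 * Deferred half of the kprobe path above: the update is rescheduled to
 * timer context, where the entry and neighbour locks can be taken and
 * the pending arpq drained.
 */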
static void
update_timer_cb(unsigned long data)
{
    struct mbuf *arpq = NULL;
    struct l2t_entry *e = (struct l2t_entry *)data;
    struct rtentry *neigh = e->neigh;
    struct toedev *dev = e->tdev;

    barrier();
    if (!atomic_load_acq_int(&e->refcnt))
        return;

    rw_rlock(&neigh->lock);
    mtx_lock(&e->lock);

    if (atomic_load_acq_int(&e->refcnt)) {
        if (e->state == L2T_STATE_RESOLVING) {
            if (neigh->nud_state & NUD_FAILED) {
                arpq = e->arpq_head;
                e->arpq_head = e->arpq_tail = NULL;
            } else if (neigh_is_connected(neigh) && e->arpq_head)
                setup_l2e_send_pending(dev, NULL, e);
        } else {
            e->state = neigh_is_connected(neigh) ?
                L2T_STATE_VALID : L2T_STATE_STALE;
            if (memcmp(e->dmac, RT_ENADDR(neigh), sizeof(e->dmac)))
                setup_l2e_send_pending(dev, NULL, e);
        }
    }
    mtx_unlock(&e->lock);
    rw_runlock(&neigh->lock);

    if (arpq)
        handle_failed_resolution(dev, arpq);
}
#endif

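/*
 * Allocate and initialise the L2 table: one contiguous allocation
 * holding the l2t_data header followed by l2t_capacity entries.
 * arp_hash() masks with nentries - 1, so l2t_capacity is expected to
 * be a power of 2.
 */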
struct l2t_data *
t3_init_l2t(unsigned int l2t_capacity)
{
    struct l2t_data *d;
    int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

    d = cxgb_alloc_mem(size);
    if (!d)
        return NULL;

    d->nentries = l2t_capacity;
    d->rover = &d->l2tab[1];    /* entry 0 is not used */
    atomic_store_rel_int(&d->nfree, l2t_capacity - 1);
    rw_init(&d->lock, "L2T");

    for (i = 0; i < l2t_capacity; ++i) {
        d->l2tab[i].idx = i;
        d->l2tab[i].state = L2T_STATE_UNUSED;
        mtx_init(&d->l2tab[i].lock, "L2TAB", NULL, MTX_DEF);
        atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
        setup_timer(&d->l2tab[i].update_timer, update_timer_cb,
                (unsigned long)&d->l2tab[i]);
#endif
#endif
    }
    return d;
}

void
t3_free_l2t(struct l2t_data *d)
{
#ifndef NETEVENT
#ifdef CONFIG_CHELSIO_T3_MODULE
    int i;

    /* Stop all L2T timers */
    for (i = 0; i < d->nentries; ++i)
        del_timer_sync(&d->l2tab[i].update_timer);
#endif
#endif
    cxgb_free_mem(d);
}

#ifdef CONFIG_PROC_FS
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static inline void *
l2t_get_idx(struct seq_file *seq, loff_t pos)
{
    struct l2t_data *d = seq->private;

    return pos >= d->nentries ? NULL : &d->l2tab[pos];
}

static void *
l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
    return *pos ? l2t_get_idx(seq, *pos) : SEQ_START_TOKEN;
}

static void *
l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    v = l2t_get_idx(seq, *pos + 1);
    if (v)
        ++*pos;
    return v;
}

static void
l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char
l2e_state(const struct l2t_entry *e)
{
    switch (e->state) {
    case L2T_STATE_VALID: return 'V';  /* valid, fast-path entry */
    case L2T_STATE_STALE: return 'S';  /* needs revalidation, but usable */
    case L2T_STATE_RESOLVING:
        return e->arpq_head ? 'A' : 'R';
    default:
        return 'U';
    }
}

static int
l2t_seq_show(struct seq_file *seq, void *v)
{
    if (v == SEQ_START_TOKEN)
        seq_puts(seq, "Index IP address      Ethernet address   VLAN  "
             "Prio  State   Users SMTIDX  Port\n");
    else {
        char ip[20];
        struct l2t_entry *e = v;

        mtx_lock(&e->lock);
        snprintf(ip, sizeof(ip), "%u.%u.%u.%u", NIPQUAD(e->addr));
        seq_printf(seq, "%-5u %-15s %02x:%02x:%02x:%02x:%02x:%02x  %4d"
               "  %3u     %c   %7u   %4u %s\n",
               e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
               e->dmac[3], e->dmac[4], e->dmac[5],
               e->vlan & EVL_VLID_MASK, vlan_prio(e),
               l2e_state(e), atomic_load_acq_int(&e->refcnt), e->smt_idx,
               e->neigh ? e->neigh->dev->name : "");
        mtx_unlock(&e->lock);
    }
    return 0;
}

#endif