1 /* $Id: bsd-bpf.c,v 1.9 2007/02/21 01:24:50 fredette Exp $ */
2 
3 /* host/bsd/bsd-bpf.c - BSD Berkeley Packet Filter Ethernet support: */
4 
5 /*
6  * Copyright (c) 2001, 2003 Matt Fredette
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Matt Fredette.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 #include <tme/common.h>
37 _TME_RCSID("$Id: bsd-bpf.c,v 1.9 2007/02/21 01:24:50 fredette Exp $");
38 
39 /* includes: */
40 #include "bsd-impl.h"
41 #include <tme/generic/ethernet.h>
42 #include <tme/threads.h>
43 #include <tme/misc.h>
44 #include <stdio.h>
45 #include <string.h>
46 #include <errno.h>
47 #include <fcntl.h>
48 #include <netdb.h>
49 #include <sys/param.h>
50 #include <sys/socket.h>
51 #include <sys/stat.h>
52 #include <net/if.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in.h>
55 #if defined(HAVE_SYS_SOCKIO_H)
56 #include <sys/sockio.h>
57 #elif defined(HAVE_SYS_SOCKETIO_H)
58 #include <sys/socketio.h>
59 #endif /* HAVE_SYS_SOCKETIO_H */
60 #include <sys/ioctl.h>
61 #ifdef HAVE_IOCTLS_H
62 #include <ioctls.h>
63 #endif /* HAVE_IOCTLS_H */
64 #ifdef HAVE_NET_IF_ETHER_H
65 #include <net/if_ether.h>
66 #endif /* HAVE_NET_IF_ETHER_H */
67 #ifdef HAVE_NET_ETHERNET_H
68 #include <net/ethernet.h>
69 #endif /* HAVE_NET_ETHERNET_H */
70 #include <netinet/ip.h>
71 #ifdef HAVE_NET_IF_DL_H
72 #include <net/if_dl.h>
73 #endif /* HAVE_NET_IF_DL_H */
74 #include <arpa/inet.h>
75 #include <net/bpf.h>
76 
77 /* macros: */
78 
79 /* ARP and RARP opcodes: */
80 #define TME_NET_ARP_OPCODE_REQUEST	(0x0001)
81 #define TME_NET_ARP_OPCODE_REPLY	(0x0002)
82 #define TME_NET_ARP_OPCODE_REV_REQUEST	(0x0003)
83 #define TME_NET_ARP_OPCODE_REV_REPLY	(0x0004)
84 
85 /* the callout flags: */
86 #define TME_BSD_BPF_CALLOUT_CHECK	(0)
87 #define TME_BSD_BPF_CALLOUT_RUNNING	TME_BIT(0)
88 #define TME_BSD_BPF_CALLOUTS_MASK	(-2)
89 #define  TME_BSD_BPF_CALLOUT_CTRL	TME_BIT(1)
90 #define  TME_BSD_BPF_CALLOUT_READ	TME_BIT(2)
91 
92 /* structures: */
93 
/* our internal data structure: */
struct tme_bsd_bpf {

  /* backpointer to our element: */
  struct tme_element *tme_bsd_bpf_element;

  /* our mutex.  it protects the mutable state below; the callout
     function must be entered with it held: */
  tme_mutex_t tme_bsd_bpf_mutex;

  /* our reader condition.  the reader thread waits on this until the
     packet buffer has been drained: */
  tme_cond_t tme_bsd_bpf_cond_reader;

  /* the callout flags (TME_BSD_BPF_CALLOUT_*): */
  unsigned int tme_bsd_bpf_callout_flags;

  /* our Ethernet connection: */
  struct tme_ethernet_connection *tme_bsd_bpf_eth_connection;

  /* the BPF file descriptor: */
  int tme_bsd_bpf_fd;

  /* the size of the packet buffer for the interface: */
  size_t tme_bsd_bpf_buffer_size;

  /* the packet buffer for the interface.  it holds zero or more
     bpf_hdr-prefixed captured packets: */
  tme_uint8_t *tme_bsd_bpf_buffer;

  /* the next offset within the packet buffer, and the end of the data
     in the packet buffer.  offset == end means the buffer is empty: */
  size_t tme_bsd_bpf_buffer_offset;
  size_t tme_bsd_bpf_buffer_end;

  /* when nonzero, the packet delay time, in microseconds: */
  unsigned long tme_bsd_bpf_delay_time;

  /* all packets received on or before this time can be released: */
  struct timeval tme_bsd_bpf_delay_release;

  /* when nonzero, the packet delay sleep time, in microseconds: */
  unsigned long tme_bsd_bpf_delay_sleep;

  /* when nonzero, the packet delay is sleeping: */
  int tme_bsd_bpf_delay_sleeping;
};
138 
/* a crude ARP header.  multibyte fields are kept as plain byte
   arrays in network (big-endian) order, so the structure has no
   alignment or byte-order dependencies: */
struct tme_net_arp_header {
  tme_uint8_t tme_net_arp_header_hardware[2];
  tme_uint8_t tme_net_arp_header_protocol[2];
  tme_uint8_t tme_net_arp_header_hardware_length;
  tme_uint8_t tme_net_arp_header_protocol_length;
  tme_uint8_t tme_net_arp_header_opcode[2];
};
147 
/* a crude partial IPv4 header.  only the leading fields up to and
   including the total length are declared, which is all the CRC
   detection below needs; the length is kept as bytes in network
   (big-endian) order: */
struct tme_net_ipv4_header {
  tme_uint8_t tme_net_ipv4_header_v_hl;
  tme_uint8_t tme_net_ipv4_header_tos;
  tme_uint8_t tme_net_ipv4_header_length[2];
};
154 
/* the accept and reject packet insns.  a BPF return value is the
   number of packet bytes to capture: (u_int) -1 captures the whole
   packet, and zero rejects it: */
static const struct bpf_insn _tme_bsd_bpf_insn_accept = BPF_STMT(BPF_RET + BPF_K, (u_int) -1);
static const struct bpf_insn _tme_bsd_bpf_insn_reject = BPF_STMT(BPF_RET + BPF_K, 0);
158 
/* this creates a BPF filter that accepts Ethernet packets with
   destination addresses in the configured set.  the broadcast address
   must be in this set, it isn't accepted automatically.

   the filter is built backwards, from the end of bpf_filter towards
   the front: *_first_pc is the PC of the earliest insn emitted so
   far, and is decremented as insns are added.  the caller must
   already have stored the accept insn at PC bpf_filter_size - 2 and
   the reject insn at PC bpf_filter_size - 1.

   each level of the recursion matches the single address byte at
   offset prefix_len, for every configured address that begins with
   the given prefix_len-byte prefix.  it returns the PC of the first
   insn of this level's matcher (the byte load): */
static int
_tme_bsd_bpf_filter(struct tme_ethernet_config *config,
		    const tme_uint8_t *prefix,
		    unsigned int prefix_len,
		    struct bpf_insn *bpf_filter,
		    int bpf_filter_size,
		    int *_first_pc)
{
  unsigned int addr_i;
  tme_uint8_t byte;
  /* one bit per possible byte value, so each distinct byte value at
     this offset is matched only once: */
  tme_uint8_t byte_bitmap[(1 << (8 * sizeof(byte))) >> 3];
  int match_pc, miss_pc, this_pc;

  /* clear the byte bitmap: */
  memset(byte_bitmap, 0, sizeof(byte_bitmap));

  /* the last instruction jumps to the reject insn when it fails: */
  miss_pc = bpf_filter_size - 1;

  /* loop over all of the addresses: */
  for (addr_i = 0;
       addr_i < config->tme_ethernet_config_addr_count;
       addr_i++) {

    /* skip this address if it doesn't match the prefix: */
    if (prefix_len > 0
	&& memcmp(config->tme_ethernet_config_addrs[addr_i],
		  prefix,
		  prefix_len)) {
      continue;
    }

    /* get the next byte, and skip this address if this byte has
       already been done: */
    byte = config->tme_ethernet_config_addrs[addr_i][prefix_len];
    if (byte_bitmap[byte >> 3] & TME_BIT(byte & 7)) {
      continue;
    }
    byte_bitmap[byte >> 3] |= TME_BIT(byte & 7);

    /* get the PC of the instruction to branch to if this byte
       matches.  if this is the last byte of the address, the branch
       target is the accept insn, otherwise recurse and get the first
       insn of the rest of the matcher: */
    match_pc = ((prefix_len == (TME_ETHERNET_ADDR_SIZE - 1))
		? bpf_filter_size - 2
		: _tme_bsd_bpf_filter(config,
				      config->tme_ethernet_config_addrs[addr_i],
				      prefix_len + 1,
				      bpf_filter,
				      bpf_filter_size,
				      _first_pc));

    /* add this testing instruction.  NB that BPF conditional jump
       offsets are relative to the insn after the jump, hence the
       (this_pc + 1) terms: */
    this_pc = --(*_first_pc);
    assert(this_pc >= 0);
    bpf_filter[this_pc].code = BPF_JMP + BPF_JEQ + BPF_K;
    bpf_filter[this_pc].jt = match_pc - (this_pc + 1);
    bpf_filter[this_pc].jf = miss_pc - (this_pc + 1);
    bpf_filter[this_pc].k = byte;

    /* update the miss pc, so that a failed compare falls into the
       tests already emitted for the other byte values at this
       offset: */
    miss_pc = this_pc;
  }

  /* add this load instruction, which loads the address byte at
     offset prefix_len for the compares emitted above: */
  this_pc = --(*_first_pc);
  assert(this_pc >= 0);
  bpf_filter[this_pc].code = BPF_LD + BPF_B + BPF_ABS;
  bpf_filter[this_pc].k = prefix_len;

  /* return our pc: */
  return (this_pc);
}
236 
/* this dumps a BPF filter to stderr.  not all insns are supported,
   just those used by our address matching filters; anything else is
   printed as "??": */
void
_tme_bsd_bpf_dump_filter(const struct bpf_program *program)
{
  FILE *out;
  const struct bpf_insn *insn;
  unsigned int pc;
  char width;
  const char *op_name;

  out = stderr;
  insn = program->bf_insns;
  for (pc = 0; pc < (unsigned int) program->bf_len; pc++, insn++) {

    /* the PC: */
    fprintf(out, "%d:\t", pc);

    /* a load insn: */
    if (BPF_CLASS(insn->code) == BPF_LD) {

      /* decode the load width: */
      width = (BPF_SIZE(insn->code) == BPF_B
	       ? 'b'
	       : (BPF_SIZE(insn->code) == BPF_H
		  ? 'w'
		  : (BPF_SIZE(insn->code) == BPF_W
		     ? 'l'
		     : '?')));
      fprintf(out, "ld.%c ", width);

      /* only absolute addressing is decoded: */
      if (BPF_MODE(insn->code) == BPF_ABS) {
	fprintf(out, "0x%x", insn->k);
      }
      else {
	fprintf(out, "??");
      }
    }

    /* a jump insn: */
    else if (BPF_CLASS(insn->code) == BPF_JMP) {

      op_name = (BPF_OP(insn->code) == BPF_JEQ ? "jeq" : "??");
      fprintf(out, "%s ", op_name);

      /* the comparison operand: */
      if (BPF_SRC(insn->code) == BPF_K) {
	fprintf(out, "#0x%x", insn->k);
      }
      else if (BPF_SRC(insn->code) == BPF_X) {
	fprintf(out, "x");
      }
      else {
	fprintf(out, "??");
      }

      /* the true and false branch targets, as absolute PCs: */
      fprintf(out, ", %d, %d", pc + 1 + insn->jt, pc + 1 + insn->jf);
    }

    /* a return insn: */
    else if (BPF_CLASS(insn->code) == BPF_RET) {
      if (BPF_RVAL(insn->code) == BPF_A) {
	fprintf(out, "ret a");
      }
      else if (BPF_RVAL(insn->code) == BPF_X) {
	fprintf(out, "ret x");
      }
      else if (BPF_RVAL(insn->code) == BPF_K) {
	fprintf(out, "ret #0x%x", insn->k);
      }
      else {
	fprintf(out, "ret ??");
      }
    }

    /* anything else: */
    else {
      fprintf(out, "??");
    }

    putc('\n', out);
  }
}
310 
/* the bpf callout function.  it must be called with the mutex locked.

   this makes any needed calls out to our Ethernet connection,
   dropping and retaking the mutex around each call.  only one thread
   runs callouts at a time: a second caller just merges its new
   callout flags into the structure and returns, and the running
   thread keeps looping until no callout flags remain set: */
static void
_tme_bsd_bpf_callout(struct tme_bsd_bpf *bpf, int new_callouts)
{
  struct tme_ethernet_connection *conn_eth;
  int callouts, later_callouts;
  unsigned int ctrl;
  int rc;
  int status;
  tme_ethernet_fid_t frame_id;
  struct tme_ethernet_frame_chunk frame_chunk_buffer;
  tme_uint8_t frame[TME_ETHERNET_FRAME_MAX];

  /* add in any new callouts: */
  bpf->tme_bsd_bpf_callout_flags |= new_callouts;

  /* if this function is already running in another thread, simply
     return now.  the other thread will do our work: */
  if (bpf->tme_bsd_bpf_callout_flags & TME_BSD_BPF_CALLOUT_RUNNING) {
    return;
  }

  /* callouts are now running: */
  bpf->tme_bsd_bpf_callout_flags |= TME_BSD_BPF_CALLOUT_RUNNING;

  /* assume that we won't need any later callouts: */
  later_callouts = 0;

  /* loop while callouts are needed: */
  for (; (callouts = bpf->tme_bsd_bpf_callout_flags) & TME_BSD_BPF_CALLOUTS_MASK; ) {

    /* clear the needed callouts: */
    bpf->tme_bsd_bpf_callout_flags = callouts & ~TME_BSD_BPF_CALLOUTS_MASK;
    callouts &= TME_BSD_BPF_CALLOUTS_MASK;

    /* get our Ethernet connection: */
    conn_eth = bpf->tme_bsd_bpf_eth_connection;

    /* if we need to call out new control information: */
    if (callouts & TME_BSD_BPF_CALLOUT_CTRL) {

      /* form the new ctrl.  we are readable whenever undelivered
	 packet data remains in the buffer: */
      ctrl = 0;
      if (bpf->tme_bsd_bpf_buffer_offset
	  < bpf->tme_bsd_bpf_buffer_end) {
	ctrl |= TME_ETHERNET_CTRL_OK_READ;
      }

      /* unlock the mutex: */
      tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);

      /* do the callout: */
      rc = (conn_eth != NULL
	    ? ((*conn_eth->tme_ethernet_connection_ctrl)
	       (conn_eth,
		ctrl))
	    : TME_OK);

      /* lock the mutex: */
      tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);

      /* if the callout was unsuccessful, remember that at some later
	 time this callout should be attempted again: */
      if (rc != TME_OK) {
	later_callouts |= TME_BSD_BPF_CALLOUT_CTRL;
      }
    }

    /* if the Ethernet is readable: */
    if (callouts & TME_BSD_BPF_CALLOUT_READ) {

      /* unlock the mutex: */
      tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);

      /* make a frame chunk to receive this frame: */
      frame_chunk_buffer.tme_ethernet_frame_chunk_next = NULL;
      frame_chunk_buffer.tme_ethernet_frame_chunk_bytes = frame;
      frame_chunk_buffer.tme_ethernet_frame_chunk_bytes_count
	= sizeof(frame);

      /* do the callout: */
      rc = (conn_eth == NULL
	    ? TME_OK
	    : ((*conn_eth->tme_ethernet_connection_read)
	       (conn_eth,
		&frame_id,
		&frame_chunk_buffer,
		TME_ETHERNET_READ_NEXT)));

      /* lock the mutex: */
      tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);

      /* if the read was successful, rc is the frame length: */
      if (rc > 0) {

	/* check the size of the frame: */
	assert(rc <= sizeof(frame));

	/* do the write.  this injects the frame into the real
	   network through the BPF device: */
	status = tme_thread_write(bpf->tme_bsd_bpf_fd, frame, rc);

	/* writes must succeed: */
	assert (status == rc);

	/* mark that we need to loop to callout to read more frames: */
	bpf->tme_bsd_bpf_callout_flags |= TME_BSD_BPF_CALLOUT_READ;
      }

      /* otherwise, the read failed.  convention dictates that we
	 forget that the connection was readable, which we already
	 have done by clearing the CALLOUT_READ flag: */
    }

  }

  /* put in any later callouts, and clear that callouts are running: */
  bpf->tme_bsd_bpf_callout_flags = later_callouts;
}
429 
430 /* the BPF reader thread: */
431 static void
_tme_bsd_bpf_th_reader(struct tme_bsd_bpf * bpf)432 _tme_bsd_bpf_th_reader(struct tme_bsd_bpf *bpf)
433 {
434   ssize_t buffer_end;
435   unsigned long sleep_usec;
436 
437   /* lock the mutex: */
438   tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);
439 
440   /* loop forever: */
441   for (;;) {
442 
443     /* if the delay sleeping flag is set: */
444     if (bpf->tme_bsd_bpf_delay_sleeping) {
445 
446       /* clear the delay sleeping flag: */
447       bpf->tme_bsd_bpf_delay_sleeping = FALSE;
448 
449       /* call out that we can be read again: */
450       _tme_bsd_bpf_callout(bpf, TME_BSD_BPF_CALLOUT_CTRL);
451     }
452 
453     /* if a delay has been requested: */
454     sleep_usec = bpf->tme_bsd_bpf_delay_sleep;
455     if (sleep_usec > 0) {
456 
457       /* clear the delay sleep time: */
458       bpf->tme_bsd_bpf_delay_sleep = 0;
459 
460       /* set the delay sleeping flag: */
461       bpf->tme_bsd_bpf_delay_sleeping = TRUE;
462 
463       /* unlock our mutex: */
464       tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);
465 
466       /* sleep for the delay sleep time: */
467       tme_thread_sleep_yield(0, sleep_usec);
468 
469       /* lock our mutex: */
470       tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);
471 
472       continue;
473     }
474 
475     /* if the buffer is not empty, wait until either it is,
476        or we're asked to do a delay: */
477     if (bpf->tme_bsd_bpf_buffer_offset
478 	< bpf->tme_bsd_bpf_buffer_end) {
479       tme_cond_wait_yield(&bpf->tme_bsd_bpf_cond_reader,
480 			  &bpf->tme_bsd_bpf_mutex);
481       continue;
482     }
483 
484     /* unlock the mutex: */
485     tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);
486 
487     /* read the BPF socket: */
488     tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, TME_OK,
489 	    (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
490 	     _("calling read")));
491     buffer_end =
492       tme_thread_read_yield(bpf->tme_bsd_bpf_fd,
493 			    bpf->tme_bsd_bpf_buffer,
494 			    bpf->tme_bsd_bpf_buffer_size);
495 
496     /* lock the mutex: */
497     tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);
498 
499     /* if the read failed: */
500     if (buffer_end <= 0) {
501       tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, errno,
502 	      (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
503 	       _("failed to read packets")));
504       continue;
505     }
506 
507     /* the read succeeded: */
508     tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, TME_OK,
509 	    (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
510 	     _("read %ld bytes of packets"), (long) buffer_end));
511     bpf->tme_bsd_bpf_buffer_offset = 0;
512     bpf->tme_bsd_bpf_buffer_end = buffer_end;
513 
514     /* call out that we can be read again: */
515     _tme_bsd_bpf_callout(bpf, TME_BSD_BPF_CALLOUT_CTRL);
516   }
517   /* NOTREACHED */
518 }
519 
/* this is called when the ethernet configuration changes.

   this rebuilds the BPF destination-address filter from the
   configured address set (or from the promiscuous flag) and installs
   it on the BPF device with BIOCSETF.

   returns TME_OK on success, or an errno value if installing the
   filter fails: */
static int
_tme_bsd_bpf_config(struct tme_ethernet_connection *conn_eth,
		    struct tme_ethernet_config *config)
{
  struct tme_bsd_bpf *bpf;
  struct bpf_insn *bpf_filter;
  struct bpf_program program;
  int bpf_filter_size, first_pc;
  int rc;

  /* recover our data structures: */
  bpf = conn_eth->tme_ethernet_connection.tme_connection_element->tme_element_private;

  /* assume we will succeed: */
  rc = TME_OK;

  /* lock the mutex: */
  tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);

  /* allocate space for the worst-case filter: one insn for the packet
     accept, one insn for the packet reject, and TME_ETHERNET_ADDR_SIZE
     * 2 insns for each address - one insn to load an address byte and
     one insn to test it and branch: */
  bpf_filter_size = (1
		     + 1
		     + ((1 + 1)
			* TME_ETHERNET_ADDR_SIZE
			* config->tme_ethernet_config_addr_count));
  bpf_filter = tme_new(struct bpf_insn, bpf_filter_size);

  /* the filter is built backwards from the end of the buffer;
     first_pc tracks the earliest insn emitted so far: */
  first_pc = bpf_filter_size;

  /* if this Ethernet is promiscuous, we will accept all packets: */
  if (config->tme_ethernet_config_flags & TME_ETHERNET_CONFIG_PROMISC) {
    bpf_filter[--first_pc] = _tme_bsd_bpf_insn_accept;
  }

  /* if this Ethernet does have a set of addresses, we will accept all
     packets for one of those addresses: */
  else if (config->tme_ethernet_config_addr_count > 0) {

    /* the last insn in the filter is always the packet reject,
       and the next-to-last insn in the filter is always the
       packet accept.  _tme_bsd_bpf_filter depends on this: */
    bpf_filter[--first_pc] = _tme_bsd_bpf_insn_reject;
    bpf_filter[--first_pc] = _tme_bsd_bpf_insn_accept;

    /* make the address filter: */
    _tme_bsd_bpf_filter(config,
			NULL,
			0,
			bpf_filter,
			bpf_filter_size,
			&first_pc);
  }

  /* otherwise this filter doesn't need to accept any packets: */
  else {
    bpf_filter[--first_pc] = _tme_bsd_bpf_insn_reject;
  }

  /* set the filter on the BPF device: */
  program.bf_len = bpf_filter_size - first_pc;
  program.bf_insns = bpf_filter + first_pc;
  if (ioctl(bpf->tme_bsd_bpf_fd, BIOCSETF, &program) < 0) {

    /* capture errno before tme_log() can clobber it; previously rc
       was read from errno only after the log call: */
    rc = errno;
    tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, rc,
	    (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
	     _("failed to set the filter")));
  }

  /* free the filter: */
  tme_free(bpf_filter);

  /* unlock the mutex: */
  tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);

  /* done: */
  return (rc);
}
600 
601 /* this is called when control lines change: */
602 static int
_tme_bsd_bpf_ctrl(struct tme_ethernet_connection * conn_eth,unsigned int ctrl)603 _tme_bsd_bpf_ctrl(struct tme_ethernet_connection *conn_eth,
604 		  unsigned int ctrl)
605 {
606   struct tme_bsd_bpf *bpf;
607   int new_callouts;
608 
609   /* recover our data structures: */
610   bpf = conn_eth->tme_ethernet_connection.tme_connection_element->tme_element_private;
611 
612   /* assume that we won't need any new callouts: */
613   new_callouts = 0;
614 
615   /* lock the mutex: */
616   tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);
617 
618   /* if this connection is readable, call out a read: */
619   if (ctrl & TME_ETHERNET_CTRL_OK_READ) {
620     new_callouts |= TME_BSD_BPF_CALLOUT_READ;
621   }
622 
623   /* make any new callouts: */
624   _tme_bsd_bpf_callout(bpf, new_callouts);
625 
626   /* unlock the mutex: */
627   tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);
628 
629   return (TME_OK);
630 }
631 
/* this is called to read a frame.

   this delivers the next captured packet from the BPF buffer as a
   tme ethernet frame.  a TME_ETHERNET_READ_PEEK read leaves the
   packet in the buffer; a normal read consumes it.  returns the
   number of bytes copied out on success, or -ENOENT if no packet is
   available (possibly because packet delaying is holding one back): */
static int
_tme_bsd_bpf_read(struct tme_ethernet_connection *conn_eth,
		  tme_ethernet_fid_t *_frame_id,
		  struct tme_ethernet_frame_chunk *frame_chunks,
		  unsigned int flags)
{
  struct tme_bsd_bpf *bpf;
  struct bpf_hdr the_bpf_header;
  struct tme_ethernet_frame_chunk frame_chunk_buffer;
  size_t buffer_offset_next;
  const struct tme_ethernet_header *ethernet_header;
  const struct tme_net_arp_header *arp_header;
  const struct tme_net_ipv4_header *ipv4_header;
  tme_uint16_t ethertype;
  unsigned int count;
  int rc;

  /* recover our data structure: */
  bpf = conn_eth->tme_ethernet_connection.tme_connection_element->tme_element_private;

  /* lock our mutex: */
  tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);

  /* assume that we won't be able to return a packet: */
  rc = -ENOENT;

  /* loop until we have a good captured packet or until we
     exhaust the buffer: */
  for (;;) {

    /* if there's not enough for a BPF header, flush the buffer: */
    if ((bpf->tme_bsd_bpf_buffer_offset
	 + sizeof(the_bpf_header))
	> bpf->tme_bsd_bpf_buffer_end) {
      if (bpf->tme_bsd_bpf_buffer_offset
	  != bpf->tme_bsd_bpf_buffer_end) {
	tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, TME_OK,
		(&bpf->tme_bsd_bpf_element->tme_element_log_handle,
		 _("flushed garbage BPF header bytes")));
	bpf->tme_bsd_bpf_buffer_offset = bpf->tme_bsd_bpf_buffer_end;
      }
      break;
    }

    /* get the BPF header and check it.  the next packet normally
       starts at the BPF word-aligned boundary after this one, except
       that the last packet in the buffer may be unpadded: */
    memcpy(&the_bpf_header,
	   bpf->tme_bsd_bpf_buffer
	   + bpf->tme_bsd_bpf_buffer_offset,
	   sizeof(the_bpf_header));
    buffer_offset_next
      = (((bpf->tme_bsd_bpf_buffer_offset
	   + the_bpf_header.bh_hdrlen
	   + the_bpf_header.bh_datalen)
	  == bpf->tme_bsd_bpf_buffer_end)
	 ? bpf->tme_bsd_bpf_buffer_end
	 : (bpf->tme_bsd_bpf_buffer_offset
	    + BPF_WORDALIGN(the_bpf_header.bh_hdrlen
			    + the_bpf_header.bh_datalen)));
    bpf->tme_bsd_bpf_buffer_offset += the_bpf_header.bh_hdrlen;

    /* if we're missing some part of the packet: */
    if (the_bpf_header.bh_caplen != the_bpf_header.bh_datalen
	|| ((bpf->tme_bsd_bpf_buffer_offset + the_bpf_header.bh_datalen)
	    > bpf->tme_bsd_bpf_buffer_end)) {
      tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, TME_OK,
	      (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
	       _("flushed truncated BPF packet")));
      bpf->tme_bsd_bpf_buffer_offset = buffer_offset_next;
      continue;
    }

    /* if this packet isn't big enough to even have an Ethernet header: */
    if (the_bpf_header.bh_datalen < sizeof(struct tme_ethernet_header)) {
      tme_log(&bpf->tme_bsd_bpf_element->tme_element_log_handle, 1, TME_OK,
	      (&bpf->tme_bsd_bpf_element->tme_element_log_handle,
	       _("flushed short BPF packet")));
      bpf->tme_bsd_bpf_buffer_offset = buffer_offset_next;
      continue;
    }

    /* if packets need to be delayed: */
    if (bpf->tme_bsd_bpf_delay_time > 0) {

      /* if the current release time is before this packet's time: */
      if ((bpf->tme_bsd_bpf_delay_release.tv_sec
	   < the_bpf_header.bh_tstamp.tv_sec)
	  || ((bpf->tme_bsd_bpf_delay_release.tv_sec
	       == the_bpf_header.bh_tstamp.tv_sec)
	      && (bpf->tme_bsd_bpf_delay_release.tv_usec
		  < the_bpf_header.bh_tstamp.tv_usec))) {

	/* update the current release time, by taking the current time
	   and subtracting the delay time (borrowing a second if the
	   microseconds would underflow): */
	gettimeofday(&bpf->tme_bsd_bpf_delay_release, NULL);
	if (bpf->tme_bsd_bpf_delay_release.tv_usec < bpf->tme_bsd_bpf_delay_time) {
	  bpf->tme_bsd_bpf_delay_release.tv_usec += 1000000UL;
	  bpf->tme_bsd_bpf_delay_release.tv_sec--;
	}
	bpf->tme_bsd_bpf_delay_release.tv_usec -= bpf->tme_bsd_bpf_delay_time;
      }

      /* if the current release time is still before this packet's
         time: */
      if ((bpf->tme_bsd_bpf_delay_release.tv_sec
	   < the_bpf_header.bh_tstamp.tv_sec)
	  || ((bpf->tme_bsd_bpf_delay_release.tv_sec
	       == the_bpf_header.bh_tstamp.tv_sec)
	      && (bpf->tme_bsd_bpf_delay_release.tv_usec
		  < the_bpf_header.bh_tstamp.tv_usec))) {

	/* set the sleep time.  the delay time is less than a second,
	   so the packet's timestamp can be at most one second ahead
	   of the release time: */
	assert ((bpf->tme_bsd_bpf_delay_release.tv_sec
		 == the_bpf_header.bh_tstamp.tv_sec)
		|| ((bpf->tme_bsd_bpf_delay_release.tv_sec + 1)
		    == the_bpf_header.bh_tstamp.tv_sec));
	bpf->tme_bsd_bpf_delay_sleep
	  = (((bpf->tme_bsd_bpf_delay_release.tv_sec
	       == the_bpf_header.bh_tstamp.tv_sec)
	      ? 0
	      : 1000000UL)
	     + the_bpf_header.bh_tstamp.tv_usec
	     - bpf->tme_bsd_bpf_delay_release.tv_usec);

	/* rewind the buffer pointer, so this packet is seen again
	   after the delay: */
	bpf->tme_bsd_bpf_buffer_offset -= the_bpf_header.bh_hdrlen;

	/* stop now: */
	break;
      }
    }

    /* form the single frame chunk: */
    frame_chunk_buffer.tme_ethernet_frame_chunk_next = NULL;
    frame_chunk_buffer.tme_ethernet_frame_chunk_bytes
      = bpf->tme_bsd_bpf_buffer + bpf->tme_bsd_bpf_buffer_offset;
    frame_chunk_buffer.tme_ethernet_frame_chunk_bytes_count
      = the_bpf_header.bh_datalen;

    /* some network interfaces haven't removed the CRC yet when they
       pass a packet to BPF.  packets in a tme ethernet connection
       never have CRCs, so here we attempt to detect them and strip
       them off.

       unfortunately there's no general way to do this.  there's a
       chance that the last four bytes of an actual packet just
       happen to be the Ethernet CRC of all of the previous bytes in
       the packet, so we can't just strip off what looks like a
       valid CRC, plus the CRC calculation itself isn't cheap.

       the only way to do this well seems to be to look at the
       protocol.  if we can determine what the correct minimum size
       of the packet should be based on the protocol, and the size
       we got is four bytes more than that, assume that the last four
       bytes are a CRC and strip it off: */

    /* assume that we won't be able to figure out the correct minimum
       size of the packet: */
    count = 0;

    /* get the Ethernet header and packet type (the type field is in
       network byte order): */
    ethernet_header = (struct tme_ethernet_header *) (bpf->tme_bsd_bpf_buffer + bpf->tme_bsd_bpf_buffer_offset);
    ethertype = ethernet_header->tme_ethernet_header_type[0];
    ethertype = (ethertype << 8) + ethernet_header->tme_ethernet_header_type[1];

    /* dispatch on the packet type: */
    switch (ethertype) {

      /* an ARP or RARP packet: */
    case TME_ETHERNET_TYPE_ARP:
    case TME_ETHERNET_TYPE_RARP:
      arp_header = (struct tme_net_arp_header *) (ethernet_header + 1);
      switch ((((tme_uint16_t) arp_header->tme_net_arp_header_opcode[0]) << 8)
	      + arp_header->tme_net_arp_header_opcode[1]) {
      case TME_NET_ARP_OPCODE_REQUEST:
      case TME_NET_ARP_OPCODE_REPLY:
      case TME_NET_ARP_OPCODE_REV_REQUEST:
      case TME_NET_ARP_OPCODE_REV_REPLY:
	/* the minimum size is the fixed headers plus the sender and
	   target hardware and protocol addresses: */
	count = (TME_ETHERNET_HEADER_SIZE
		 + sizeof(struct tme_net_arp_header)
		 + (2 * arp_header->tme_net_arp_header_hardware_length)
		 + (2 * arp_header->tme_net_arp_header_protocol_length));
	/* FALLTHROUGH */
      default:
	break;
      }
      break;

      /* an IPv4 packet.  the total length field covers the whole IP
	 datagram, so adding the Ethernet header size gives the
	 minimum frame size: */
    case TME_ETHERNET_TYPE_IPV4:
      ipv4_header = (struct tme_net_ipv4_header *) (ethernet_header + 1);
      count = ipv4_header->tme_net_ipv4_header_length[0];
      count = (count << 8) + ipv4_header->tme_net_ipv4_header_length[1];
      count += TME_ETHERNET_HEADER_SIZE;
      break;

    default:
      break;
    }

    /* if we were able to figure out the correct minimum size of the
       packet, and the packet from BPF is exactly that minimum size
       plus the CRC size, set the length of the packet to be the
       correct minimum size.  NB that we can't let the packet become
       smaller than (TME_ETHERNET_FRAME_MIN - TME_ETHERNET_CRC_SIZE): */
    if (count != 0) {
      count = TME_MAX(count,
		      (TME_ETHERNET_FRAME_MIN
		       - TME_ETHERNET_CRC_SIZE));
      if (frame_chunk_buffer.tme_ethernet_frame_chunk_bytes_count
	  == (count + TME_ETHERNET_CRC_SIZE)) {
	frame_chunk_buffer.tme_ethernet_frame_chunk_bytes_count = count;
      }
    }

    /* copy out the frame: */
    count = tme_ethernet_chunks_copy(frame_chunks, &frame_chunk_buffer);

    /* if this is a peek: */
    if (flags & TME_ETHERNET_READ_PEEK) {

      /* rewind the buffer pointer, so the packet stays available: */
      bpf->tme_bsd_bpf_buffer_offset -= the_bpf_header.bh_hdrlen;
    }

    /* otherwise, this isn't a peek: */
    else {

      /* update the buffer pointer, consuming the packet: */
      bpf->tme_bsd_bpf_buffer_offset = buffer_offset_next;
    }

    /* success: */
    rc = count;
    break;
  }

  /* if the buffer is empty, or if we failed to read a packet,
     wake up the reader: */
  if ((bpf->tme_bsd_bpf_buffer_offset
       >= bpf->tme_bsd_bpf_buffer_end)
      || rc <= 0) {
    tme_cond_notify(&bpf->tme_bsd_bpf_cond_reader, TRUE);
  }

  /* unlock our mutex: */
  tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);

  /* done: */
  return (rc);
}
882 
883 /* this makes a new Ethernet connection: */
884 static int
_tme_bsd_bpf_connection_make(struct tme_connection * conn,unsigned int state)885 _tme_bsd_bpf_connection_make(struct tme_connection *conn, unsigned int state)
886 {
887   struct tme_bsd_bpf *bpf;
888   struct tme_ethernet_connection *conn_eth;
889   struct tme_ethernet_connection *conn_eth_other;
890 
891   /* recover our data structures: */
892   bpf = conn->tme_connection_element->tme_element_private;
893   conn_eth = (struct tme_ethernet_connection *) conn;
894   conn_eth_other = (struct tme_ethernet_connection *) conn->tme_connection_other;
895 
896   /* both sides must be Ethernet connections: */
897   assert(conn->tme_connection_type == TME_CONNECTION_ETHERNET);
898   assert(conn->tme_connection_other->tme_connection_type == TME_CONNECTION_ETHERNET);
899 
900   /* we're always set up to answer calls across the connection, so we
901      only have to do work when the connection has gone full, namely
902      taking the other side of the connection: */
903   if (state == TME_CONNECTION_FULL) {
904 
905     /* lock our mutex: */
906     tme_mutex_lock(&bpf->tme_bsd_bpf_mutex);
907 
908     /* save our connection: */
909     bpf->tme_bsd_bpf_eth_connection = conn_eth_other;
910 
911     /* unlock our mutex: */
912     tme_mutex_unlock(&bpf->tme_bsd_bpf_mutex);
913   }
914 
915   return (TME_OK);
916 }
917 
/* this breaks a connection: */
static int
_tme_bsd_bpf_connection_break(struct tme_connection *conn, unsigned int state)
{
  /* breaking a BPF Ethernet connection is not implemented; this is
     deliberately fatal so an attempt is caught immediately: */
  abort();
}
924 
925 /* this makes a new connection side for a BPF: */
926 static int
_tme_bsd_bpf_connections_new(struct tme_element * element,const char * const * args,struct tme_connection ** _conns,char ** _output)927 _tme_bsd_bpf_connections_new(struct tme_element *element,
928 			     const char * const *args,
929 			     struct tme_connection **_conns,
930 			     char **_output)
931 {
932   struct tme_bsd_bpf *bpf;
933   struct tme_ethernet_connection *conn_eth;
934   struct tme_connection *conn;
935 
936   /* recover our data structure: */
937   bpf = (struct tme_bsd_bpf *) element->tme_element_private;
938 
939   /* if we already have an Ethernet connection, do nothing: */
940   if (bpf->tme_bsd_bpf_eth_connection != NULL) {
941     return (TME_OK);
942   }
943 
944   /* allocate the new Ethernet connection: */
945   conn_eth = tme_new0(struct tme_ethernet_connection, 1);
946   conn = &conn_eth->tme_ethernet_connection;
947 
948   /* fill in the generic connection: */
949   conn->tme_connection_next = *_conns;
950   conn->tme_connection_type = TME_CONNECTION_ETHERNET;
951   conn->tme_connection_score = tme_ethernet_connection_score;
952   conn->tme_connection_make = _tme_bsd_bpf_connection_make;
953   conn->tme_connection_break = _tme_bsd_bpf_connection_break;
954 
955   /* fill in the Ethernet connection: */
956   conn_eth->tme_ethernet_connection_config = _tme_bsd_bpf_config;
957   conn_eth->tme_ethernet_connection_ctrl = _tme_bsd_bpf_ctrl;
958   conn_eth->tme_ethernet_connection_read = _tme_bsd_bpf_read;
959 
960   /* return the connection side possibility: */
961   *_conns = conn;
962 
963   /* done: */
964   return (TME_OK);
965 }
966 
967 /* the new BPF function: */
TME_ELEMENT_SUB_NEW_DECL(tme_host_bsd,bpf)968 TME_ELEMENT_SUB_NEW_DECL(tme_host_bsd,bpf) {
969   struct tme_bsd_bpf *bpf;
970   int bpf_fd;
971 #define DEV_BPF_FORMAT "/dev/bpf%d"
972   char dev_bpf_filename[sizeof(DEV_BPF_FORMAT) + (sizeof(int) * 3) + 1];
973   int minor;
974   int saved_errno;
975   u_int bpf_opt;
976   struct bpf_version version;
977   u_int packet_buffer_size;
978   const char *ifr_name_user;
979   struct ifreq *ifr;
980   unsigned long delay_time;
981   int arg_i;
982   int usage;
983   int rc;
984 
985   /* check our arguments: */
986   usage = 0;
987   ifr_name_user = NULL;
988   delay_time = 0;
989   arg_i = 1;
990   for (;;) {
991 
992     /* the interface we're supposed to use: */
993     if (TME_ARG_IS(args[arg_i + 0], "interface")
994 	&& args[arg_i + 1] != NULL) {
995       ifr_name_user = args[arg_i + 1];
996       arg_i += 2;
997     }
998 
999     /* a delay time in microseconds: */
1000     else if (TME_ARG_IS(args[arg_i + 0], "delay")
1001 	     && (delay_time = tme_misc_unumber_parse(args[arg_i + 1], 0)) > 0) {
1002       arg_i += 2;
1003     }
1004 
1005     /* if we ran out of arguments: */
1006     else if (args[arg_i + 0] == NULL) {
1007       break;
1008     }
1009 
1010     /* otherwise this is a bad argument: */
1011     else {
1012       tme_output_append_error(_output,
1013 			      "%s %s",
1014 			      args[arg_i],
1015 			      _("unexpected"));
1016       usage = TRUE;
1017       break;
1018     }
1019   }
1020 
1021   if (usage) {
1022     tme_output_append_error(_output,
1023 			    "%s %s [ interface %s ] [ delay %s ]",
1024 			    _("usage:"),
1025 			    args[0],
1026 			    _("INTERFACE"),
1027 			    _("MICROSECONDS"));
1028     return (EINVAL);
1029   }
1030 
1031   /* find the interface we will use: */
1032   rc = tme_bsd_if_find(ifr_name_user, &ifr, NULL, NULL);
1033   if (rc != TME_OK) {
1034     tme_output_append_error(_output, _("couldn't find an interface"));
1035     return (ENOENT);
1036   }
1037   tme_log(&element->tme_element_log_handle, 1, TME_OK,
1038 	  (&element->tme_element_log_handle,
1039 	   "using interface %s",
1040 	   ifr->ifr_name));
1041 
1042   /* loop trying to open a /dev/bpf device: */
1043   for (minor = 0;; minor++) {
1044 
1045     /* form the name of the next device to try, then try opening
1046        it. if we succeed, we're done: */
1047     sprintf(dev_bpf_filename, DEV_BPF_FORMAT, minor);
1048     tme_log(&element->tme_element_log_handle, 1, TME_OK,
1049 	    (&element->tme_element_log_handle,
1050 	     "trying %s",
1051 	     dev_bpf_filename));
1052     if ((bpf_fd = open(dev_bpf_filename, O_RDWR)) >= 0) {
1053       tme_log(&element->tme_element_log_handle, 1, TME_OK,
1054 	      (&element->tme_element_log_handle,
1055 	       "opened %s",
1056 	       dev_bpf_filename));
1057       break;
1058     }
1059 
1060     /* we failed to open this device.  if this device was simply
1061        busy, loop: */
1062     saved_errno = errno;
1063     tme_log(&element->tme_element_log_handle, 1, saved_errno,
1064 	    (&element->tme_element_log_handle,
1065 	     "%s", dev_bpf_filename));
1066     if (saved_errno == EBUSY
1067 	|| saved_errno == EACCES) {
1068       continue;
1069     }
1070 
1071     /* otherwise, we have failed: */
1072     return (saved_errno);
1073   }
1074 
1075   /* this macro helps in closing the BPF socket on error: */
1076 #define _TME_BPF_RAW_OPEN_ERROR(x) saved_errno = errno; x; errno = saved_errno
1077 
1078   /* check the BPF version: */
1079   if (ioctl(bpf_fd, BIOCVERSION, &version) < 0) {
1080     tme_log(&element->tme_element_log_handle, 1, errno,
1081 	    (&element->tme_element_log_handle,
1082 	     _("failed to get the BPF version on %s"),
1083 	     dev_bpf_filename));
1084     _TME_BPF_RAW_OPEN_ERROR(close(bpf_fd));
1085     return (errno);
1086   }
1087   if (version.bv_major != BPF_MAJOR_VERSION
1088       || version.bv_minor < BPF_MINOR_VERSION) {
1089     tme_log(&element->tme_element_log_handle, 1, errno,
1090 	    (&element->tme_element_log_handle,
1091 	     _("kernel BPF version is %d.%d, my BPF version is %d.%d"),
1092 	     version.bv_major, version.bv_minor,
1093 	     BPF_MAJOR_VERSION, BPF_MINOR_VERSION));
1094     close(bpf_fd);
1095     return (ENXIO);
1096   }
1097 
1098   /* put the BPF device into immediate mode: */
1099   bpf_opt = TRUE;
1100   if (ioctl(bpf_fd, BIOCIMMEDIATE, &bpf_opt) < 0) {
1101     tme_log(&element->tme_element_log_handle, 1, errno,
1102 	    (&element->tme_element_log_handle,
1103 	     _("failed to put %s into immediate mode"),
1104 	     dev_bpf_filename));
1105     _TME_BPF_RAW_OPEN_ERROR(close(bpf_fd));
1106     return (errno);
1107   }
1108 
1109   /* tell the BPF device we're providing complete Ethernet headers: */
1110   bpf_opt = TRUE;
1111   if (ioctl(bpf_fd, BIOCSHDRCMPLT, &bpf_opt) < 0) {
1112     tme_log(&element->tme_element_log_handle, 1, errno,
1113 	    (&element->tme_element_log_handle,
1114 	     _("failed to put %s into complete-headers mode"),
1115 	     dev_bpf_filename));
1116     _TME_BPF_RAW_OPEN_ERROR(close(bpf_fd));
1117     return (errno);
1118   }
1119 
1120   /* point the BPF device at the interface we're using: */
1121   if (ioctl(bpf_fd, BIOCSETIF, ifr) < 0) {
1122     tme_log(&element->tme_element_log_handle, 1, errno,
1123 	    (&element->tme_element_log_handle,
1124 	     _("failed to point BPF socket at %s"),
1125 	     ifr->ifr_name));
1126     saved_errno = errno;
1127     close(bpf_fd);
1128     errno = saved_errno;
1129     return (errno);
1130   }
1131 
1132   /* get the BPF read buffer size: */
1133   if (ioctl(bpf_fd, BIOCGBLEN, &packet_buffer_size) < 0) {
1134     tme_log(&element->tme_element_log_handle, 1, errno,
1135 	    (&element->tme_element_log_handle,
1136 	     _("failed to read the buffer size for %s"),
1137 	     dev_bpf_filename));
1138     _TME_BPF_RAW_OPEN_ERROR(close(bpf_fd));
1139     return (errno);
1140   }
1141   tme_log(&element->tme_element_log_handle, 1, errno,
1142 	  (&element->tme_element_log_handle,
1143 	   _("buffer size for %s is %u"),
1144 	   dev_bpf_filename, packet_buffer_size));
1145 
1146   /* set the interface into promiscuous mode: */
1147   if (ioctl(bpf_fd, BIOCPROMISC) < 0) {
1148     tme_log(&element->tme_element_log_handle, 1, errno,
1149 	    (&element->tme_element_log_handle,
1150 	     _("failed to set promiscuous mode on %s"),
1151 	     dev_bpf_filename));
1152     _TME_BPF_RAW_OPEN_ERROR(close(bpf_fd));
1153     return (errno);
1154   }
1155 
1156   /* start our data structure: */
1157   bpf = tme_new0(struct tme_bsd_bpf, 1);
1158   bpf->tme_bsd_bpf_element = element;
1159   bpf->tme_bsd_bpf_fd = bpf_fd;
1160   bpf->tme_bsd_bpf_buffer_size = packet_buffer_size;
1161   bpf->tme_bsd_bpf_buffer = tme_new(tme_uint8_t, packet_buffer_size);
1162   bpf->tme_bsd_bpf_delay_time = delay_time;
1163 
1164   /* start the threads: */
1165   tme_mutex_init(&bpf->tme_bsd_bpf_mutex);
1166   tme_cond_init(&bpf->tme_bsd_bpf_cond_reader);
1167   tme_thread_create((tme_thread_t) _tme_bsd_bpf_th_reader, bpf);
1168 
1169   /* fill the element: */
1170   element->tme_element_private = bpf;
1171   element->tme_element_connections_new = _tme_bsd_bpf_connections_new;
1172 
1173   return (TME_OK);
1174 #undef _TME_BPF_RAW_OPEN_ERROR
1175 }
1176