xref: /freebsd/sys/dev/netmap/netmap_legacy.c (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2018 Vincenzo Maffione
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *   1. Redistributions of source code must retain the above copyright
11  *      notice, this list of conditions and the following disclaimer.
12  *   2. Redistributions in binary form must reproduce the above copyright
13  *      notice, this list of conditions and the following disclaimer in the
14  *      documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /* $FreeBSD$ */
30 
31 #if defined(__FreeBSD__)
32 #include <sys/cdefs.h> /* prerequisite */
33 #include <sys/types.h>
34 #include <sys/param.h>	/* defines used in kernel.h */
35 #include <sys/filio.h>	/* FIONBIO */
36 #include <sys/malloc.h>
37 #include <sys/socketvar.h>	/* struct socket */
38 #include <sys/socket.h> /* sockaddrs */
39 #include <sys/sysctl.h>
40 #include <net/if.h>
41 #include <net/if_var.h>
42 #include <net/bpf.h>		/* BIOCIMMEDIATE */
43 #include <machine/bus.h>	/* bus_dmamap_* */
44 #include <sys/endian.h>
45 #elif defined(linux)
46 #include "bsd_glue.h"
47 #elif defined(__APPLE__)
48 #warning OSX support is only partial
49 #include "osx_glue.h"
50 #elif defined (_WIN32)
51 #include "win_glue.h"
52 #endif
53 
54 /*
55  * common headers
56  */
57 #include <net/netmap.h>
58 #include <dev/netmap/netmap_kern.h>
59 #include <dev/netmap/netmap_bdg.h>
60 
61 static int
62 nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr,
63 				struct nmreq_register *req)
64 {
65 	req->nr_offset = nmr->nr_offset;
66 	req->nr_memsize = nmr->nr_memsize;
67 	req->nr_tx_slots = nmr->nr_tx_slots;
68 	req->nr_rx_slots = nmr->nr_rx_slots;
69 	req->nr_tx_rings = nmr->nr_tx_rings;
70 	req->nr_rx_rings = nmr->nr_rx_rings;
71 	req->nr_mem_id = nmr->nr_arg2;
72 	req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK;
73 	if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) {
74 		/* Convert the older nmr->nr_ringid (original
75 		 * netmap control API) to nmr->nr_flags. */
76 		u_int regmode = NR_REG_DEFAULT;
77 		if (req->nr_ringid & NETMAP_SW_RING) {
78 			regmode = NR_REG_SW;
79 		} else if (req->nr_ringid & NETMAP_HW_RING) {
80 			regmode = NR_REG_ONE_NIC;
81 		} else {
82 			regmode = NR_REG_ALL_NIC;
83 		}
84 		req->nr_mode = regmode;
85 	} else {
86 		req->nr_mode = nmr->nr_flags & NR_REG_MASK;
87 	}
88 
89 	/* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */
90 	if (req->nr_mode == NR_REG_PIPE_MASTER ||
91 			req->nr_mode == NR_REG_PIPE_SLAVE) {
92 		char suffix[10];
93 		snprintf(suffix, sizeof(suffix), "%c%d",
94 			(req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'),
95 			req->nr_ringid);
96 		if (strlen(hdr->nr_name) + strlen(suffix)
97 					>= sizeof(hdr->nr_name)) {
98 			/* No space for the pipe suffix. */
99 			return ENOBUFS;
100 		}
101 		strncat(hdr->nr_name, suffix, strlen(suffix));
102 		req->nr_mode = NR_REG_ALL_NIC;
103 		req->nr_ringid = 0;
104 	}
105 	req->nr_flags = nmr->nr_flags & (~NR_REG_MASK);
106 	if (nmr->nr_ringid & NETMAP_NO_TX_POLL) {
107 		req->nr_flags |= NR_NO_TX_POLL;
108 	}
109 	if (nmr->nr_ringid & NETMAP_DO_RX_POLL) {
110 		req->nr_flags |= NR_DO_RX_POLL;
111 	}
112 	/* nmr->nr_arg1 (nr_pipes) ignored */
113 	req->nr_extra_bufs = nmr->nr_arg3;
114 
115 	return 0;
116 }
117 
118 /* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs
119  * (new API). The new struct is dynamically allocated. */
120 static struct nmreq_header *
121 nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
122 {
123 	struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr));
124 
125 	if (hdr == NULL) {
126 		goto oom;
127 	}
128 
129 	/* Sanitize nmr->nr_name by adding the string terminator. */
130 	if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) {
131 		nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';
132 	}
133 
134 	/* First prepare the request header. */
135 	hdr->nr_version = NETMAP_API; /* new API */
136 	strlcpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
137 	hdr->nr_options = (uintptr_t)NULL;
138 	hdr->nr_body = (uintptr_t)NULL;
139 
140 	switch (ioctl_cmd) {
141 	case NIOCREGIF: {
142 		switch (nmr->nr_cmd) {
143 		case 0: {
144 			/* Regular NIOCREGIF operation. */
145 			struct nmreq_register *req = nm_os_malloc(sizeof(*req));
146 			if (!req) { goto oom; }
147 			hdr->nr_body = (uintptr_t)req;
148 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
149 			if (nmreq_register_from_legacy(nmr, hdr, req)) {
150 				goto oom;
151 			}
152 			break;
153 		}
154 		case NETMAP_BDG_ATTACH: {
155 			struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
156 			if (!req) { goto oom; }
157 			hdr->nr_body = (uintptr_t)req;
158 			hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
159 			if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
160 				goto oom;
161 			}
162 			/* Fix nr_mode, starting from nr_arg1. */
163 			if (nmr->nr_arg1 & NETMAP_BDG_HOST) {
164 				req->reg.nr_mode = NR_REG_NIC_SW;
165 			} else {
166 				req->reg.nr_mode = NR_REG_ALL_NIC;
167 			}
168 			break;
169 		}
170 		case NETMAP_BDG_DETACH: {
171 			hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
172 			hdr->nr_body = (uintptr_t)nm_os_malloc(sizeof(struct nmreq_vale_detach));
173 			break;
174 		}
175 		case NETMAP_BDG_VNET_HDR:
176 		case NETMAP_VNET_HDR_GET: {
177 			struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
178 			if (!req) { goto oom; }
179 			hdr->nr_body = (uintptr_t)req;
180 			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
181 				NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
182 			req->nr_hdr_len = nmr->nr_arg1;
183 			break;
184 		}
185 		case NETMAP_BDG_NEWIF : {
186 			struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
187 			if (!req) { goto oom; }
188 			hdr->nr_body = (uintptr_t)req;
189 			hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
190 			req->nr_tx_slots = nmr->nr_tx_slots;
191 			req->nr_rx_slots = nmr->nr_rx_slots;
192 			req->nr_tx_rings = nmr->nr_tx_rings;
193 			req->nr_rx_rings = nmr->nr_rx_rings;
194 			req->nr_mem_id = nmr->nr_arg2;
195 			break;
196 		}
197 		case NETMAP_BDG_DELIF: {
198 			hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF;
199 			break;
200 		}
201 		case NETMAP_BDG_POLLING_ON:
202 		case NETMAP_BDG_POLLING_OFF: {
203 			struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
204 			if (!req) { goto oom; }
205 			hdr->nr_body = (uintptr_t)req;
206 			hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
207 				NETMAP_REQ_VALE_POLLING_ENABLE :
208 				NETMAP_REQ_VALE_POLLING_DISABLE;
209 			switch (nmr->nr_flags & NR_REG_MASK) {
210 			default:
211 				req->nr_mode = 0; /* invalid */
212 				break;
213 			case NR_REG_ONE_NIC:
214 				req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU;
215 				break;
216 			case NR_REG_ALL_NIC:
217 				req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU;
218 				break;
219 			}
220 			req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK;
221 			req->nr_num_polling_cpus = nmr->nr_arg1;
222 			break;
223 		}
224 		case NETMAP_PT_HOST_CREATE:
225 		case NETMAP_PT_HOST_DELETE: {
226 			nm_prerr("Netmap passthrough not supported yet");
227 			return NULL;
228 			break;
229 		}
230 		}
231 		break;
232 	}
233 	case NIOCGINFO: {
234 		if (nmr->nr_cmd == NETMAP_BDG_LIST) {
235 			struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
236 			if (!req) { goto oom; }
237 			hdr->nr_body = (uintptr_t)req;
238 			hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
239 			req->nr_bridge_idx = nmr->nr_arg1;
240 			req->nr_port_idx = nmr->nr_arg2;
241 		} else {
242 			/* Regular NIOCGINFO. */
243 			struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
244 			if (!req) { goto oom; }
245 			hdr->nr_body = (uintptr_t)req;
246 			hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
247 			req->nr_memsize = nmr->nr_memsize;
248 			req->nr_tx_slots = nmr->nr_tx_slots;
249 			req->nr_rx_slots = nmr->nr_rx_slots;
250 			req->nr_tx_rings = nmr->nr_tx_rings;
251 			req->nr_rx_rings = nmr->nr_rx_rings;
252 			req->nr_mem_id = nmr->nr_arg2;
253 		}
254 		break;
255 	}
256 	}
257 
258 	return hdr;
259 oom:
260 	if (hdr) {
261 		if (hdr->nr_body) {
262 			nm_os_free((void *)(uintptr_t)hdr->nr_body);
263 		}
264 		nm_os_free(hdr);
265 	}
266 	nm_prerr("Failed to allocate memory for nmreq_xyz struct");
267 
268 	return NULL;
269 }
270 
271 static void
272 nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr)
273 {
274 	nmr->nr_offset = req->nr_offset;
275 	nmr->nr_memsize = req->nr_memsize;
276 	nmr->nr_tx_slots = req->nr_tx_slots;
277 	nmr->nr_rx_slots = req->nr_rx_slots;
278 	nmr->nr_tx_rings = req->nr_tx_rings;
279 	nmr->nr_rx_rings = req->nr_rx_rings;
280 	nmr->nr_arg2 = req->nr_mem_id;
281 	nmr->nr_arg3 = req->nr_extra_bufs;
282 }
283 
284 /* Convert a nmreq_xyz struct (new API) to the legacy 'nmr' struct.
285  * It also frees the nmreq_xyz struct, as it was allocated by
286  * nmreq_from_legacy(). */
287 static int
288 nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
289 {
290 	int ret = 0;
291 
292 	/* We only write-back the fields that the user expects to be
293 	 * written back. */
294 	switch (hdr->nr_reqtype) {
295 	case NETMAP_REQ_REGISTER: {
296 		struct nmreq_register *req =
297 			(struct nmreq_register *)(uintptr_t)hdr->nr_body;
298 		nmreq_register_to_legacy(req, nmr);
299 		break;
300 	}
301 	case NETMAP_REQ_PORT_INFO_GET: {
302 		struct nmreq_port_info_get *req =
303 			(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
304 		nmr->nr_memsize = req->nr_memsize;
305 		nmr->nr_tx_slots = req->nr_tx_slots;
306 		nmr->nr_rx_slots = req->nr_rx_slots;
307 		nmr->nr_tx_rings = req->nr_tx_rings;
308 		nmr->nr_rx_rings = req->nr_rx_rings;
309 		nmr->nr_arg2 = req->nr_mem_id;
310 		break;
311 	}
312 	case NETMAP_REQ_VALE_ATTACH: {
313 		struct nmreq_vale_attach *req =
314 			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
315 		nmreq_register_to_legacy(&req->reg, nmr);
316 		break;
317 	}
318 	case NETMAP_REQ_VALE_DETACH: {
319 		break;
320 	}
321 	case NETMAP_REQ_VALE_LIST: {
322 		struct nmreq_vale_list *req =
323 			(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
324 		strlcpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
325 		nmr->nr_arg1 = req->nr_bridge_idx;
326 		nmr->nr_arg2 = req->nr_port_idx;
327 		break;
328 	}
329 	case NETMAP_REQ_PORT_HDR_SET:
330 	case NETMAP_REQ_PORT_HDR_GET: {
331 		struct nmreq_port_hdr *req =
332 			(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
333 		nmr->nr_arg1 = req->nr_hdr_len;
334 		break;
335 	}
336 	case NETMAP_REQ_VALE_NEWIF: {
337 		struct nmreq_vale_newif *req =
338 			(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
339 		nmr->nr_tx_slots = req->nr_tx_slots;
340 		nmr->nr_rx_slots = req->nr_rx_slots;
341 		nmr->nr_tx_rings = req->nr_tx_rings;
342 		nmr->nr_rx_rings = req->nr_rx_rings;
343 		nmr->nr_arg2 = req->nr_mem_id;
344 		break;
345 	}
346 	case NETMAP_REQ_VALE_DELIF:
347 	case NETMAP_REQ_VALE_POLLING_ENABLE:
348 	case NETMAP_REQ_VALE_POLLING_DISABLE: {
349 		break;
350 	}
351 	}
352 
353 	return ret;
354 }
355 
/*
 * Entry point for legacy netmap ioctls (pre-NETMAP_API control API).
 * NIOCGINFO and NIOCREGIF requests are translated into the new NIOCCTRL
 * request format, forwarded to netmap_ioctl(), and the result is copied
 * back into the legacy struct nmreq. Other commands are either handled
 * directly, ignored, or (on FreeBSD) forwarded to the interface as
 * device-specific ioctls.
 * Returns 0 on success or an errno value on failure.
 */
int
netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
			struct thread *td)
{
	int error = 0;

	switch (cmd) {
	case NIOCGINFO:
	case NIOCREGIF: {
		/* Request for the legacy control API. Convert it to a
		 * NIOCCTRL request. */
		struct nmreq *nmr = (struct nmreq *) data;
		struct nmreq_header *hdr;

		/* NOTE(review): 11 is presumably the oldest legacy nmreq
		 * layout this converter understands -- confirm against the
		 * NETMAP_API history. */
		if (nmr->nr_version < 11) {
			nm_prerr("Minimum supported API is 11 (requested %u)",
			    nmr->nr_version);
			return EINVAL;
		}
		hdr = nmreq_from_legacy(nmr, cmd);
		if (hdr == NULL) { /* out of memory */
			return ENOMEM;
		}
		error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td,
					/*nr_body_is_user=*/0);
		if (error == 0) {
			/* On success, write the results back into the
			 * legacy struct for userspace to read. */
			nmreq_to_legacy(hdr, nmr);
		}
		/* Free the converted request allocated by
		 * nmreq_from_legacy(). */
		if (hdr->nr_body) {
			nm_os_free((void *)(uintptr_t)hdr->nr_body);
		}
		nm_os_free(hdr);
		break;
	}
#ifdef WITH_VALE
	case NIOCCONFIG: {
		/* Configuration request for a VALE port; handled by the
		 * bridge code. */
		struct nm_ifreq *nr = (struct nm_ifreq *)data;
		error = netmap_bdg_config(nr);
		break;
	}
#endif
#ifdef __FreeBSD__
	case FIONBIO:
	case FIOASYNC:
		/* FIONBIO/FIOASYNC are no-ops. */
		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		/* Ignore these commands. */
		break;

	default:	/* allow device-specific ioctls */
	    {
		/* Forward unknown commands to the interface named in the
		 * request, so device-specific ioctls still work. */
		struct nmreq *nmr = (struct nmreq *)data;
		struct ifnet *ifp = ifunit_ref(nmr->nr_name);
		if (ifp == NULL) {
			error = ENXIO;
		} else {
			/* ifioctl() needs a socket for its vnet context;
			 * fake a zeroed one on the stack.
			 * NOTE(review): so.so_proto is left NULL -- the
			 * original comment claimed that is acceptable here;
			 * confirm against ifioctl(). */
			struct socket so;

			bzero(&so, sizeof(so));
			so.so_vnet = ifp->if_vnet;
			error = ifioctl(&so, cmd, data, td);
			if_rele(ifp);
		}
		break;
	    }

#else /* linux */
	default:
		error = EOPNOTSUPP;
#endif /* linux */
	}

	return error;
}
436