1 /*
2  * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
26 #include <stdint.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <byteswap.h>
30 #include <ipxe/pci.h>
31 #include <ipxe/io.h>
32 #include <ipxe/malloc.h>
33 #include <ipxe/profile.h>
34 #include <ipxe/iobuf.h>
35 #include <ipxe/netdevice.h>
36 #include <ipxe/if_ether.h>
37 #include <ipxe/ethernet.h>
38 #include "vmxnet3.h"
39 
40 /**
41  * @file
42  *
43  * VMware vmxnet3 virtual NIC driver
44  *
45  */
46 
/** Profiler covering the register write/read pair used to issue a
 * device command (see vmxnet3_command())
 */
static struct profiler vmxnet3_vm_command_profiler __profiler =
	{ .name = "vmxnet3.vm_command" };

/** Profiler covering the transmit producer doorbell write */
static struct profiler vmxnet3_vm_tx_profiler __profiler =
	{ .name = "vmxnet3.vm_tx" };

/** Profiler covering the receive producer doorbell write issued when
 * refilling the receive ring
 */
static struct profiler vmxnet3_vm_refill_profiler __profiler =
	{ .name = "vmxnet3.vm_refill" };

/** Profiler covering the event-acknowledgement register write */
static struct profiler vmxnet3_vm_event_profiler __profiler =
	{ .name = "vmxnet3.vm_event" };
62 
63 /**
64  * Issue command
65  *
66  * @v vmxnet		vmxnet3 NIC
67  * @v command		Command to issue
68  * @ret result		Command result
69  */
vmxnet3_command(struct vmxnet3_nic * vmxnet,uint32_t command)70 static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
71 					 uint32_t command ) {
72 	uint32_t result;
73 
74 	/* Issue command */
75 	profile_start ( &vmxnet3_vm_command_profiler );
76 	writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
77 	result = readl ( vmxnet->vd + VMXNET3_VD_CMD );
78 	profile_stop ( &vmxnet3_vm_command_profiler );
79 	profile_exclude ( &vmxnet3_vm_command_profiler );
80 
81 	return result;
82 }
83 
84 /**
85  * Transmit packet
86  *
87  * @v netdev		Network device
88  * @v iobuf		I/O buffer
89  * @ret rc		Return status code
90  */
vmxnet3_transmit(struct net_device * netdev,struct io_buffer * iobuf)91 static int vmxnet3_transmit ( struct net_device *netdev,
92 			      struct io_buffer *iobuf ) {
93 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
94 	struct vmxnet3_tx_desc *tx_desc;
95 	unsigned int fill;
96 	unsigned int desc_idx;
97 	unsigned int generation;
98 
99 	/* Check that we have a free transmit descriptor */
100 	fill = ( vmxnet->count.tx_prod - vmxnet->count.tx_cons );
101 	if ( fill >= VMXNET3_TX_FILL ) {
102 		DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
103 		       vmxnet );
104 		return -ENOBUFS;
105 	}
106 
107 	/* Locate transmit descriptor */
108 	desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
109 	generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
110 		       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
111 	assert ( vmxnet->tx_iobuf[desc_idx] == NULL );
112 
113 	/* Increment producer counter */
114 	vmxnet->count.tx_prod++;
115 
116 	/* Store I/O buffer for later completion */
117 	vmxnet->tx_iobuf[desc_idx] = iobuf;
118 
119 	/* Populate transmit descriptor */
120 	tx_desc = &vmxnet->dma->tx_desc[desc_idx];
121 	tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
122 	tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
123 	tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );
124 
125 	/* Hand over descriptor to NIC */
126 	wmb();
127 	profile_start ( &vmxnet3_vm_tx_profiler );
128 	writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
129 		 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
130 	profile_stop ( &vmxnet3_vm_tx_profiler );
131 	profile_exclude ( &vmxnet3_vm_tx_profiler );
132 
133 	return 0;
134 }
135 
136 /**
137  * Poll for completed transmissions
138  *
139  * @v netdev		Network device
140  */
vmxnet3_poll_tx(struct net_device * netdev)141 static void vmxnet3_poll_tx ( struct net_device *netdev ) {
142 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
143 	struct vmxnet3_tx_comp *tx_comp;
144 	struct io_buffer *iobuf;
145 	unsigned int comp_idx;
146 	unsigned int desc_idx;
147 	unsigned int generation;
148 
149 	while ( 1 ) {
150 
151 		/* Look for completed descriptors */
152 		comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
153 		generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
154 			       0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
155 		tx_comp = &vmxnet->dma->tx_comp[comp_idx];
156 		if ( generation != ( tx_comp->flags &
157 				     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
158 			break;
159 		}
160 
161 		/* Increment consumer counter */
162 		vmxnet->count.tx_cons++;
163 
164 		/* Locate corresponding transmit descriptor */
165 		desc_idx = ( le32_to_cpu ( tx_comp->index ) %
166 			     VMXNET3_NUM_TX_DESC );
167 		iobuf = vmxnet->tx_iobuf[desc_idx];
168 		if ( ! iobuf ) {
169 			DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
170 			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
171 			netdev_tx_err ( netdev, NULL, -ENOTTY );
172 			continue;
173 		}
174 
175 		/* Remove I/O buffer from transmit queue */
176 		vmxnet->tx_iobuf[desc_idx] = NULL;
177 
178 		/* Report transmission completion to network layer */
179 		DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
180 			vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
181 		netdev_tx_complete ( netdev, iobuf );
182 	}
183 }
184 
185 /**
186  * Flush any uncompleted transmit buffers
187  *
188  * @v netdev		Network device
189  */
vmxnet3_flush_tx(struct net_device * netdev)190 static void vmxnet3_flush_tx ( struct net_device *netdev ) {
191 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
192 	unsigned int i;
193 
194 	for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
195 		if ( vmxnet->tx_iobuf[i] ) {
196 			netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
197 						 -ECANCELED );
198 			vmxnet->tx_iobuf[i] = NULL;
199 		}
200 	}
201 }
202 
203 /**
204  * Refill receive ring
205  *
206  * @v netdev		Network device
207  */
vmxnet3_refill_rx(struct net_device * netdev)208 static void vmxnet3_refill_rx ( struct net_device *netdev ) {
209 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
210 	struct vmxnet3_rx_desc *rx_desc;
211 	struct io_buffer *iobuf;
212 	unsigned int orig_rx_prod = vmxnet->count.rx_prod;
213 	unsigned int desc_idx;
214 	unsigned int generation;
215 
216 	/* Fill receive ring to specified fill level */
217 	while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {
218 
219 		/* Locate receive descriptor */
220 		desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
221 		generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
222 			       0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
223 		assert ( vmxnet->rx_iobuf[desc_idx] == NULL );
224 
225 		/* Allocate I/O buffer */
226 		iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
227 		if ( ! iobuf ) {
228 			/* Non-fatal low memory condition */
229 			break;
230 		}
231 		iob_reserve ( iobuf, NET_IP_ALIGN );
232 
233 		/* Increment producer counter and fill level */
234 		vmxnet->count.rx_prod++;
235 		vmxnet->count.rx_fill++;
236 
237 		/* Store I/O buffer for later completion */
238 		vmxnet->rx_iobuf[desc_idx] = iobuf;
239 
240 		/* Populate receive descriptor */
241 		rx_desc = &vmxnet->dma->rx_desc[desc_idx];
242 		rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
243 		rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
244 
245 	}
246 
247 	/* Hand over any new descriptors to NIC */
248 	if ( vmxnet->count.rx_prod != orig_rx_prod ) {
249 		wmb();
250 		profile_start ( &vmxnet3_vm_refill_profiler );
251 		writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
252 			 ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
253 		profile_stop ( &vmxnet3_vm_refill_profiler );
254 		profile_exclude ( &vmxnet3_vm_refill_profiler );
255 	}
256 }
257 
258 /**
259  * Poll for received packets
260  *
261  * @v netdev		Network device
262  */
vmxnet3_poll_rx(struct net_device * netdev)263 static void vmxnet3_poll_rx ( struct net_device *netdev ) {
264 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
265 	struct vmxnet3_rx_comp *rx_comp;
266 	struct io_buffer *iobuf;
267 	unsigned int comp_idx;
268 	unsigned int desc_idx;
269 	unsigned int generation;
270 	size_t len;
271 
272 	while ( 1 ) {
273 
274 		/* Look for completed descriptors */
275 		comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
276 		generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
277 			       0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
278 		rx_comp = &vmxnet->dma->rx_comp[comp_idx];
279 		if ( generation != ( rx_comp->flags &
280 				     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
281 			break;
282 		}
283 
284 		/* Increment consumer counter */
285 		vmxnet->count.rx_cons++;
286 
287 		/* Locate corresponding receive descriptor */
288 		desc_idx = ( le32_to_cpu ( rx_comp->index ) %
289 			     VMXNET3_NUM_RX_DESC );
290 		iobuf = vmxnet->rx_iobuf[desc_idx];
291 		if ( ! iobuf ) {
292 			DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
293 			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
294 			netdev_rx_err ( netdev, NULL, -ENOTTY );
295 			continue;
296 		}
297 
298 		/* Remove I/O buffer from receive queue */
299 		vmxnet->rx_iobuf[desc_idx] = NULL;
300 		vmxnet->count.rx_fill--;
301 
302 		/* Deliver packet to network layer */
303 		len = ( le32_to_cpu ( rx_comp->len ) &
304 			( VMXNET3_MAX_PACKET_LEN - 1 ) );
305 		DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
306 			vmxnet, comp_idx, desc_idx, len );
307 		iob_put ( iobuf, len );
308 		netdev_rx ( netdev, iobuf );
309 	}
310 }
311 
312 /**
313  * Flush any uncompleted receive buffers
314  *
315  * @v netdev		Network device
316  */
vmxnet3_flush_rx(struct net_device * netdev)317 static void vmxnet3_flush_rx ( struct net_device *netdev ) {
318 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
319 	struct io_buffer *iobuf;
320 	unsigned int i;
321 
322 	for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
323 		if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
324 			netdev_rx_err ( netdev, iobuf, -ECANCELED );
325 			vmxnet->rx_iobuf[i] = NULL;
326 		}
327 	}
328 }
329 
330 /**
331  * Check link state
332  *
333  * @v netdev		Network device
334  */
vmxnet3_check_link(struct net_device * netdev)335 static void vmxnet3_check_link ( struct net_device *netdev ) {
336 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
337 	uint32_t state;
338 	int link_up;
339 	unsigned int link_speed;
340 
341 	/* Get link state */
342 	state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
343 	link_up = ( state & 1 );
344 	link_speed = ( state >> 16 );
345 
346 	/* Report link state to network device */
347 	if ( link_up ) {
348 		DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
349 		       vmxnet, link_speed );
350 		netdev_link_up ( netdev );
351 	} else {
352 		DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
353 		netdev_link_down ( netdev );
354 	}
355 }
356 
357 /**
358  * Poll for events
359  *
360  * @v netdev		Network device
361  */
vmxnet3_poll_events(struct net_device * netdev)362 static void vmxnet3_poll_events ( struct net_device *netdev ) {
363 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
364 	uint32_t events;
365 
366 	/* Do nothing unless there are events to process */
367 	if ( ! vmxnet->dma->shared.ecr )
368 		return;
369 	events = le32_to_cpu ( vmxnet->dma->shared.ecr );
370 
371 	/* Acknowledge these events */
372 	profile_start ( &vmxnet3_vm_event_profiler );
373 	writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
374 	profile_stop ( &vmxnet3_vm_event_profiler );
375 	profile_exclude ( &vmxnet3_vm_event_profiler );
376 
377 	/* Check for link state change */
378 	if ( events & VMXNET3_ECR_LINK ) {
379 		vmxnet3_check_link ( netdev );
380 		events &= ~VMXNET3_ECR_LINK;
381 	}
382 
383 	/* Check for queue errors */
384 	if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
385 		vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
386 		DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
387 		       "%08x)\n", vmxnet,
388 		       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
389 		       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
390 		/* Report errors to allow for visibility via "ifstat" */
391 		if ( events & VMXNET3_ECR_TQERR )
392 			netdev_tx_err ( netdev, NULL, -EPIPE );
393 		if ( events & VMXNET3_ECR_RQERR )
394 			netdev_rx_err ( netdev, NULL, -EPIPE );
395 		events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
396 	}
397 
398 	/* Check for unknown events */
399 	if ( events ) {
400 		DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
401 		       vmxnet, events );
402 		/* Report error to allow for visibility via "ifstat" */
403 		netdev_rx_err ( netdev, NULL, -ENODEV );
404 	}
405 }
406 
407 /**
408  * Poll network device
409  *
410  * @v netdev		Network device
411  */
static void vmxnet3_poll ( struct net_device *netdev ) {

	/* Process device events (link changes, queue errors) first */
	vmxnet3_poll_events ( netdev );
	/* Reap completed transmissions */
	vmxnet3_poll_tx ( netdev );
	/* Deliver received packets */
	vmxnet3_poll_rx ( netdev );
	/* Top up the receive ring with fresh buffers */
	vmxnet3_refill_rx ( netdev );
}
419 
420 /**
421  * Enable/disable interrupts
422  *
423  * @v netdev		Network device
424  * @v enable		Interrupts should be enabled
425  */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );

	/* Deliberate stub: the driver operates purely by polling, so
	 * interrupt control is merely logged.
	 */
	DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
	       vmxnet, ( enable ? "enable" : "disable" ) );
}
432 
433 /**
434  * Set MAC address
435  *
436  * @v vmxnet		vmxnet3 NIC
437  * @v ll_addr		Link-layer address to set
438  */
vmxnet3_set_ll_addr(struct vmxnet3_nic * vmxnet,const void * ll_addr)439 static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
440 				  const void *ll_addr ) {
441 	struct {
442 		uint32_t low;
443 		uint32_t high;
444 	} __attribute__ (( packed )) mac;
445 
446 	memset ( &mac, 0, sizeof ( mac ) );
447 	memcpy ( &mac, ll_addr, ETH_ALEN );
448 	writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
449 	writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
450 }
451 
452 /**
453  * Open NIC
454  *
455  * @v netdev		Network device
456  * @ret rc		Return status code
457  */
static int vmxnet3_open ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_shared *shared;
	struct vmxnet3_queues *queues;
	uint64_t shared_bus;
	uint64_t queues_bus;
	uint32_t status;
	int rc;

	/* Allocate DMA areas (descriptor rings, completion rings,
	 * queue descriptors and shared area all live in one
	 * zero-initialised allocation)
	 */
	vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
	if ( ! vmxnet->dma ) {
		DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
		       vmxnet );
		rc = -ENOMEM;
		goto err_alloc_dma;
	}
	memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

	/* Populate queue descriptors with the bus addresses and sizes
	 * of the TX/RX descriptor and completion rings
	 */
	queues = &vmxnet->dma->queues;
	queues->tx.cfg.desc_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
	queues->tx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
	queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
	queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
	queues->rx.cfg.desc_address[0] =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
	queues->rx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
	queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
	queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
	queues_bus = virt_to_bus ( queues );
	DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
	       vmxnet, queues_bus, sizeof ( *queues ) );

	/* Populate shared area: magic/version handshake, pointer to
	 * the queue descriptors, MTU, interrupt configuration and
	 * receive filter mode
	 */
	shared = &vmxnet->dma->shared;
	shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
	shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
	shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
	shared->misc.upt_version_support =
		cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
	shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
	shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
	shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
	shared->misc.num_tx_queues = 1;
	shared->misc.num_rx_queues = 1;
	shared->interrupt.num_intrs = 1;
	shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
	shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
					       VMXNET3_RXM_BCAST |
					       VMXNET3_RXM_ALL_MULTI );
	shared_bus = virt_to_bus ( shared );
	DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
	       vmxnet, shared_bus, sizeof ( *shared ) );

	/* Zero counters (producer/consumer indices and fill level) */
	memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

	/* Set MAC address */
	vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

	/* Pass shared area to device (low then high dword) */
	writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
	writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

	/* Activate device; a non-zero command result indicates that
	 * activation failed
	 */
	if ( ( status = vmxnet3_command ( vmxnet,
					  VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
		DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
		       vmxnet, status );
		rc = -EIO;
		goto err_activate;
	}

	/* Fill receive ring */
	vmxnet3_refill_rx ( netdev );

	return 0;

	/* NOTE(review): the two commands below sit after the success
	 * return and before any label, so they are currently
	 * unreachable; they appear to be retained so that the error
	 * path mirrors vmxnet3_close() — confirm before removing.
	 */
	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
	vmxnet3_flush_tx ( netdev );
	vmxnet3_flush_rx ( netdev );
	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
	return rc;
}
549 
550 /**
551  * Close NIC
552  *
553  * @v netdev		Network device
554  */
vmxnet3_close(struct net_device * netdev)555 static void vmxnet3_close ( struct net_device *netdev ) {
556 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
557 
558 	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
559 	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
560 	vmxnet3_flush_tx ( netdev );
561 	vmxnet3_flush_rx ( netdev );
562 	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
563 }
564 
/** vmxnet3 net device operations */
static struct net_device_operations vmxnet3_operations = {
	.open		= vmxnet3_open,
	.close		= vmxnet3_close,
	.transmit	= vmxnet3_transmit,
	.poll		= vmxnet3_poll,
	.irq		= vmxnet3_irq,	/* stub: logs only; driver polls */
};
573 
574 /**
575  * Check version
576  *
577  * @v vmxnet		vmxnet3 NIC
578  * @ret rc		Return status code
579  */
vmxnet3_check_version(struct vmxnet3_nic * vmxnet)580 static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
581 	uint32_t version;
582 	uint32_t upt_version;
583 
584 	/* Read version */
585 	version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
586 	upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
587 	DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
588 	       vmxnet, version, upt_version );
589 
590 	/* Inform NIC of driver version */
591 	writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
592 	writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );
593 
594 	return 0;
595 }
596 
597 /**
598  * Get permanent MAC address
599  *
600  * @v vmxnet		vmxnet3 NIC
601  * @v hw_addr		Hardware address to fill in
602  */
vmxnet3_get_hw_addr(struct vmxnet3_nic * vmxnet,void * hw_addr)603 static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
604 	struct {
605 		uint32_t low;
606 		uint32_t high;
607 	} __attribute__ (( packed )) mac;
608 
609 	mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
610 					       VMXNET3_CMD_GET_PERM_MAC_LO ) );
611 	mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
612 					       VMXNET3_CMD_GET_PERM_MAC_HI ) );
613 	memcpy ( hw_addr, &mac, ETH_ALEN );
614 }
615 
616 /**
617  * Probe PCI device
618  *
619  * @v pci		PCI device
620  * @v id		PCI ID
621  * @ret rc		Return status code
622  */
vmxnet3_probe(struct pci_device * pci)623 static int vmxnet3_probe ( struct pci_device *pci ) {
624 	struct net_device *netdev;
625 	struct vmxnet3_nic *vmxnet;
626 	int rc;
627 
628 	/* Allocate network device */
629 	netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
630 	if ( ! netdev ) {
631 		rc = -ENOMEM;
632 		goto err_alloc_etherdev;
633 	}
634 	netdev_init ( netdev, &vmxnet3_operations );
635 	vmxnet = netdev_priv ( netdev );
636 	pci_set_drvdata ( pci, netdev );
637 	netdev->dev = &pci->dev;
638 	memset ( vmxnet, 0, sizeof ( *vmxnet ) );
639 
640 	/* Fix up PCI device */
641 	adjust_pci_device ( pci );
642 
643 	/* Map PCI BARs */
644 	vmxnet->pt = ioremap ( pci_bar_start ( pci, VMXNET3_PT_BAR ),
645 			       VMXNET3_PT_LEN );
646 	if ( ! vmxnet->pt ) {
647 		rc = -ENODEV;
648 		goto err_ioremap_pt;
649 	}
650 	vmxnet->vd = ioremap ( pci_bar_start ( pci, VMXNET3_VD_BAR ),
651 			       VMXNET3_VD_LEN );
652 	if ( ! vmxnet->vd ) {
653 		rc = -ENODEV;
654 		goto err_ioremap_vd;
655 	}
656 
657 	/* Version check */
658 	if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
659 		goto err_check_version;
660 
661 	/* Reset device */
662 	if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
663 		goto err_reset;
664 
665 	/* Read initial MAC address */
666 	vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );
667 
668 	/* Register network device */
669 	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
670 		DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
671 		       "%s\n", vmxnet, strerror ( rc ) );
672 		goto err_register_netdev;
673 	}
674 
675 	/* Get initial link state */
676 	vmxnet3_check_link ( netdev );
677 
678 	return 0;
679 
680 	unregister_netdev ( netdev );
681  err_register_netdev:
682  err_reset:
683  err_check_version:
684 	iounmap ( vmxnet->vd );
685  err_ioremap_vd:
686 	iounmap ( vmxnet->pt );
687  err_ioremap_pt:
688 	netdev_nullify ( netdev );
689 	netdev_put ( netdev );
690  err_alloc_etherdev:
691 	return rc;
692 }
693 
694 /**
695  * Remove PCI device
696  *
697  * @v pci		PCI device
698  */
vmxnet3_remove(struct pci_device * pci)699 static void vmxnet3_remove ( struct pci_device *pci ) {
700 	struct net_device *netdev = pci_get_drvdata ( pci );
701 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
702 
703 	unregister_netdev ( netdev );
704 	iounmap ( vmxnet->vd );
705 	iounmap ( vmxnet->pt );
706 	netdev_nullify ( netdev );
707 	netdev_put ( netdev );
708 }
709 
/** vmxnet3 PCI IDs */
static struct pci_device_id vmxnet3_nics[] = {
	/* VMware (vendor 0x15ad), vmxnet3 virtual NIC (device 0x07b0) */
	PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};
714 
/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
	.ids = vmxnet3_nics,
	/* Number of entries in the ID table above */
	.id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
	.probe = vmxnet3_probe,
	.remove = vmxnet3_remove,
};
722